sbi->s_lvid_bh = NULL;
}
+/*
+ * Map a volume descriptor tag identifier to its slot in the in-memory
+ * VDS record array.  Only the four descriptor types tracked by sequence
+ * number (PVD, IUVD, LVD, USD) are handled; any other ident yields NULL,
+ * so callers that pass other tags must check the return value.
+ */
+static struct udf_vds_record *get_volume_descriptor_record(
+ struct udf_vds_record *vds, uint16_t ident)
+{
+ switch (ident) {
+ case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
+ return &vds[VDS_POS_PRIMARY_VOL_DESC];
+ case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
+ return &vds[VDS_POS_IMP_USE_VOL_DESC];
+ case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
+ return &vds[VDS_POS_LOGICAL_VOL_DESC];
+ case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
+ return &vds[VDS_POS_UNALLOC_SPACE_DESC];
+ }
+ return NULL;
+}
/*
* Process a main/reserve volume descriptor sequence.
gd = (struct generic_desc *)bh->b_data;
vdsn = le32_to_cpu(gd->volDescSeqNum);
switch (ident) {
- case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
- curr = &vds[VDS_POS_PRIMARY_VOL_DESC];
- if (vdsn >= curr->volDescSeqNum) {
- curr->volDescSeqNum = vdsn;
- curr->block = block;
- }
- break;
case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
if (++indirections > UDF_MAX_TD_NESTING) {
udf_err(sb, "too many Volume Descriptor "
/* For loop is going to increment 'block' again */
block--;
break;
+ case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
- curr = &vds[VDS_POS_IMP_USE_VOL_DESC];
+ case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
+ case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
+ curr = get_volume_descriptor_record(vds, ident);
 if (vdsn >= curr->volDescSeqNum) {
 curr->volDescSeqNum = vdsn;
 curr->block = block;
 }
 break;
 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
 curr = &vds[VDS_POS_PARTITION_DESC];
 if (!curr->block)
 curr->block = block;
 break;
- case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
- curr = &vds[VDS_POS_LOGICAL_VOL_DESC];
- if (vdsn >= curr->volDescSeqNum) {
- curr->volDescSeqNum = vdsn;
- curr->block = block;
- }
- break;
- case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
- curr = &vds[VDS_POS_UNALLOC_SPACE_DESC];
- if (vdsn >= curr->volDescSeqNum) {
- curr->volDescSeqNum = vdsn;
- curr->block = block;
- }
- break;
case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
vds[VDS_POS_TERMINATING_DESC].block = block;
done = true;