path: root/fs/udf/partition.c
Diffstat (limited to 'fs/udf/partition.c')
-rw-r--r--  fs/udf/partition.c  160
1 files changed, 104 insertions, 56 deletions
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index eeb4714b3641..027c879969f1 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -34,8 +34,8 @@ inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct udf_part_map *map;
 	if (partition >= sbi->s_partitions) {
-		udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
-			  block, partition, offset);
+		udf_debug("block=%d, partition=%d, offset=%d: "
+			  "invalid partition\n", block, partition, offset);
 		return 0xFFFFFFFF;
 	}
 	map = &sbi->s_partmaps[partition];
@@ -54,13 +54,15 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
 	uint32_t loc;
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct udf_part_map *map;
+	struct udf_virtual_data *vdata;
 
 	map = &sbi->s_partmaps[partition];
-	index = (sb->s_blocksize - map->s_type_specific.s_virtual.s_start_offset) / sizeof(uint32_t);
+	vdata = &map->s_type_specific.s_virtual;
+	index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
 
-	if (block > map->s_type_specific.s_virtual.s_num_entries) {
-		udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
-			  block, map->s_type_specific.s_virtual.s_num_entries);
+	if (block > vdata->s_num_entries) {
+		udf_debug("Trying to access block beyond end of VAT "
+			  "(%d max %d)\n", block, vdata->s_num_entries);
 		return 0xFFFFFFFF;
 	}
 
@@ -70,12 +72,13 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
 		index = block % (sb->s_blocksize / sizeof(uint32_t));
 	} else {
 		newblock = 0;
-		index = map->s_type_specific.s_virtual.s_start_offset / sizeof(uint32_t) + block;
+		index = vdata->s_start_offset / sizeof(uint32_t) + block;
 	}
 
 	loc = udf_block_map(sbi->s_vat_inode, newblock);
 
-	if (!(bh = sb_bread(sb, loc))) {
+	bh = sb_bread(sb, loc);
+	if (!bh) {
 		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
 			  sb, block, partition, loc, index);
 		return 0xFFFFFFFF;
@@ -85,17 +88,19 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
 
 	brelse(bh);
 
-	if (UDF_I_LOCATION(sbi->s_vat_inode).partitionReferenceNum == partition) {
+	if (UDF_I_LOCATION(sbi->s_vat_inode).partitionReferenceNum ==
+							partition) {
 		udf_debug("recursive call to udf_get_pblock!\n");
 		return 0xFFFFFFFF;
 	}
 
 	return udf_get_pblock(sb, loc,
-			      UDF_I_LOCATION(sbi->s_vat_inode).partitionReferenceNum,
+			      UDF_I_LOCATION(sbi->s_vat_inode).
+							partitionReferenceNum,
 			      offset);
 }
 
-inline uint32_t udf_get_pblock_virt20(struct super_block * sb, uint32_t block,
+inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
 				      uint16_t partition, uint32_t offset)
 {
 	return udf_get_pblock_virt15(sb, block, partition, offset);
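
For orientation, the virtual-partition (VAT) lookup that the hunks above reshuffle can be modeled in ordinary userspace C. The sketch below only illustrates the index arithmetic, assuming the VAT file is a flat sequence of filesystem blocks whose first block carries a start_offset-byte header followed by 32-bit mapping entries; struct vat_model and vat_lookup() are illustrative names, not kernel API, and the little-endian conversion done by le32_to_cpu() in the kernel is reduced to a comment.

#include <stdint.h>
#include <string.h>

struct vat_model {
	uint32_t block_size;	/* filesystem block size in bytes */
	uint32_t start_offset;	/* header bytes before the first entry */
	uint32_t num_entries;	/* number of mapping entries */
	const uint8_t *data;	/* VAT file contents, block after block */
};

/* Return the logical block for a virtual block, or UINT32_MAX on error. */
static uint32_t vat_lookup(const struct vat_model *vat, uint32_t vblock)
{
	uint32_t per_block = vat->block_size / sizeof(uint32_t);
	uint32_t entries_in_first =
		(vat->block_size - vat->start_offset) / sizeof(uint32_t);
	uint32_t fileblock, index, entry;

	if (vblock > vat->num_entries)
		return UINT32_MAX;

	if (vblock >= entries_in_first) {
		/* Entry lives in a later VAT block; plain array indexing. */
		vblock -= entries_in_first;
		fileblock = 1 + vblock / per_block;
		index = vblock % per_block;
	} else {
		/* Entry lives in the first block, after the header. */
		fileblock = 0;
		index = vat->start_offset / sizeof(uint32_t) + vblock;
	}

	memcpy(&entry,
	       vat->data + (size_t)fileblock * vat->block_size +
	       (size_t)index * sizeof(uint32_t), sizeof(entry));
	return entry;	/* on disk this value is little-endian */
}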
@@ -109,27 +114,32 @@ uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct udf_part_map *map;
 	uint32_t packet;
+	struct udf_sparing_data *sdata;
 
 	map = &sbi->s_partmaps[partition];
-	packet = (block + offset) & ~(map->s_type_specific.s_sparing.s_packet_len - 1);
+	sdata = &map->s_type_specific.s_sparing;
+	packet = (block + offset) & ~(sdata->s_packet_len - 1);
 
 	for (i = 0; i < 4; i++) {
-		if (map->s_type_specific.s_sparing.s_spar_map[i] != NULL) {
-			st = (struct sparingTable *)map->s_type_specific.s_sparing.s_spar_map[i]->b_data;
+		if (sdata->s_spar_map[i] != NULL) {
+			st = (struct sparingTable *)
+				sdata->s_spar_map[i]->b_data;
 			break;
 		}
 	}
 
 	if (st) {
 		for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
-			if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0) {
+			struct sparingEntry *entry = &st->mapEntry[i];
+			u32 origLoc = le32_to_cpu(entry->origLocation);
+			if (origLoc >= 0xFFFFFFF0)
 				break;
-			} else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet) {
-				return le32_to_cpu(st->mapEntry[i].mappedLocation) +
-					((block + offset) & (map->s_type_specific.s_sparing.s_packet_len - 1));
-			} else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet) {
+			else if (origLoc == packet)
+				return le32_to_cpu(entry->mappedLocation) +
+					((block + offset) &
+					 (sdata->s_packet_len - 1));
+			else if (origLoc > packet)
 				break;
-			}
 		}
 	}
 
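
The sparing lookup rewritten above boils down to a packet-granular remap through a small table sorted by original location. A minimal standalone sketch follows; struct spar_entry and spar_remap() are made-up names, the byte-order conversion and the partition-root base address handled by the surrounding kernel function are left out, and entries at or above 0xFFFFFFF0 are assumed to mark the unused tail of the table as in the code above.

#include <stdint.h>

struct spar_entry {
	uint32_t orig;		/* original (defective) packet address */
	uint32_t mapped;	/* replacement packet address */
};

static uint32_t spar_remap(const struct spar_entry *tbl, unsigned int len,
			   uint32_t packet_len, uint32_t block)
{
	/* packet_len is a power of two; mask off the offset in the packet */
	uint32_t packet = block & ~(packet_len - 1);
	unsigned int i;

	for (i = 0; i < len; i++) {
		uint32_t orig = tbl[i].orig;

		if (orig >= 0xFFFFFFF0)
			break;			/* unused entries, stop */
		if (orig == packet)		/* defective packet: redirect */
			return tbl[i].mapped + (block & (packet_len - 1));
		if (orig > packet)
			break;			/* sorted table, no match */
	}
	return block;				/* not spared, use as-is */
}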
@@ -144,63 +154,101 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
 	uint32_t packet;
 	int i, j, k, l;
 	struct udf_sb_info *sbi = UDF_SB(sb);
+	u16 reallocationTableLen;
+	struct buffer_head *bh;
 
 	for (i = 0; i < sbi->s_partitions; i++) {
 		struct udf_part_map *map = &sbi->s_partmaps[i];
 		if (old_block > map->s_partition_root &&
 		    old_block < map->s_partition_root + map->s_partition_len) {
 			sdata = &map->s_type_specific.s_sparing;
-			packet = (old_block - map->s_partition_root) & ~(sdata->s_packet_len - 1);
+			packet = (old_block - map->s_partition_root) &
+						~(sdata->s_packet_len - 1);
 
-			for (j = 0; j < 4; j++) {
-				if (map->s_type_specific.s_sparing.s_spar_map[j] != NULL) {
-					st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
+			for (j = 0; j < 4; j++)
+				if (sdata->s_spar_map[j] != NULL) {
+					st = (struct sparingTable *)
+						sdata->s_spar_map[j]->b_data;
 					break;
 				}
-			}
 
 			if (!st)
 				return 1;
 
-			for (k = 0; k < le16_to_cpu(st->reallocationTableLen); k++) {
-				if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF) {
+			reallocationTableLen =
+				le16_to_cpu(st->reallocationTableLen);
+			for (k = 0; k < reallocationTableLen; k++) {
+				struct sparingEntry *entry = &st->mapEntry[k];
+				u32 origLoc = le32_to_cpu(entry->origLocation);
+
+				if (origLoc == 0xFFFFFFFF) {
 					for (; j < 4; j++) {
-						if (sdata->s_spar_map[j]) {
-							st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
-							st->mapEntry[k].origLocation = cpu_to_le32(packet);
-							udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
-							mark_buffer_dirty(sdata->s_spar_map[j]);
-						}
+						int len;
+						bh = sdata->s_spar_map[j];
+						if (!bh)
+							continue;
+
+						st = (struct sparingTable *)
+							bh->b_data;
+						entry->origLocation =
+							cpu_to_le32(packet);
+						len =
+						  sizeof(struct sparingTable) +
+						  reallocationTableLen *
+						  sizeof(struct sparingEntry);
+						udf_update_tag((char *)st, len);
+						mark_buffer_dirty(bh);
 					}
-					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
-						((old_block - map->s_partition_root) & (sdata->s_packet_len - 1));
+					*new_block = le32_to_cpu(
+						entry->mappedLocation) +
+						((old_block -
+						  map->s_partition_root) &
+						 (sdata->s_packet_len - 1));
 					return 0;
-				} else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet) {
-					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
-						((old_block - map->s_partition_root) & (sdata->s_packet_len - 1));
+				} else if (origLoc == packet) {
+					*new_block = le32_to_cpu(
+						entry->mappedLocation) +
+						((old_block -
+						  map->s_partition_root) &
+						 (sdata->s_packet_len - 1));
 					return 0;
-				} else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet) {
+				} else if (origLoc > packet)
 					break;
-				}
 			}
 
-			for (l = k; l < le16_to_cpu(st->reallocationTableLen); l++) {
-				if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF) {
-					for (; j < 4; j++) {
-						if (sdata->s_spar_map[j]) {
-							st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
-							mapEntry = st->mapEntry[l];
-							mapEntry.origLocation = cpu_to_le32(packet);
-							memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry));
-							st->mapEntry[k] = mapEntry;
-							udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
-							mark_buffer_dirty(sdata->s_spar_map[j]);
-						}
-					}
-					*new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
-						((old_block - map->s_partition_root) & (sdata->s_packet_len - 1));
-					return 0;
+			for (l = k; l < reallocationTableLen; l++) {
+				struct sparingEntry *entry = &st->mapEntry[l];
+				u32 origLoc = le32_to_cpu(entry->origLocation);
+
+				if (origLoc != 0xFFFFFFFF)
+					continue;
+
+				for (; j < 4; j++) {
+					bh = sdata->s_spar_map[j];
+					if (!bh)
+						continue;
+
+					st = (struct sparingTable *)bh->b_data;
+					mapEntry = st->mapEntry[l];
+					mapEntry.origLocation =
+							cpu_to_le32(packet);
+					memmove(&st->mapEntry[k + 1],
+						&st->mapEntry[k],
+						(l - k) *
+						sizeof(struct sparingEntry));
+					st->mapEntry[k] = mapEntry;
+					udf_update_tag((char *)st,
+						sizeof(struct sparingTable) +
+						reallocationTableLen *
+						sizeof(struct sparingEntry));
+					mark_buffer_dirty(bh);
 				}
+				*new_block =
+					le32_to_cpu(
+					    st->mapEntry[k].mappedLocation) +
+					((old_block - map->s_partition_root) &
+					 (sdata->s_packet_len - 1));
+				return 0;
 			}
 
 			return 1;
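
The relocation path in the last hunk walks the same sorted table: it reuses an existing entry for the packet containing old_block, or claims the first free entry (origLocation == 0xFFFFFFFF) and keeps the table sorted. A rough userspace model under the assumption of a single in-memory copy of the table with partition-relative block numbers; spar_relocate() and struct spar_entry are made-up names, and the on-disk update (udf_update_tag() plus mark_buffer_dirty() in the kernel) is reduced to comments.

#include <stdint.h>
#include <string.h>

struct spar_entry { uint32_t orig, mapped; };

static int spar_relocate(struct spar_entry *tbl, unsigned int len,
			 uint32_t packet_len, uint32_t old_block,
			 uint32_t *new_block)
{
	uint32_t packet = old_block & ~(packet_len - 1);
	unsigned int k, l;

	for (k = 0; k < len; k++) {
		uint32_t orig = tbl[k].orig;

		if (orig == 0xFFFFFFFF) {
			/* First free entry: claim it for this packet. */
			tbl[k].orig = packet;
			/* (kernel: update tag checksum, dirty the buffer) */
			*new_block = tbl[k].mapped +
				     (old_block & (packet_len - 1));
			return 0;
		} else if (orig == packet) {
			/* Packet already spared, reuse its mapping. */
			*new_block = tbl[k].mapped +
				     (old_block & (packet_len - 1));
			return 0;
		} else if (orig > packet) {
			break;	/* sorted table: insert before entry k */
		}
	}

	/* Take the first free entry at or after k and move it to slot k so
	 * the table stays sorted by original location. */
	for (l = k; l < len; l++) {
		struct spar_entry entry = tbl[l];

		if (entry.orig != 0xFFFFFFFF)
			continue;

		entry.orig = packet;
		memmove(&tbl[k + 1], &tbl[k], (l - k) * sizeof(*tbl));
		tbl[k] = entry;
		/* (kernel: update tag checksum, dirty the buffer) */
		*new_block = tbl[k].mapped + (old_block & (packet_len - 1));
		return 0;
	}

	return 1;	/* no free sparing entries left */
}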