Diffstat (limited to 'fs/udf/balloc.c')
-rw-r--r--  fs/udf/balloc.c  420
1 file changed, 161 insertions(+), 259 deletions(-)
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index ef48d094dd2b..276f7207a564 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -70,9 +70,9 @@ static inline int find_next_one_bit(void *addr, int size, int offset)
 	if (!size)
 		return result;
 	tmp = leBPL_to_cpup(p);
-      found_first:
+found_first:
 	tmp &= ~0UL >> (BITS_PER_LONG - size);
-      found_middle:
+found_middle:
 	return result + ffz(~tmp);
 }
 
@@ -110,11 +110,11 @@ static int __load_block_bitmap(struct super_block *sb,
 					  nr_groups);
 	}
 
-	if (bitmap->s_block_bitmap[block_group])
+	if (bitmap->s_block_bitmap[block_group]) {
 		return block_group;
-	else {
-		retval =
-		    read_block_bitmap(sb, bitmap, block_group, block_group);
+	} else {
+		retval = read_block_bitmap(sb, bitmap, block_group,
+					   block_group);
 		if (retval < 0)
 			return retval;
 		return block_group;
@@ -155,22 +155,16 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb,
-							    bloc.
-							    partitionReferenceNum))
-	{
-		udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
-			  bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb,
-								      bloc.
-								      partitionReferenceNum));
+	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
+		udf_debug("%d < %d || %d + %d > %d\n",
+			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
+			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
 		goto error_return;
 	}
 
-	block =
-	    bloc.logicalBlockNum + offset +
-	    (sizeof(struct spaceBitmapDesc) << 3);
+	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);
 
-      do_more:
+do_more:
 	overflow = 0;
 	block_group = block >> (sb->s_blocksize_bits + 3);
 	bit = block % (sb->s_blocksize << 3);
@@ -190,18 +184,13 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
 	for (i = 0; i < count; i++) {
 		if (udf_set_bit(bit + i, bh->b_data)) {
 			udf_debug("bit %ld already set\n", bit + i);
-			udf_debug("byte=%2x\n",
-				  ((char *)bh->b_data)[(bit + i) >> 3]);
+			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
 		} else {
 			if (inode)
 				DQUOT_FREE_BLOCK(inode, 1);
 			if (UDF_SB_LVIDBH(sb)) {
-				UDF_SB_LVID(sb)->
-				    freeSpaceTable[UDF_SB_PARTITION(sb)] =
-				    cpu_to_le32(le32_to_cpu
-						(UDF_SB_LVID(sb)->
-						 freeSpaceTable[UDF_SB_PARTITION
-								(sb)]) + 1);
+				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
+					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
 			}
 		}
 	}
@@ -211,7 +200,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
 		count = overflow;
 		goto do_more;
 	}
-      error_return:
+error_return:
 	sb->s_dirt = 1;
 	if (UDF_SB_LVIDBH(sb))
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
@@ -238,7 +227,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
 	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
 		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;
 
-      repeat:
+repeat:
 	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
 		     (sizeof(struct spaceBitmapDesc) << 3) +
 		     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
@@ -254,11 +243,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
 	bit = block % (sb->s_blocksize << 3);
 
 	while (bit < (sb->s_blocksize << 3) && block_count > 0) {
-		if (!udf_test_bit(bit, bh->b_data))
+		if (!udf_test_bit(bit, bh->b_data)) {
 			goto out;
-		else if (DQUOT_PREALLOC_BLOCK(inode, 1))
+		} else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
 			goto out;
-		else if (!udf_clear_bit(bit, bh->b_data)) {
+		} else if (!udf_clear_bit(bit, bh->b_data)) {
 			udf_debug("bit already cleared for block %d\n", bit);
 			DQUOT_FREE_BLOCK(inode, 1);
 			goto out;
@@ -271,12 +260,10 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
 	mark_buffer_dirty(bh);
 	if (block_count > 0)
 		goto repeat;
-      out:
+out:
 	if (UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-		    cpu_to_le32(le32_to_cpu
-				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
-				alloc_count);
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 	sb->s_dirt = 1;
@@ -299,7 +286,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
 	*err = -ENOSPC;
 	mutex_lock(&sbi->s_alloc_mutex);
 
-      repeat:
+repeat:
 	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
 		goal = 0;
 
@@ -312,31 +299,27 @@ static int udf_bitmap_new_block(struct super_block *sb,
 	if (bitmap_nr < 0)
 		goto error_return;
 	bh = bitmap->s_block_bitmap[bitmap_nr];
-	ptr =
-	    memscan((char *)bh->b_data + group_start, 0xFF,
-		    sb->s_blocksize - group_start);
+	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
+		      sb->s_blocksize - group_start);
 
 	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
 		bit = block % (sb->s_blocksize << 3);
-
-		if (udf_test_bit(bit, bh->b_data)) {
+		if (udf_test_bit(bit, bh->b_data))
 			goto got_block;
-		}
+
 		end_goal = (bit + 63) & ~63;
 		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
 		if (bit < end_goal)
 			goto got_block;
-		ptr =
-		    memscan((char *)bh->b_data + (bit >> 3), 0xFF,
-			    sb->s_blocksize - ((bit + 7) >> 3));
+
+		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
 		newbit = (ptr - ((char *)bh->b_data)) << 3;
 		if (newbit < sb->s_blocksize << 3) {
 			bit = newbit;
 			goto search_back;
 		}
-		newbit =
-		    udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
-					  bit);
+
+		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
 		if (newbit < sb->s_blocksize << 3) {
 			bit = newbit;
 			goto got_block;
@@ -354,18 +337,16 @@ static int udf_bitmap_new_block(struct super_block *sb,
 			goto error_return;
 		bh = bitmap->s_block_bitmap[bitmap_nr];
 		if (i < nr_groups) {
-			ptr =
-			    memscan((char *)bh->b_data + group_start, 0xFF,
-				    sb->s_blocksize - group_start);
+			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
+				      sb->s_blocksize - group_start);
 			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
 				bit = (ptr - ((char *)bh->b_data)) << 3;
 				break;
 			}
 		} else {
-			bit =
-			    udf_find_next_one_bit((char *)bh->b_data,
-						  sb->s_blocksize << 3,
-						  group_start << 3);
+			bit = udf_find_next_one_bit((char *)bh->b_data,
+						    sb->s_blocksize << 3,
+						    group_start << 3);
 			if (bit < sb->s_blocksize << 3)
 				break;
 		}
@@ -377,20 +358,17 @@ static int udf_bitmap_new_block(struct super_block *sb,
 	if (bit < sb->s_blocksize << 3)
 		goto search_back;
 	else
-		bit =
-		    udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
-					  group_start << 3);
+		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
 	if (bit >= sb->s_blocksize << 3) {
 		mutex_unlock(&sbi->s_alloc_mutex);
 		return 0;
 	}
 
-      search_back:
-	for (i = 0;
-	     i < 7 && bit > (group_start << 3)
-	     && udf_test_bit(bit - 1, bh->b_data); i++, bit--) ;
+search_back:
+	for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--)
+		; /* empty loop */
 
-      got_block:
+got_block:
 
 	/*
 	 * Check quota for allocation of this block.
@@ -402,7 +380,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
 	}
 
 	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
-	    (sizeof(struct spaceBitmapDesc) << 3);
+		   (sizeof(struct spaceBitmapDesc) << 3);
 
 	if (!udf_clear_bit(bit, bh->b_data)) {
 		udf_debug("bit already cleared for block %d\n", bit);
@@ -413,9 +391,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
 
 	if (UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-		    cpu_to_le32(le32_to_cpu
-				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
-				1);
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 	sb->s_dirt = 1;
@@ -423,7 +399,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
 	*err = 0;
 	return newblock;
 
-      error_return:
+error_return:
 	*err = -EIO;
 	mutex_unlock(&sbi->s_alloc_mutex);
 	return 0;
@@ -445,14 +421,10 @@ static void udf_table_free_blocks(struct super_block *sb,
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb,
-							    bloc.
-							    partitionReferenceNum))
-	{
-		udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
-			  bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb,
-								      bloc.
-								      partitionReferenceNum));
+	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
+		udf_debug("%d < %d || %d + %d > %d\n",
+			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
+			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
 		goto error_return;
 	}
 
@@ -462,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
 		DQUOT_FREE_BLOCK(inode, count);
 	if (UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
-		    cpu_to_le32(le32_to_cpu
-				(UDF_SB_LVID(sb)->
-				 freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 
@@ -476,47 +446,28 @@ static void udf_table_free_blocks(struct super_block *sb,
 	epos.block = oepos.block = UDF_I_LOCATION(table);
 	epos.bh = oepos.bh = NULL;
 
-	while (count && (etype =
-			 udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
-		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
-		     start)) {
-			if ((0x3FFFFFFF - elen) <
-			    (count << sb->s_blocksize_bits)) {
-				count -=
-				    ((0x3FFFFFFF -
-				      elen) >> sb->s_blocksize_bits);
-				start +=
-				    ((0x3FFFFFFF -
-				      elen) >> sb->s_blocksize_bits);
-				elen =
-				    (etype << 30) | (0x40000000 -
-						     sb->s_blocksize);
+	while (count &&
+	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
+		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) {
+			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
+				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
 			} else {
-				elen = (etype << 30) |
-				    (elen + (count << sb->s_blocksize_bits));
+				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
 				start += count;
 				count = 0;
 			}
 			udf_write_aext(table, &oepos, eloc, elen, 1);
 		} else if (eloc.logicalBlockNum == (end + 1)) {
-			if ((0x3FFFFFFF - elen) <
-			    (count << sb->s_blocksize_bits)) {
-				count -=
-				    ((0x3FFFFFFF -
-				      elen) >> sb->s_blocksize_bits);
-				end -=
-				    ((0x3FFFFFFF -
-				      elen) >> sb->s_blocksize_bits);
-				eloc.logicalBlockNum -=
-				    ((0x3FFFFFFF -
-				      elen) >> sb->s_blocksize_bits);
-				elen =
-				    (etype << 30) | (0x40000000 -
-						     sb->s_blocksize);
+			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
+				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
+				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
 			} else {
 				eloc.logicalBlockNum = start;
-				elen = (etype << 30) |
-				    (elen + (count << sb->s_blocksize_bits));
+				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
 				end -= count;
 				count = 0;
 			}
@@ -530,21 +481,23 @@ static void udf_table_free_blocks(struct super_block *sb,
 			get_bh(epos.bh);
 			oepos.bh = epos.bh;
 			oepos.offset = 0;
-		} else
+		} else {
 			oepos.offset = epos.offset;
+		}
 	}
 
 	if (count) {
-		/* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
-		   a new block, and since we hold the super block lock already
-		   very bad things would happen :)
-
-		   We copy the behavior of udf_add_aext, but instead of
-		   trying to allocate a new block close to the existing one,
-		   we just steal a block from the extent we are trying to add.
-
-		   It would be nice if the blocks were close together, but it
-		   isn't required.
+		/*
+		 * NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
+		 * a new block, and since we hold the super block lock already
+		 * very bad things would happen :)
+		 *
+		 * We copy the behavior of udf_add_aext, but instead of
+		 * trying to allocate a new block close to the existing one,
+		 * we just steal a block from the extent we are trying to add.
+		 *
+		 * It would be nice if the blocks were close together, but it
+		 * isn't required.
 		 */
 
 		int adsize;
@@ -553,13 +506,14 @@ static void udf_table_free_blocks(struct super_block *sb,
 		struct allocExtDesc *aed;
 
 		eloc.logicalBlockNum = start;
-		elen = EXT_RECORDED_ALLOCATED | (count << sb->s_blocksize_bits);
+		elen = EXT_RECORDED_ALLOCATED |
+			(count << sb->s_blocksize_bits);
 
-		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
+		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) {
 			adsize = sizeof(short_ad);
-		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
+		} else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) {
 			adsize = sizeof(long_ad);
-		else {
+		} else {
 			brelse(oepos.bh);
 			brelse(epos.bh);
 			goto error_return;
@@ -577,28 +531,21 @@ static void udf_table_free_blocks(struct super_block *sb,
 			eloc.logicalBlockNum++;
 			elen -= sb->s_blocksize;
 
-			if (!(epos.bh = udf_tread(sb,
-						  udf_get_lb_pblock(sb,
-								    epos.block,
-								    0)))) {
+			if (!(epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, epos.block, 0)))) {
 				brelse(oepos.bh);
 				goto error_return;
 			}
 			aed = (struct allocExtDesc *)(epos.bh->b_data);
-			aed->previousAllocExtLocation =
-			    cpu_to_le32(oepos.block.logicalBlockNum);
+			aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
 			if (epos.offset + adsize > sb->s_blocksize) {
 				loffset = epos.offset;
 				aed->lengthAllocDescs = cpu_to_le32(adsize);
 				sptr = UDF_I_DATA(inode) + epos.offset -
 					udf_file_entry_alloc_offset(inode) +
 					UDF_I_LENEATTR(inode) - adsize;
-				dptr =
-				    epos.bh->b_data +
-				    sizeof(struct allocExtDesc);
+				dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
 				memcpy(dptr, sptr, adsize);
-				epos.offset =
-				    sizeof(struct allocExtDesc) + adsize;
+				epos.offset = sizeof(struct allocExtDesc) + adsize;
 			} else {
 				loffset = epos.offset + adsize;
 				aed->lengthAllocDescs = cpu_to_le32(0);
@@ -606,60 +553,46 @@ static void udf_table_free_blocks(struct super_block *sb,
 				epos.offset = sizeof(struct allocExtDesc);
 
 				if (oepos.bh) {
-					aed =
-					    (struct allocExtDesc *)oepos.bh->
-					    b_data;
+					aed = (struct allocExtDesc *)oepos.bh->b_data;
 					aed->lengthAllocDescs =
-					    cpu_to_le32(le32_to_cpu
-							(aed->
-							 lengthAllocDescs) +
-							adsize);
+						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
 				} else {
 					UDF_I_LENALLOC(table) += adsize;
 					mark_inode_dirty(table);
 				}
 			}
 			if (UDF_SB_UDFREV(sb) >= 0x0200)
-				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3,
-					    1, epos.block.logicalBlockNum,
-					    sizeof(tag));
+				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
+					    epos.block.logicalBlockNum, sizeof(tag));
 			else
-				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2,
-					    1, epos.block.logicalBlockNum,
-					    sizeof(tag));
+				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
+					    epos.block.logicalBlockNum, sizeof(tag));
+
 			switch (UDF_I_ALLOCTYPE(table)) {
 			case ICBTAG_FLAG_AD_SHORT:
-			{
-				sad = (short_ad *) sptr;
-				sad->extLength =
-				    cpu_to_le32
-				    (EXT_NEXT_EXTENT_ALLOCDECS | sb->
-				     s_blocksize);
-				sad->extPosition =
-				    cpu_to_le32(epos.block.
-						logicalBlockNum);
+				sad = (short_ad *)sptr;
+				sad->extLength = cpu_to_le32(
+					EXT_NEXT_EXTENT_ALLOCDECS |
+					sb->s_blocksize);
+				sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
 				break;
-			}
-			case ICBTAG_FLAG_AD_LONG:
-			{
-				lad = (long_ad *) sptr;
-				lad->extLength =
-				    cpu_to_le32
-				    (EXT_NEXT_EXTENT_ALLOCDECS | sb->
-				     s_blocksize);
-				lad->extLocation =
-				    cpu_to_lelb(epos.block);
+			case ICBTAG_FLAG_AD_LONG:
+				lad = (long_ad *)sptr;
+				lad->extLength = cpu_to_le32(
+					EXT_NEXT_EXTENT_ALLOCDECS |
+					sb->s_blocksize);
+				lad->extLocation = cpu_to_lelb(epos.block);
 				break;
-			}
 			}
 			if (oepos.bh) {
 				udf_update_tag(oepos.bh->b_data, loffset);
 				mark_buffer_dirty(oepos.bh);
-			} else
+			} else {
 				mark_inode_dirty(table);
+			}
 		}
 
 		if (elen) { /* It's possible that stealing the block emptied the extent */
 			udf_write_aext(table, &epos, eloc, elen, 1);
 
 			if (!epos.bh) {
@@ -668,9 +601,7 @@ static void udf_table_free_blocks(struct super_block *sb,
 			} else {
 				aed = (struct allocExtDesc *)epos.bh->b_data;
 				aed->lengthAllocDescs =
-				    cpu_to_le32(le32_to_cpu
-						(aed->lengthAllocDescs) +
-						adsize);
+					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
 				udf_update_tag(epos.bh->b_data, epos.offset);
 				mark_buffer_dirty(epos.bh);
 			}
@@ -680,7 +611,7 @@ static void udf_table_free_blocks(struct super_block *sb,
 	brelse(epos.bh);
 	brelse(oepos.bh);
 
-      error_return:
+error_return:
 	sb->s_dirt = 1;
 	mutex_unlock(&sbi->s_alloc_mutex);
 	return;
@@ -714,47 +645,36 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
 	epos.bh = NULL;
 	eloc.logicalBlockNum = 0xFFFFFFFF;
 
-	while (first_block != eloc.logicalBlockNum && (etype =
-						       udf_next_aext(table,
-								     &epos,
-								     &eloc,
-								     &elen,
-								     1)) !=
-	       -1) {
+	while (first_block != eloc.logicalBlockNum &&
+	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
 		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
 			  eloc.logicalBlockNum, elen, first_block);
 		; /* empty loop body */
 	}
 
 	if (first_block == eloc.logicalBlockNum) {
 		epos.offset -= adsize;
 
 		alloc_count = (elen >> sb->s_blocksize_bits);
-		if (inode
-		    && DQUOT_PREALLOC_BLOCK(inode,
-					    alloc_count >
-					    block_count ? block_count :
-					    alloc_count))
+		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count)) {
 			alloc_count = 0;
-		else if (alloc_count > block_count) {
+		} else if (alloc_count > block_count) {
 			alloc_count = block_count;
 			eloc.logicalBlockNum += alloc_count;
 			elen -= (alloc_count << sb->s_blocksize_bits);
-			udf_write_aext(table, &epos, eloc, (etype << 30) | elen,
-				       1);
-		} else
-			udf_delete_aext(table, epos, eloc,
-					(etype << 30) | elen);
-	} else
+			udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
+		} else {
+			udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
+		}
+	} else {
 		alloc_count = 0;
+	}
 
 	brelse(epos.bh);
 
 	if (alloc_count && UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-		    cpu_to_le32(le32_to_cpu
-				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
-				alloc_count);
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 		sb->s_dirt = 1;
 	}
@@ -797,18 +717,17 @@ static int udf_table_new_block(struct super_block *sb,
 	epos.block = UDF_I_LOCATION(table);
 	epos.bh = goal_epos.bh = NULL;
 
-	while (spread && (etype =
-			  udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
+	while (spread &&
+	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
 		if (goal >= eloc.logicalBlockNum) {
-			if (goal <
-			    eloc.logicalBlockNum +
-			    (elen >> sb->s_blocksize_bits))
+			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
 				nspread = 0;
 			else
 				nspread = goal - eloc.logicalBlockNum -
 					(elen >> sb->s_blocksize_bits);
-		} else
+		} else {
 			nspread = eloc.logicalBlockNum - goal;
+		}
 
 		if (nspread < spread) {
 			spread = nspread;
@@ -856,9 +775,7 @@ static int udf_table_new_block(struct super_block *sb,
 
 	if (UDF_SB_LVIDBH(sb)) {
 		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-		    cpu_to_le32(le32_to_cpu
-				(UDF_SB_LVID(sb)->freeSpaceTable[partition]) -
-				1);
+			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
 		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
 	}
 
@@ -877,27 +794,23 @@ inline void udf_free_blocks(struct super_block *sb,
 
 	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-					      UDF_SB_PARTMAPS(sb)[partition].
-					      s_uspace.s_bitmap, bloc, offset,
-					      count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) &
-		   UDF_PART_FLAG_UNALLOC_TABLE) {
+					      UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+					      bloc, offset, count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-					     UDF_SB_PARTMAPS(sb)[partition].
-					     s_uspace.s_table, bloc, offset,
-					     count);
+					     UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+					     bloc, offset, count);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-					      UDF_SB_PARTMAPS(sb)[partition].
-					      s_fspace.s_bitmap, bloc, offset,
-					      count);
+					      UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+					      bloc, offset, count);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-					     UDF_SB_PARTMAPS(sb)[partition].
-					     s_fspace.s_table, bloc, offset,
-					     count);
-	} else
+					     UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+					     bloc, offset, count);
+	} else {
 		return;
+	}
 }
 
 inline int udf_prealloc_blocks(struct super_block *sb,
@@ -907,29 +820,23 @@ inline int udf_prealloc_blocks(struct super_block *sb,
 {
 	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-						  UDF_SB_PARTMAPS(sb)
-						  [partition].s_uspace.s_bitmap,
-						  partition, first_block,
-						  block_count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) &
-		   UDF_PART_FLAG_UNALLOC_TABLE) {
+						  UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+						  partition, first_block, block_count);
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-						 UDF_SB_PARTMAPS(sb)[partition].
-						 s_uspace.s_table, partition,
-						 first_block, block_count);
+						 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+						 partition, first_block, block_count);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-						  UDF_SB_PARTMAPS(sb)
-						  [partition].s_fspace.s_bitmap,
-						  partition, first_block,
-						  block_count);
+						  UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+						  partition, first_block, block_count);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-						 UDF_SB_PARTMAPS(sb)[partition].
-						 s_fspace.s_table, partition,
-						 first_block, block_count);
-	} else
+						 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+						 partition, first_block, block_count);
+	} else {
 		return 0;
+	}
 }
 
 inline int udf_new_block(struct super_block *sb,
@@ -940,26 +847,21 @@ inline int udf_new_block(struct super_block *sb,
 
 	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		ret = udf_bitmap_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].
-					   s_uspace.s_bitmap, partition, goal,
-					   err);
+					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+					   partition, goal, err);
 		return ret;
-	} else if (UDF_SB_PARTFLAGS(sb, partition) &
-		   UDF_PART_FLAG_UNALLOC_TABLE) {
+	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].
-					   s_uspace.s_table, partition, goal,
-					   err);
+					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+					   partition, goal, err);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_new_block(sb, inode,
-					    UDF_SB_PARTMAPS(sb)[partition].
-					    s_fspace.s_bitmap, partition, goal,
-					    err);
+					    UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+					    partition, goal, err);
 	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].
-					   s_fspace.s_table, partition, goal,
-					   err);
+					   UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+					   partition, goal, err);
 	} else {
 		*err = -EIO;
 		return 0;
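
For readers skimming the hunks above: one idiom the patch re-wraps in many places is the endian-safe update of the LVID free-space counter. A minimal user-space sketch of that pattern follows; the __le32 typedef, the cpu_to_le32/le32_to_cpu stubs, and the struct here are simplified stand-ins (assuming a little-endian host), not the kernel's own definitions.

/*
 * Standalone illustration of the read-modify-write pattern:
 * the on-disk counter is little-endian, so it is converted to
 * CPU order, adjusted, and converted back before being stored.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t __le32;                         /* on-disk, little-endian */
static __le32 cpu_to_le32(uint32_t v) { return v; }  /* little-endian host assumed */
static uint32_t le32_to_cpu(__le32 v) { return v; }

struct lvid_sketch { __le32 freeSpaceTable[4]; };

static void free_space_adjust(struct lvid_sketch *lvid, unsigned int part, int delta)
{
	/* same shape as the kernel's:
	 * lvid->freeSpaceTable[p] =
	 *	cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[p]) + delta); */
	lvid->freeSpaceTable[part] =
		cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[part]) + delta);
}

int main(void)
{
	struct lvid_sketch lvid = { .freeSpaceTable = { cpu_to_le32(100) } };

	free_space_adjust(&lvid, 0, -1);	/* one block allocated */
	free_space_adjust(&lvid, 0, +2);	/* two blocks freed */
	printf("free blocks: %u\n", (unsigned int)le32_to_cpu(lvid.freeSpaceTable[0]));
	return 0;
}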