author     Marcin Slusarz <marcin.slusarz@gmail.com>             2008-02-08 07:20:36 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-08 12:22:35 -0500
commit     4b11111aba6c80cc2969fd1806d2a869bfc9f357
tree       57b72a1ca58859dc6a9e47d647f29cebb9bcf7e0  /fs/udf/balloc.c
parent     bd45a420f93d18c91115f3f0568dd6a2555aa15a
udf: fix coding style
fix coding style errors found by checkpatch:
- assignments in if conditions
- braces {} around single statement blocks
- no spaces after commas
- printks without KERN_*
- lines longer than 80 characters
- spaces between "type *" and variable name
before: 192 errors, 561 warnings, 8987 lines checked
after: 1 errors, 38 warnings, 9468 lines checked
Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
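
To make the classes of cleanups listed in the commit message concrete, here is a minimal before/after sketch (hypothetical kernel-style code, not taken from fs/udf/balloc.c or the diff below; sb_bread() and printk() are standard kernel APIs, the variables are assumed to exist in the surrounding function):

	/* before: assignment inside the if condition, braces around a
	 * single statement, no space after the comma, printk without a
	 * KERN_* level */
	if ((bh = sb_bread(sb,block))) {
		printk("read block %lu\n", (unsigned long)block);
	}

	/* after: assignment pulled out of the condition, braces dropped,
	 * space added after the comma, explicit KERN_DEBUG level */
	bh = sb_bread(sb, block);
	if (bh)
		printk(KERN_DEBUG "read block %lu\n", (unsigned long)block);

The long-line wrapping and "type *var" spacing fixes follow the same pattern and make up most of the churn in the diff below.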
Diffstat (limited to 'fs/udf/balloc.c')
 -rw-r--r--  fs/udf/balloc.c | 231
 1 file changed, 139 insertions, 92 deletions
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 3f67d9dc8631..dc9f8a96b6e4 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -28,15 +28,16 @@
 #include "udf_i.h"
 #include "udf_sb.h"
 
-#define udf_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
-#define udf_set_bit(nr,addr) ext2_set_bit(nr,addr)
+#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
+#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
 #define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
 #define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
-#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)
+#define udf_find_next_one_bit(addr, size, offset) \
+		find_next_one_bit(addr, size, offset)
 
 #define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
-#define leNUM_to_cpup(x,y) xleNUM_to_cpup(x,y)
-#define xleNUM_to_cpup(x,y) (le ## x ## _to_cpup(y))
+#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
+#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
 #define uintBPL_t uint(BITS_PER_LONG)
 #define uint(x) xuint(x)
 #define xuint(x) __le ## x
@@ -62,7 +63,8 @@ static inline int find_next_one_bit(void *addr, int size, int offset)
 		result += BITS_PER_LONG;
 	}
 	while (size & ~(BITS_PER_LONG - 1)) {
-		if ((tmp = leBPL_to_cpup(p++)))
+		tmp = leBPL_to_cpup(p++);
+		if (tmp)
 			goto found_middle;
 		result += BITS_PER_LONG;
 		size -= BITS_PER_LONG;
@@ -91,9 +93,9 @@ static int read_block_bitmap(struct super_block *sb,
 	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
 
 	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
-	if (!bh) {
+	if (!bh)
 		retval = -EIO;
-	}
+
 	bitmap->s_block_bitmap[bitmap_nr] = bh;
 	return retval;
 }
@@ -155,14 +157,17 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-	    (bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
+	    (bloc.logicalBlockNum + count) >
+	      sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
 		udf_debug("%d < %d || %d + %d > %d\n",
 			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
-			  sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len);
+			  sbi->s_partmaps[bloc.partitionReferenceNum].
+							s_partition_len);
 		goto error_return;
 	}
 
-	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);
+	block = bloc.logicalBlockNum + offset +
+		(sizeof(struct spaceBitmapDesc) << 3);
 
 do_more:
 	overflow = 0;
@@ -184,7 +189,8 @@ do_more:
 	for (i = 0; i < count; i++) {
 		if (udf_set_bit(bit + i, bh->b_data)) {
 			udf_debug("bit %ld already set\n", bit + i);
-			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
+			udf_debug("byte=%2x\n",
+				  ((char *)bh->b_data)[(bit + i) >> 3]);
 		} else {
 			if (inode)
 				DQUOT_FREE_BLOCK(inode, 1);
@@ -314,14 +320,16 @@ repeat:
 		if (bit < end_goal)
 			goto got_block;
 
-		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
+		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
+			      sb->s_blocksize - ((bit + 7) >> 3));
 		newbit = (ptr - ((char *)bh->b_data)) << 3;
 		if (newbit < sb->s_blocksize << 3) {
 			bit = newbit;
 			goto search_back;
 		}
 
-		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
+		newbit = udf_find_next_one_bit(bh->b_data,
+					       sb->s_blocksize << 3, bit);
 		if (newbit < sb->s_blocksize << 3) {
 			bit = newbit;
 			goto got_block;
@@ -360,15 +368,20 @@ repeat:
 	if (bit < sb->s_blocksize << 3)
 		goto search_back;
 	else
-		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
+		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
+					    group_start << 3);
 	if (bit >= sb->s_blocksize << 3) {
 		mutex_unlock(&sbi->s_alloc_mutex);
 		return 0;
 	}
 
 search_back:
-	for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--)
-		; /* empty loop */
+	i = 0;
+	while (i < 7 && bit > (group_start << 3) &&
+	       udf_test_bit(bit - 1, bh->b_data)) {
+		++i;
+		--bit;
+	}
 
 got_block:
 
@@ -424,15 +437,17 @@ static void udf_table_free_blocks(struct super_block *sb,
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-	    (bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
+	    (bloc.logicalBlockNum + count) >
+	      sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
 		udf_debug("%d < %d || %d + %d > %d\n",
 			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
-			  sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len);
+			  sbi->s_partmaps[bloc.partitionReferenceNum].
+							s_partition_len);
 		goto error_return;
 	}
 
-	/* We do this up front - There are some error conditions that could occure,
-	   but.. oh well */
+	/* We do this up front - There are some error conditions that
+	   could occure, but.. oh well */
 	if (inode)
 		DQUOT_FREE_BLOCK(inode, count);
 	if (sbi->s_lvid_bh) {
@@ -452,26 +467,39 @@ static void udf_table_free_blocks(struct super_block *sb,
 
 	while (count &&
 	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
-		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) {
-			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
-				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
-				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
-				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
+		if (((eloc.logicalBlockNum +
+			(elen >> sb->s_blocksize_bits)) == start)) {
+			if ((0x3FFFFFFF - elen) <
+					(count << sb->s_blocksize_bits)) {
+				uint32_t tmp = ((0x3FFFFFFF - elen) >>
+						sb->s_blocksize_bits);
+				count -= tmp;
+				start += tmp;
+				elen = (etype << 30) |
+					(0x40000000 - sb->s_blocksize);
 			} else {
-				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
+				elen = (etype << 30) |
+					(elen +
+					(count << sb->s_blocksize_bits));
 				start += count;
 				count = 0;
 			}
 			udf_write_aext(table, &oepos, eloc, elen, 1);
 		} else if (eloc.logicalBlockNum == (end + 1)) {
-			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
-				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
-				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
-				eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
-				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
+			if ((0x3FFFFFFF - elen) <
+					(count << sb->s_blocksize_bits)) {
+				uint32_t tmp = ((0x3FFFFFFF - elen) >>
+						sb->s_blocksize_bits);
+				count -= tmp;
+				end -= tmp;
+				eloc.logicalBlockNum -= tmp;
+				elen = (etype << 30) |
+					(0x40000000 - sb->s_blocksize);
 			} else {
 				eloc.logicalBlockNum = start;
-				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
+				elen = (etype << 30) |
+					(elen +
+					(count << sb->s_blocksize_bits));
 				end -= count;
 				count = 0;
 			}
@@ -492,9 +520,9 @@ static void udf_table_free_blocks(struct super_block *sb,
 
 	if (count) {
 		/*
-		 * NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
-		 * a new block, and since we hold the super block lock already
-		 * very bad things would happen :)
+		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
+		 * allocate a new block, and since we hold the super block
+		 * lock already very bad things would happen :)
 		 *
 		 * We copy the behavior of udf_add_aext, but instead of
 		 * trying to allocate a new block close to the existing one,
@@ -535,27 +563,35 @@ static void udf_table_free_blocks(struct super_block *sb,
 			eloc.logicalBlockNum++;
 			elen -= sb->s_blocksize;
 
-			if (!(epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, epos.block, 0)))) {
+			epos.bh = udf_tread(sb,
+					udf_get_lb_pblock(sb, epos.block, 0));
+			if (!epos.bh) {
 				brelse(oepos.bh);
 				goto error_return;
 			}
 			aed = (struct allocExtDesc *)(epos.bh->b_data);
-			aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
+			aed->previousAllocExtLocation =
+				cpu_to_le32(oepos.block.logicalBlockNum);
 			if (epos.offset + adsize > sb->s_blocksize) {
 				loffset = epos.offset;
 				aed->lengthAllocDescs = cpu_to_le32(adsize);
 				sptr = UDF_I_DATA(table) + epos.offset - adsize;
-				dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
+				dptr = epos.bh->b_data +
+					sizeof(struct allocExtDesc);
 				memcpy(dptr, sptr, adsize);
-				epos.offset = sizeof(struct allocExtDesc) + adsize;
+				epos.offset = sizeof(struct allocExtDesc) +
+					      adsize;
 			} else {
 				loffset = epos.offset + adsize;
 				aed->lengthAllocDescs = cpu_to_le32(0);
 				if (oepos.bh) {
 					sptr = oepos.bh->b_data + epos.offset;
-					aed = (struct allocExtDesc *)oepos.bh->b_data;
+					aed = (struct allocExtDesc *)
+						oepos.bh->b_data;
 					aed->lengthAllocDescs =
-						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
+						cpu_to_le32(le32_to_cpu(
+							aed->lengthAllocDescs) +
+								adsize);
 				} else {
 					sptr = UDF_I_DATA(table) + epos.offset;
 					UDF_I_LENALLOC(table) += adsize;
@@ -564,27 +600,31 @@ static void udf_table_free_blocks(struct super_block *sb,
 				epos.offset = sizeof(struct allocExtDesc);
 			}
 			if (sbi->s_udfrev >= 0x0200)
-				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
-					    epos.block.logicalBlockNum, sizeof(tag));
+				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
+					    3, 1, epos.block.logicalBlockNum,
+					    sizeof(tag));
 			else
-				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
-					    epos.block.logicalBlockNum, sizeof(tag));
+				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
+					    2, 1, epos.block.logicalBlockNum,
+					    sizeof(tag));
 
 			switch (UDF_I_ALLOCTYPE(table)) {
 			case ICBTAG_FLAG_AD_SHORT:
 				sad = (short_ad *)sptr;
 				sad->extLength = cpu_to_le32(
 					EXT_NEXT_EXTENT_ALLOCDECS |
 					sb->s_blocksize);
-				sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
-				break;
-			case ICBTAG_FLAG_AD_LONG:
-				lad = (long_ad *)sptr;
-				lad->extLength = cpu_to_le32(
-					EXT_NEXT_EXTENT_ALLOCDECS |
-					sb->s_blocksize);
-				lad->extLocation = cpu_to_lelb(epos.block);
-				break;
+				sad->extPosition =
+					cpu_to_le32(epos.block.logicalBlockNum);
+				break;
+			case ICBTAG_FLAG_AD_LONG:
+				lad = (long_ad *)sptr;
+				lad->extLength = cpu_to_le32(
+					EXT_NEXT_EXTENT_ALLOCDECS |
+					sb->s_blocksize);
+				lad->extLocation =
+					cpu_to_lelb(epos.block);
+				break;
 			}
 			if (oepos.bh) {
 				udf_update_tag(oepos.bh->b_data, loffset);
@@ -594,7 +634,8 @@ static void udf_table_free_blocks(struct super_block *sb,
 			}
 		}
 
-		if (elen) { /* It's possible that stealing the block emptied the extent */
+		/* It's possible that stealing the block emptied the extent */
+		if (elen) {
 			udf_write_aext(table, &epos, eloc, elen, 1);
 
 			if (!epos.bh) {
@@ -603,7 +644,8 @@ static void udf_table_free_blocks(struct super_block *sb,
 			} else {
 				aed = (struct allocExtDesc *)epos.bh->b_data;
 				aed->lengthAllocDescs =
-					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
+					cpu_to_le32(le32_to_cpu(
+						aed->lengthAllocDescs) + adsize);
 				udf_update_tag(epos.bh->b_data, epos.offset);
 				mark_buffer_dirty(epos.bh);
 			}
@@ -631,7 +673,8 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
 	struct extent_position epos;
 	int8_t etype = -1;
 
-	if (first_block < 0 || first_block >= sbi->s_partmaps[partition].s_partition_len)
+	if (first_block < 0 ||
+	    first_block >= sbi->s_partmaps[partition].s_partition_len)
 		return 0;
 
 	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
@@ -658,16 +701,18 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
 		epos.offset -= adsize;
 
 		alloc_count = (elen >> sb->s_blocksize_bits);
-		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count)) {
+		if (inode && DQUOT_PREALLOC_BLOCK(inode,
+			alloc_count > block_count ? block_count : alloc_count))
 			alloc_count = 0;
-		} else if (alloc_count > block_count) {
+		else if (alloc_count > block_count) {
 			alloc_count = block_count;
 			eloc.logicalBlockNum += alloc_count;
 			elen -= (alloc_count << sb->s_blocksize_bits);
-			udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
-		} else {
-			udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
-		}
+			udf_write_aext(table, &epos, eloc,
+					(etype << 30) | elen, 1);
+		} else
+			udf_delete_aext(table, epos, eloc,
+					(etype << 30) | elen);
 	} else {
 		alloc_count = 0;
 	}
@@ -711,10 +756,10 @@ static int udf_table_new_block(struct super_block *sb,
 	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
 		goal = 0;
 
-	/* We search for the closest matching block to goal. If we find a exact hit,
-	   we stop. Otherwise we keep going till we run out of extents.
-	   We store the buffer_head, bloc, and extoffset of the current closest
-	   match and use that when we are done.
+	/* We search for the closest matching block to goal. If we find
+	   a exact hit, we stop. Otherwise we keep going till we run out
+	   of extents. We store the buffer_head, bloc, and extoffset
+	   of the current closest match and use that when we are done.
 	*/
 	epos.offset = sizeof(struct unallocSpaceEntry);
 	epos.block = UDF_I_LOCATION(table);
@@ -723,7 +768,8 @@ static int udf_table_new_block(struct super_block *sb,
 	while (spread &&
 	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
 		if (goal >= eloc.logicalBlockNum) {
-			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
+			if (goal < eloc.logicalBlockNum +
+				(elen >> sb->s_blocksize_bits))
 				nspread = 0;
 			else
 				nspread = goal - eloc.logicalBlockNum -
@@ -825,52 +871,53 @@ inline int udf_prealloc_blocks(struct super_block *sb,
 {
 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
 
-	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
 		return udf_bitmap_prealloc_blocks(sb, inode,
 						  map->s_uspace.s_bitmap,
-						  partition, first_block, block_count);
-	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
+						  partition, first_block,
+						  block_count);
+	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
 		return udf_table_prealloc_blocks(sb, inode,
 						 map->s_uspace.s_table,
-						 partition, first_block, block_count);
-	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
+						 partition, first_block,
+						 block_count);
+	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
 		return udf_bitmap_prealloc_blocks(sb, inode,
 						  map->s_fspace.s_bitmap,
-						  partition, first_block, block_count);
-	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
+						  partition, first_block,
+						  block_count);
+	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
 		return udf_table_prealloc_blocks(sb, inode,
 						 map->s_fspace.s_table,
-						 partition, first_block, block_count);
-	} else {
+						 partition, first_block,
+						 block_count);
+	else
 		return 0;
-	}
 }
 
 inline int udf_new_block(struct super_block *sb,
 			 struct inode *inode,
 			 uint16_t partition, uint32_t goal, int *err)
 {
-	int ret;
 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
 
-	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
-		ret = udf_bitmap_new_block(sb, inode,
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
+		return udf_bitmap_new_block(sb, inode,
 					   map->s_uspace.s_bitmap,
 					   partition, goal, err);
-		return ret;
-	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
+	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
 		return udf_table_new_block(sb, inode,
 					   map->s_uspace.s_table,
 					   partition, goal, err);
-	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
+	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
 		return udf_bitmap_new_block(sb, inode,
 					   map->s_fspace.s_bitmap,
 					   partition, goal, err);
-	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
+	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
 		return udf_table_new_block(sb, inode,
 					   map->s_fspace.s_table,
 					   partition, goal, err);
-	} else {
+	else {
 		*err = -EIO;
 		return 0;
 	}