author	Evgeniy Dushistov <dushistov@mail.ru>	2006-06-25 08:47:22 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 13:01:02 -0400
commit	9695ef16ed4e00b59303f39f9a4a422a2c6a3b89 (patch)
tree	fba8946e86a523a5d53936cf5ec9e0a150037f73 /fs/ufs/balloc.c
parent	b71034e5e67d1577424cebe7bbb7d0ce134a4cd8 (diff)
[PATCH] ufs: wrong type cast

There are two ugly macros in the ufs code:

#define UCPI_UBH ((struct ufs_buffer_head *)ucpi)
#define USPI_UBH ((struct ufs_buffer_head *)uspi)

uspi points to a structure that looks like

struct {
	struct ufs_buffer_head ...;
	...
}

so USPI_UBH makes some sense, but ucpi points to a structure that looks like

struct {
	struct not_ufs_buffer_head ...;
	...
}

so the cast in UCPI_UBH is simply wrong.  To prevent bugs in the future,
this patch converts the macros to inline functions and fixes the "ucpi"
structure.
Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
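
The replacement helpers themselves live in fs/ufs/util.h and are not part of
this balloc.c diff.  A minimal sketch of what they could look like, assuming
the cylinder-group and superblock private structures embed their buffer heads
as c_ubh and s_ubh (member names assumed here for illustration):

/*
 * Sketch only: the real definitions are in fs/ufs/util.h.  Unlike the
 * old cast macros, passing the wrong pointer type to these helpers is
 * a compile-time error rather than silent garbage at runtime.
 */
static inline struct ufs_buffer_head *UCPI_UBH(struct ufs_cg_private_info *cpi)
{
	return &cpi->c_ubh;	/* assumes ucpi embeds its buffer head */
}

static inline struct ufs_buffer_head *USPI_UBH(struct ufs_sb_private_info *spi)
{
	return &spi->s_ubh;	/* uspi already starts with its buffer head */
}

Every call site in the diff below changes in the same mechanical way, e.g.
from UCPI_UBH to UCPI_UBH(ucpi).
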
Diffstat (limited to 'fs/ufs/balloc.c')
-rw-r--r--	fs/ufs/balloc.c	84
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 06f970d02e3d..68de1312e4b6 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -69,7 +69,7 @@ void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
 	ucpi = ufs_load_cylinder (sb, cgno);
 	if (!ucpi)
 		goto failed;
-	ucg = ubh_get_ucg (UCPI_UBH);
+	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
 	if (!ufs_cg_chkmagic(sb, ucg)) {
 		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
 		goto failed;
@@ -77,11 +77,11 @@ void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
 
 	end_bit = bit + count;
 	bbase = ufs_blknum (bit);
-	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
+	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
 	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
 	for (i = bit; i < end_bit; i++) {
-		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
-			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
+		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
+			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
 		else
 			ufs_error (sb, "ufs_free_fragments",
 				   "bit already cleared for fragment %u", i);
@@ -93,14 +93,14 @@ void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
 	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
 	fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
-	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
+	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
 	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
 
 	/*
 	 * Trying to reassemble free fragments into block
 	 */
 	blkno = ufs_fragstoblks (bbase);
-	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
+	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
 		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
 		fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
 		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
@@ -114,11 +114,11 @@ void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
 		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
 	}
 
-	ubh_mark_buffer_dirty (USPI_UBH);
-	ubh_mark_buffer_dirty (UCPI_UBH);
+	ubh_mark_buffer_dirty (USPI_UBH(uspi));
+	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
 	if (sb->s_flags & MS_SYNCHRONOUS) {
 		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
-		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_wait_on_buffer (UCPI_UBH(ucpi));
 	}
 	sb->s_dirt = 1;
 
@@ -176,7 +176,7 @@ do_more:
 	ucpi = ufs_load_cylinder (sb, cgno);
 	if (!ucpi)
 		goto failed;
-	ucg = ubh_get_ucg (UCPI_UBH);
+	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
 	if (!ufs_cg_chkmagic(sb, ucg)) {
 		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
 		goto failed;
@@ -184,10 +184,10 @@ do_more:
 
 	for (i = bit; i < end_bit; i += uspi->s_fpb) {
 		blkno = ufs_fragstoblks(i);
-		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
+		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
 			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
 		}
-		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
+		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
 		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
 			ufs_clusteracct (sb, ucpi, blkno, 1);
 		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
@@ -200,11 +200,11 @@ do_more:
 		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
 	}
 
-	ubh_mark_buffer_dirty (USPI_UBH);
-	ubh_mark_buffer_dirty (UCPI_UBH);
+	ubh_mark_buffer_dirty (USPI_UBH(uspi));
+	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
 	if (sb->s_flags & MS_SYNCHRONOUS) {
 		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
-		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_wait_on_buffer (UCPI_UBH(ucpi));
 	}
 
 	if (overflow) {
@@ -493,7 +493,7 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
 	ucpi = ufs_load_cylinder (sb, cgno);
 	if (!ucpi)
 		return 0;
-	ucg = ubh_get_ucg (UCPI_UBH);
+	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
 	if (!ufs_cg_chkmagic(sb, ucg)) {
 		ufs_panic (sb, "ufs_add_fragments",
 			"internal error, bad magic number on cg %u", cgno);
@@ -503,14 +503,14 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
 	fragno = ufs_dtogd (fragment);
 	fragoff = ufs_fragnum (fragno);
 	for (i = oldcount; i < newcount; i++)
-		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
+		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
 			return 0;
 	/*
 	 * Block can be extended
 	 */
 	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
 	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
-		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
+		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
 			break;
 	fragsize = i - oldcount;
 	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
@@ -520,7 +520,7 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
 	if (fragsize != count)
 		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
 	for (i = oldcount; i < newcount; i++)
-		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
+		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
 	if(DQUOT_ALLOC_BLOCK(inode, count)) {
 		*err = -EDQUOT;
 		return 0;
@@ -530,11 +530,11 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
 	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
 	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
 
-	ubh_mark_buffer_dirty (USPI_UBH);
-	ubh_mark_buffer_dirty (UCPI_UBH);
+	ubh_mark_buffer_dirty (USPI_UBH(uspi));
+	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
 	if (sb->s_flags & MS_SYNCHRONOUS) {
 		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
-		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_wait_on_buffer (UCPI_UBH(ucpi));
 	}
 	sb->s_dirt = 1;
 
@@ -602,7 +602,7 @@ cg_found:
 	ucpi = ufs_load_cylinder (sb, cgno);
 	if (!ucpi)
 		return 0;
-	ucg = ubh_get_ucg (UCPI_UBH);
+	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
 	if (!ufs_cg_chkmagic(sb, ucg))
 		ufs_panic (sb, "ufs_alloc_fragments",
 			"internal error, bad magic number on cg %u", cgno);
@@ -625,7 +625,7 @@ cg_found:
 			return 0;
 		goal = ufs_dtogd (result);
 		for (i = count; i < uspi->s_fpb; i++)
-			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
+			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
 		i = uspi->s_fpb - count;
 		DQUOT_FREE_BLOCK(inode, i);
 
@@ -644,7 +644,7 @@ cg_found:
 		return 0;
 	}
 	for (i = 0; i < count; i++)
-		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
+		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
 
 	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
 	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
@@ -655,11 +655,11 @@ cg_found:
 		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
 
 succed:
-	ubh_mark_buffer_dirty (USPI_UBH);
-	ubh_mark_buffer_dirty (UCPI_UBH);
+	ubh_mark_buffer_dirty (USPI_UBH(uspi));
+	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
 	if (sb->s_flags & MS_SYNCHRONOUS) {
 		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
-		ubh_wait_on_buffer (UCPI_UBH);
+		ubh_wait_on_buffer (UCPI_UBH(ucpi));
 	}
 	sb->s_dirt = 1;
 
@@ -682,7 +682,7 @@ static unsigned ufs_alloccg_block (struct inode * inode,
 	sb = inode->i_sb;
 	uspi = UFS_SB(sb)->s_uspi;
 	usb1 = ubh_get_usb_first(uspi);
-	ucg = ubh_get_ucg(UCPI_UBH);
+	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
 
 	if (goal == 0) {
 		goal = ucpi->c_rotor;
@@ -694,7 +694,7 @@ static unsigned ufs_alloccg_block (struct inode * inode,
 	/*
 	 * If the requested block is available, use it.
 	 */
-	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
+	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
 		result = goal;
 		goto gotit;
 	}
@@ -706,7 +706,7 @@ norot:
 	ucpi->c_rotor = result;
 gotit:
 	blkno = ufs_fragstoblks(result);
-	ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
+	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
 		ufs_clusteracct (sb, ucpi, blkno, -1);
 	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
@@ -739,7 +739,7 @@ static unsigned ufs_bitmap_search (struct super_block * sb,
 
 	uspi = UFS_SB(sb)->s_uspi;
 	usb1 = ubh_get_usb_first (uspi);
-	ucg = ubh_get_ucg(UCPI_UBH);
+	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
 
 	if (goal)
 		start = ufs_dtogd(goal) >> 3;
@@ -747,12 +747,12 @@ static unsigned ufs_bitmap_search (struct super_block * sb,
 		start = ucpi->c_frotor >> 3;
 
 	length = ((uspi->s_fpg + 7) >> 3) - start;
-	location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
+	location = ubh_scanc(UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
 		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
 		1 << (count - 1 + (uspi->s_fpb & 7)));
 	if (location == 0) {
 		length = start + 1;
-		location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
+		location = ubh_scanc(UCPI_UBH(ucpi), ucpi->c_freeoff, length,
 			(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
 			1 << (count - 1 + (uspi->s_fpb & 7)));
 		if (location == 0) {
@@ -769,7 +769,7 @@ static unsigned ufs_bitmap_search (struct super_block * sb,
 	/*
 	 * found the byte in the map
 	 */
-	blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
+	blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
 	fragsize = 0;
 	for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) {
 		if (blockmap & mask) {
@@ -808,9 +808,9 @@ static void ufs_clusteracct(struct super_block * sb,
 		return;
 
 	if (cnt > 0)
-		ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
+		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
 	else
-		ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
+		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
 
 	/*
 	 * Find the size of the cluster going forward.
@@ -819,7 +819,7 @@ static void ufs_clusteracct(struct super_block * sb,
 	end = start + uspi->s_contigsumsize;
 	if ( end >= ucpi->c_nclusterblks)
 		end = ucpi->c_nclusterblks;
-	i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
+	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
 	if (i > end)
 		i = end;
 	forw = i - start;
@@ -831,7 +831,7 @@ static void ufs_clusteracct(struct super_block * sb,
 	end = start - uspi->s_contigsumsize;
 	if (end < 0 )
 		end = -1;
-	i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
+	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
 	if ( i < end)
 		i = end;
 	back = start - i;
@@ -843,11 +843,11 @@ static void ufs_clusteracct(struct super_block * sb,
 	i = back + forw + 1;
 	if (i > uspi->s_contigsumsize)
 		i = uspi->s_contigsumsize;
-	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
+	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
 	if (back > 0)
-		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
+		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
 	if (forw > 0)
-		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
+		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
 }
 
 