author     Bob Peterson <rpeterso@redhat.com>       2013-09-17 13:12:15 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>  2013-09-18 05:39:53 -0400
commit     e579ed4f446e64748a2d26eed8f8b28f728495bd (patch)
tree       d525f8e67afd5b5d589eb93d8c9f0499c668b4a4 /fs/gfs2
parent     b8708905199a85eebbd820f98d18e045c32077bf (diff)
GFS2: Introduce rbm field bii
This is a respin of the original patch. As Steve pointed out, the introduction
of field bii makes it easy to eliminate bi itself. This revised patch does just
that, replacing bi with bii.

This patch adds a new field to the rbm structure, called bii, which is an index
into the array of bitmaps for an rgrp. This replaces *bi which was a pointer to
the bitmap. This is being done for further optimizations.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
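For illustration only (not part of the patch itself), this is the before/after
access pattern the change produces, using the rbm_bi() helper added in incore.h
below; the example is drawn from the gfs2_setbit() hunk in rgrp.c:

        /* before: the rbm caches a pointer to its bitmap */
        unsigned int buflen = rbm->bi->bi_len;

        /* after: the rbm stores an index, resolved on demand */
        struct gfs2_bitmap *bi = rbm_bi(rbm);   /* rbm->rgd->rd_bits + rbm->bii */
        unsigned int buflen = bi->bi_len;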
Diffstat (limited to 'fs/gfs2')
-rw-r--r--   fs/gfs2/incore.h |  12
-rw-r--r--   fs/gfs2/rgrp.c   | 107
2 files changed, 65 insertions, 54 deletions
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index f1a3243dfaf2..8c8f110d8e35 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -102,19 +102,25 @@ struct gfs2_rgrpd {
 
 struct gfs2_rbm {
         struct gfs2_rgrpd *rgd;
-        struct gfs2_bitmap *bi;         /* Bitmap must belong to the rgd */
         u32 offset;             /* The offset is bitmap relative */
+        int bii;                /* Bitmap index */
 };
 
+static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
+{
+        return rbm->rgd->rd_bits + rbm->bii;
+}
+
 static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
 {
-        return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
+        return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
+                rbm->offset;
 }
 
 static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
                                const struct gfs2_rbm *rbm2)
 {
-        return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) &&
+        return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
                (rbm1->offset == rbm2->offset);
 }
 
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8e5003820aa7..dd3c4d3d7f41 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -81,11 +81,12 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
                                unsigned char new_state)
 {
         unsigned char *byte1, *byte2, *end, cur_state;
-        unsigned int buflen = rbm->bi->bi_len;
+        struct gfs2_bitmap *bi = rbm_bi(rbm);
+        unsigned int buflen = bi->bi_len;
         const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
 
-        byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
-        end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;
+        byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
+        end = bi->bi_bh->b_data + bi->bi_offset + buflen;
 
         BUG_ON(byte1 >= end);
 
@@ -95,18 +96,17 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
                 printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
                        "new_state=%d\n", rbm->offset, cur_state, new_state);
                 printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
-                       (unsigned long long)rbm->rgd->rd_addr,
-                       rbm->bi->bi_start);
+                       (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
                 printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
-                       rbm->bi->bi_offset, rbm->bi->bi_len);
+                       bi->bi_offset, bi->bi_len);
                 dump_stack();
                 gfs2_consist_rgrpd(rbm->rgd);
                 return;
         }
         *byte1 ^= (cur_state ^ new_state) << bit;
 
-        if (do_clone && rbm->bi->bi_clone) {
-                byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
+        if (do_clone && bi->bi_clone) {
+                byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
                 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
                 *byte2 ^= (cur_state ^ new_state) << bit;
         }
@@ -121,7 +121,8 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
 
 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
 {
-        const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
+        struct gfs2_bitmap *bi = rbm_bi(rbm);
+        const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
         const u8 *byte;
         unsigned int bit;
 
@@ -252,25 +253,23 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 {
         u64 rblock = block - rbm->rgd->rd_data0;
-        u32 x;
 
         if (WARN_ON_ONCE(rblock > UINT_MAX))
                 return -EINVAL;
         if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
                 return -E2BIG;
 
-        rbm->bi = rbm->rgd->rd_bits;
+        rbm->bii = 0;
         rbm->offset = (u32)(rblock);
         /* Check if the block is within the first block */
-        if (rbm->offset < rbm->bi->bi_blocks)
+        if (rbm->offset < rbm_bi(rbm)->bi_blocks)
                 return 0;
 
         /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
         rbm->offset += (sizeof(struct gfs2_rgrp) -
                         sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
-        x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-        rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-        rbm->bi += x;
+        rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+        rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
         return 0;
 }
 
@@ -328,6 +327,7 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
         u32 chunk_size;
         u8 *ptr, *start, *end;
         u64 block;
+        struct gfs2_bitmap *bi;
 
         if (n_unaligned &&
             gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
@@ -336,11 +336,12 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
         n_unaligned = len & 3;
         /* Start is now byte aligned */
         while (len > 3) {
-                start = rbm.bi->bi_bh->b_data;
-                if (rbm.bi->bi_clone)
-                        start = rbm.bi->bi_clone;
-                end = start + rbm.bi->bi_bh->b_size;
-                start += rbm.bi->bi_offset;
+                bi = rbm_bi(&rbm);
+                start = bi->bi_bh->b_data;
+                if (bi->bi_clone)
+                        start = bi->bi_clone;
+                end = start + bi->bi_bh->b_size;
+                start += bi->bi_offset;
                 BUG_ON(rbm.offset & 3);
                 start += (rbm.offset / GFS2_NBBY);
                 bytes = min_t(u32, len / GFS2_NBBY, (end - start));
@@ -605,11 +606,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
         RB_CLEAR_NODE(&rs->rs_node);
 
         if (rs->rs_free) {
+                struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+
                 /* return reserved blocks to the rgrp */
                 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
                 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
                 rs->rs_free = 0;
-                clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
+                clear_bit(GBF_FULL, &bi->bi_flags);
                 smp_mb__after_clear_bit();
         }
 }
@@ -1558,14 +1561,14 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
                          const struct gfs2_inode *ip, bool nowrap)
 {
         struct buffer_head *bh;
-        struct gfs2_bitmap *initial_bi;
+        int initial_bii;
         u32 initial_offset;
         u32 offset;
         u8 *buffer;
-        int index;
         int n = 0;
         int iters = rbm->rgd->rd_length;
         int ret;
+        struct gfs2_bitmap *bi;
 
         /* If we are not starting at the beginning of a bitmap, then we
          * need to add one to the bitmap count to ensure that we search
@@ -1575,52 +1578,53 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
                 iters++;
 
         while(1) {
-                if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
+                bi = rbm_bi(rbm);
+                if (test_bit(GBF_FULL, &bi->bi_flags) &&
                     (state == GFS2_BLKST_FREE))
                         goto next_bitmap;
 
-                bh = rbm->bi->bi_bh;
-                buffer = bh->b_data + rbm->bi->bi_offset;
+                bh = bi->bi_bh;
+                buffer = bh->b_data + bi->bi_offset;
                 WARN_ON(!buffer_uptodate(bh));
-                if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
-                        buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
+                if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
+                        buffer = bi->bi_clone + bi->bi_offset;
                 initial_offset = rbm->offset;
-                offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
+                offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
                 if (offset == BFITNOENT)
                         goto bitmap_full;
                 rbm->offset = offset;
                 if (ip == NULL)
                         return 0;
 
-                initial_bi = rbm->bi;
+                initial_bii = rbm->bii;
                 ret = gfs2_reservation_check_and_update(rbm, ip, minext);
                 if (ret == 0)
                         return 0;
                 if (ret > 0) {
-                        n += (rbm->bi - initial_bi);
+                        n += (rbm->bii - initial_bii);
                         goto next_iter;
                 }
                 if (ret == -E2BIG) {
-                        index = 0;
+                        rbm->bii = 0;
                         rbm->offset = 0;
-                        n += (rbm->bi - initial_bi);
+                        n += (rbm->bii - initial_bii);
                         goto res_covered_end_of_rgrp;
                 }
                 return ret;
 
 bitmap_full:    /* Mark bitmap as full and fall through */
-                if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
-                        set_bit(GBF_FULL, &rbm->bi->bi_flags);
+                if ((state == GFS2_BLKST_FREE) && initial_offset == 0) {
+                        struct gfs2_bitmap *bi = rbm_bi(rbm);
+                        set_bit(GBF_FULL, &bi->bi_flags);
+                }
 
 next_bitmap:    /* Find next bitmap in the rgrp */
                 rbm->offset = 0;
-                index = rbm->bi - rbm->rgd->rd_bits;
-                index++;
-                if (index == rbm->rgd->rd_length)
-                        index = 0;
+                rbm->bii++;
+                if (rbm->bii == rbm->rgd->rd_length)
+                        rbm->bii = 0;
 res_covered_end_of_rgrp:
-                rbm->bi = &rbm->rgd->rd_bits[index];
-                if ((index == 0) && nowrap)
+                if ((rbm->bii == 0) && nowrap)
                         break;
                 n++;
 next_iter:
@@ -1649,7 +1653,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
         struct gfs2_inode *ip;
         int error;
         int found = 0;
-        struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
+        struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
 
         while (1) {
                 down_write(&sdp->sd_log_flush_lock);
@@ -1976,14 +1980,14 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
 
         *n = 1;
         block = gfs2_rbm_to_block(rbm);
-        gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh);
+        gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
         gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
         block++;
         while (*n < elen) {
                 ret = gfs2_rbm_from_block(&pos, block);
                 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
                         break;
-                gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh);
+                gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
                 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
                 (*n)++;
                 block++;
@@ -2004,6 +2008,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
                              u32 blen, unsigned char new_state)
 {
         struct gfs2_rbm rbm;
+        struct gfs2_bitmap *bi;
 
         rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
         if (!rbm.rgd) {
@@ -2014,15 +2019,15 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
 
         while (blen--) {
                 gfs2_rbm_from_block(&rbm, bstart);
+                bi = rbm_bi(&rbm);
                 bstart++;
-                if (!rbm.bi->bi_clone) {
-                        rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
-                                                   GFP_NOFS | __GFP_NOFAIL);
-                        memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
-                               rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
-                               rbm.bi->bi_len);
+                if (!bi->bi_clone) {
+                        bi->bi_clone = kmalloc(bi->bi_bh->b_size,
+                                               GFP_NOFS | __GFP_NOFAIL);
+                        memcpy(bi->bi_clone + bi->bi_offset,
+                               bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
                 }
-                gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh);
+                gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
                 gfs2_setbit(&rbm, false, new_state);
         }
 