Diffstat (limited to 'fs/gfs2/rgrp.c')
 fs/gfs2/rgrp.c | 208 ++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 130 insertions(+), 78 deletions(-)
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 69317435faa7..4d83abdd5635 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -81,11 +81,12 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
 			       unsigned char new_state)
 {
 	unsigned char *byte1, *byte2, *end, cur_state;
-	unsigned int buflen = rbm->bi->bi_len;
+	struct gfs2_bitmap *bi = rbm_bi(rbm);
+	unsigned int buflen = bi->bi_len;
 	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
 
-	byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
-	end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;
+	byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
+	end = bi->bi_bh->b_data + bi->bi_offset + buflen;
 
 	BUG_ON(byte1 >= end);
 
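[Note: the new code goes through an rbm_bi() helper instead of the cached rbm->bi pointer; as the later hunks show, struct gfs2_rbm now carries a bitmap index (bii) rather than a pointer. The helper and the struct change are not part of this file's diff (they presumably live alongside the struct definition in incore.h/rgrp.h), so the following is only a sketch of the shape this patch assumes:

	/* Sketch, not from this diff: rbm_bi() maps the new bii index back to a bitmap. */
	struct gfs2_rbm {
		struct gfs2_rgrpd *rgd;
		u32 offset;	/* bit offset, relative to the current bitmap */
		int bii;	/* index into rgd->rd_bits (replaces struct gfs2_bitmap *bi) */
	};

	static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
	{
		return rbm->rgd->rd_bits + rbm->bii;
	}

Using an index keeps wrap-around arithmetic (see gfs2_rbm_incr below) to simple integer comparisons.]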
@@ -95,18 +96,17 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
 		printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
 		       "new_state=%d\n", rbm->offset, cur_state, new_state);
 		printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
-		       (unsigned long long)rbm->rgd->rd_addr,
-		       rbm->bi->bi_start);
+		       (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
 		printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
-		       rbm->bi->bi_offset, rbm->bi->bi_len);
+		       bi->bi_offset, bi->bi_len);
 		dump_stack();
 		gfs2_consist_rgrpd(rbm->rgd);
 		return;
 	}
 	*byte1 ^= (cur_state ^ new_state) << bit;
 
-	if (do_clone && rbm->bi->bi_clone) {
-		byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
+	if (do_clone && bi->bi_clone) {
+		byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
 		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
 		*byte2 ^= (cur_state ^ new_state) << bit;
 	}
@@ -121,7 +121,8 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
 
 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
 {
-	const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
+	struct gfs2_bitmap *bi = rbm_bi(rbm);
+	const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
 	const u8 *byte;
 	unsigned int bit;
 
@@ -252,29 +253,53 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 {
 	u64 rblock = block - rbm->rgd->rd_data0;
-	u32 x;
 
 	if (WARN_ON_ONCE(rblock > UINT_MAX))
 		return -EINVAL;
 	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
 		return -E2BIG;
 
-	rbm->bi = rbm->rgd->rd_bits;
+	rbm->bii = 0;
 	rbm->offset = (u32)(rblock);
 	/* Check if the block is within the first block */
-	if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
+	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
 		return 0;
 
 	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
 	rbm->offset += (sizeof(struct gfs2_rgrp) -
 			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
-	x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-	rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-	rbm->bi += x;
+	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
 	return 0;
 }
 
 /**
+ * gfs2_rbm_incr - increment an rbm structure
+ * @rbm: The rbm with rgd already set correctly
+ *
+ * This function takes an existing rbm structure and increments it to the next
+ * viable block offset.
+ *
+ * Returns: If incrementing the offset would cause the rbm to go past the
+ * end of the rgrp, true is returned, otherwise false.
+ *
+ */
+
+static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
+{
+	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
+		rbm->offset++;
+		return false;
+	}
+	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
+		return true;
+
+	rbm->offset = 0;
+	rbm->bii++;
+	return false;
+}
+
+/**
  * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
  * @rbm: Position to search (value/result)
  * @n_unaligned: Number of unaligned blocks to check
@@ -285,7 +310,6 @@ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 
 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
 {
-	u64 block;
 	u32 n;
 	u8 res;
 
@@ -296,8 +320,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
 		(*len)--;
 		if (*len == 0)
 			return true;
-		block = gfs2_rbm_to_block(rbm);
-		if (gfs2_rbm_from_block(rbm, block + 1))
+		if (gfs2_rbm_incr(rbm))
 			return true;
 	}
 
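[Note: the hunk above drops the round trip through gfs2_rbm_to_block() and gfs2_rbm_from_block(), which recomputed an absolute 64-bit block number and then divided by sd_blocks_per_bitmap on every step; with bii and bi_blocks, advancing by one block is a constant-time bump of offset or bii. A hedged illustration (hypothetical helper, not from this patch) of walking a run of blocks with the new primitive:

	/* Hypothetical: count how many consecutive free blocks start at rbm. */
	static u32 count_free_run(struct gfs2_rbm rbm, u32 max)
	{
		u32 found = 0;

		while (max--) {
			if (gfs2_testbit(&rbm) != GFS2_BLKST_FREE)
				break;
			found++;
			if (gfs2_rbm_incr(&rbm))	/* true: fell off the end of the rgrp */
				break;
		}
		return found;
	}]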
@@ -328,6 +351,7 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
 	u32 chunk_size;
 	u8 *ptr, *start, *end;
 	u64 block;
+	struct gfs2_bitmap *bi;
 
 	if (n_unaligned &&
 	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
@@ -336,11 +360,12 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
 	n_unaligned = len & 3;
 	/* Start is now byte aligned */
 	while (len > 3) {
-		start = rbm.bi->bi_bh->b_data;
-		if (rbm.bi->bi_clone)
-			start = rbm.bi->bi_clone;
-		end = start + rbm.bi->bi_bh->b_size;
-		start += rbm.bi->bi_offset;
+		bi = rbm_bi(&rbm);
+		start = bi->bi_bh->b_data;
+		if (bi->bi_clone)
+			start = bi->bi_clone;
+		end = start + bi->bi_bh->b_size;
+		start += bi->bi_offset;
 		BUG_ON(rbm.offset & 3);
 		start += (rbm.offset / GFS2_NBBY);
 		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
@@ -605,11 +630,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
 	RB_CLEAR_NODE(&rs->rs_node);
 
 	if (rs->rs_free) {
+		struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+
 		/* return reserved blocks to the rgrp */
 		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
 		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
 		rs->rs_free = 0;
-		clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
+		clear_bit(GBF_FULL, &bi->bi_flags);
 		smp_mb__after_clear_bit();
 	}
 }
@@ -634,14 +661,13 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 /**
  * gfs2_rs_delete - delete a multi-block reservation
  * @ip: The inode for this reservation
+ * @wcount: The inode's write count, or NULL
  *
  */
-void gfs2_rs_delete(struct gfs2_inode *ip)
+void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
 {
-	struct inode *inode = &ip->i_inode;
-
 	down_write(&ip->i_rw_mutex);
-	if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
+	if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
 		gfs2_rs_deltree(ip->i_res);
 		BUG_ON(ip->i_res->rs_free);
 		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
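[Note: gfs2_rs_delete() no longer digs the write count out of the embedded VFS inode; the caller now supplies it, or NULL to skip the check. The call sites live in other files and are not shown in this diff, but presumably change along these lines (illustrative only):

	/* Normal path: only drop the reservation if we are the last writer. */
	gfs2_rs_delete(ip, &inode->i_writecount);

	/* Teardown path with no meaningful write count: drop unconditionally. */
	gfs2_rs_delete(ip, NULL);]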
@@ -743,18 +769,21 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
 			bi->bi_offset = sizeof(struct gfs2_rgrp);
 			bi->bi_start = 0;
 			bi->bi_len = bytes;
+			bi->bi_blocks = bytes * GFS2_NBBY;
 		/* header block */
 		} else if (x == 0) {
 			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
 			bi->bi_offset = sizeof(struct gfs2_rgrp);
 			bi->bi_start = 0;
 			bi->bi_len = bytes;
+			bi->bi_blocks = bytes * GFS2_NBBY;
 		/* last block */
 		} else if (x + 1 == length) {
 			bytes = bytes_left;
 			bi->bi_offset = sizeof(struct gfs2_meta_header);
 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
 			bi->bi_len = bytes;
+			bi->bi_blocks = bytes * GFS2_NBBY;
 		/* other blocks */
 		} else {
 			bytes = sdp->sd_sb.sb_bsize -
@@ -762,6 +791,7 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
 			bi->bi_offset = sizeof(struct gfs2_meta_header);
 			bi->bi_start = rgd->rd_bitbytes - bytes_left;
 			bi->bi_len = bytes;
+			bi->bi_blocks = bytes * GFS2_NBBY;
 		}
 
 		bytes_left -= bytes;
@@ -1392,12 +1422,12 @@ static void rs_insert(struct gfs2_inode *ip)
  * rg_mblk_search - find a group of multiple free blocks to form a reservation
  * @rgd: the resource group descriptor
  * @ip: pointer to the inode for which we're reserving blocks
- * @requested: number of blocks required for this allocation
+ * @ap: the allocation parameters
  *
  */
 
 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
-			   unsigned requested)
+			   const struct gfs2_alloc_parms *ap)
 {
 	struct gfs2_rbm rbm = { .rgd = rgd, };
 	u64 goal;
@@ -1410,7 +1440,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
 	if (S_ISDIR(inode->i_mode))
 		extlen = 1;
 	else {
-		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+		extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
 		extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
 	}
 	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
@@ -1554,14 +1584,14 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
 			 const struct gfs2_inode *ip, bool nowrap)
 {
 	struct buffer_head *bh;
-	struct gfs2_bitmap *initial_bi;
+	int initial_bii;
 	u32 initial_offset;
 	u32 offset;
 	u8 *buffer;
-	int index;
 	int n = 0;
 	int iters = rbm->rgd->rd_length;
 	int ret;
+	struct gfs2_bitmap *bi;
 
 	/* If we are not starting at the beginning of a bitmap, then we
 	 * need to add one to the bitmap count to ensure that we search
@@ -1571,52 +1601,53 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
 		iters++;
 
 	while(1) {
-		if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
+		bi = rbm_bi(rbm);
+		if (test_bit(GBF_FULL, &bi->bi_flags) &&
 		    (state == GFS2_BLKST_FREE))
 			goto next_bitmap;
 
-		bh = rbm->bi->bi_bh;
-		buffer = bh->b_data + rbm->bi->bi_offset;
+		bh = bi->bi_bh;
+		buffer = bh->b_data + bi->bi_offset;
 		WARN_ON(!buffer_uptodate(bh));
-		if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
-			buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
+		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
+			buffer = bi->bi_clone + bi->bi_offset;
 		initial_offset = rbm->offset;
-		offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
+		offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
 		if (offset == BFITNOENT)
 			goto bitmap_full;
 		rbm->offset = offset;
 		if (ip == NULL)
 			return 0;
 
-		initial_bi = rbm->bi;
+		initial_bii = rbm->bii;
 		ret = gfs2_reservation_check_and_update(rbm, ip, minext);
 		if (ret == 0)
 			return 0;
 		if (ret > 0) {
-			n += (rbm->bi - initial_bi);
+			n += (rbm->bii - initial_bii);
 			goto next_iter;
 		}
 		if (ret == -E2BIG) {
-			index = 0;
+			rbm->bii = 0;
 			rbm->offset = 0;
-			n += (rbm->bi - initial_bi);
+			n += (rbm->bii - initial_bii);
 			goto res_covered_end_of_rgrp;
 		}
 		return ret;
 
 bitmap_full:	/* Mark bitmap as full and fall through */
-		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
-			set_bit(GBF_FULL, &rbm->bi->bi_flags);
+		if ((state == GFS2_BLKST_FREE) && initial_offset == 0) {
+			struct gfs2_bitmap *bi = rbm_bi(rbm);
+			set_bit(GBF_FULL, &bi->bi_flags);
+		}
 
 next_bitmap:	/* Find next bitmap in the rgrp */
 		rbm->offset = 0;
-		index = rbm->bi - rbm->rgd->rd_bits;
-		index++;
-		if (index == rbm->rgd->rd_length)
-			index = 0;
+		rbm->bii++;
+		if (rbm->bii == rbm->rgd->rd_length)
+			rbm->bii = 0;
 res_covered_end_of_rgrp:
-		rbm->bi = &rbm->rgd->rd_bits[index];
-		if ((index == 0) && nowrap)
+		if ((rbm->bii == 0) && nowrap)
 			break;
 		n++;
 next_iter:
@@ -1645,7 +1676,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
 	struct gfs2_inode *ip;
 	int error;
 	int found = 0;
-	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
+	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
 
 	while (1) {
 		down_write(&sdp->sd_log_flush_lock);
@@ -1800,12 +1831,12 @@ static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
 /**
  * gfs2_inplace_reserve - Reserve space in the filesystem
  * @ip: the inode to reserve space for
- * @requested: the number of blocks to be reserved
+ * @ap: the allocation parameters
  *
  * Returns: errno
  */
 
-int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrpd *begin = NULL;
@@ -1817,17 +1848,16 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
 
 	if (sdp->sd_args.ar_rgrplvb)
 		flags |= GL_SKIP;
-	if (gfs2_assert_warn(sdp, requested))
+	if (gfs2_assert_warn(sdp, ap->target))
 		return -EINVAL;
 	if (gfs2_rs_active(rs)) {
 		begin = rs->rs_rbm.rgd;
-		flags = 0; /* Yoda: Do or do not. There is no try */
 	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
 		rs->rs_rbm.rgd = begin = ip->i_rgd;
 	} else {
 		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
 	}
-	if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
+	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
 		skip = gfs2_orlov_skip(ip);
 	if (rs->rs_rbm.rgd == NULL)
 		return -EBADSLT;
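[Note: the separate requested/aflags arguments are folded into a single struct gfs2_alloc_parms passed by const pointer. The structure is defined outside this file; judging only by the fields this diff uses (ap->target and ap->aflags), a minimal sketch would be:

	/* Sketch based on the fields used in this diff; the real definition lives in a header. */
	struct gfs2_alloc_parms {
		u64 target;	/* number of blocks requested (the old "requested" argument) */
		u32 aflags;	/* allocation flags, e.g. GFS2_AF_ORLOV (the old "aflags" argument) */
	};]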
@@ -1869,14 +1899,14 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
 
 		/* Get a reservation if we don't already have one */
 		if (!gfs2_rs_active(rs))
-			rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
+			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
 
 		/* Skip rgrps when we can't get a reservation on first pass */
 		if (!gfs2_rs_active(rs) && (loops < 1))
 			goto check_rgrp;
 
 		/* If rgrp has enough free space, use it */
-		if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
+		if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
 			ip->i_rgd = rs->rs_rbm.rgd;
 			return 0;
 		}
@@ -1973,14 +2003,14 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
 
 	*n = 1;
 	block = gfs2_rbm_to_block(rbm);
-	gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh);
+	gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
 	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
 	block++;
 	while (*n < elen) {
 		ret = gfs2_rbm_from_block(&pos, block);
 		if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
 			break;
-		gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh);
+		gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
 		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
 		(*n)++;
 		block++;
@@ -2001,6 +2031,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
 				     u32 blen, unsigned char new_state)
 {
 	struct gfs2_rbm rbm;
+	struct gfs2_bitmap *bi;
 
 	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
 	if (!rbm.rgd) {
@@ -2011,15 +2042,15 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
 
 	while (blen--) {
 		gfs2_rbm_from_block(&rbm, bstart);
+		bi = rbm_bi(&rbm);
 		bstart++;
-		if (!rbm.bi->bi_clone) {
-			rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
-						   GFP_NOFS | __GFP_NOFAIL);
-			memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
-			       rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
-			       rbm.bi->bi_len);
+		if (!bi->bi_clone) {
+			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
+					       GFP_NOFS | __GFP_NOFAIL);
+			memcpy(bi->bi_clone + bi->bi_offset,
+			       bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
 		}
-		gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh);
+		gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
 		gfs2_setbit(&rbm, false, new_state);
 	}
 
@@ -2103,6 +2134,35 @@ out:
 }
 
 /**
+ * gfs2_set_alloc_start - Set starting point for block allocation
+ * @rbm: The rbm which will be set to the required location
+ * @ip: The gfs2 inode
+ * @dinode: Flag to say if allocation includes a new inode
+ *
+ * This sets the starting point from the reservation if one is active
+ * otherwise it falls back to guessing a start point based on the
+ * inode's goal block or the last allocation point in the rgrp.
+ */
+
+static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
+				 const struct gfs2_inode *ip, bool dinode)
+{
+	u64 goal;
+
+	if (gfs2_rs_active(ip->i_res)) {
+		*rbm = ip->i_res->rs_rbm;
+		return;
+	}
+
+	if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
+		goal = ip->i_goal;
+	else
+		goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
+
+	gfs2_rbm_from_block(rbm, goal);
+}
+
+/**
  * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
  * @ip: the inode to allocate the block for
  * @bn: Used to return the starting block number
@@ -2120,22 +2180,14 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
 	struct buffer_head *dibh;
 	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
 	unsigned int ndata;
-	u64 goal;
 	u64 block; /* block, within the file system scope */
 	int error;
 
-	if (gfs2_rs_active(ip->i_res))
-		goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
-	else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
-		goal = ip->i_goal;
-	else
-		goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
-
-	gfs2_rbm_from_block(&rbm, goal);
+	gfs2_set_alloc_start(&rbm, ip, dinode);
 	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
 
 	if (error == -ENOSPC) {
-		gfs2_rbm_from_block(&rbm, goal);
+		gfs2_set_alloc_start(&rbm, ip, dinode);
 		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
 	}
 
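[Note: with the new gfs2_inplace_reserve() prototype, callers elsewhere in the tree have to build an allocation-parameters structure instead of passing a bare block count. An illustrative caller (hypothetical names such as data_blocks and ind_blocks; not part of this file's diff) might look like:

	struct gfs2_alloc_parms ap = { .target = data_blocks + ind_blocks, .aflags = 0 };
	int error;

	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_unlock;]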