author		Steven Whitehouse <swhiteho@redhat.com>	2012-09-10 05:03:50 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2012-09-24 05:47:26 -0400
commit		ff7f4cb461163967a9dbb8c569e2447b7520654f (patch)
tree		0455ebddc5fc46b035e167f5f41a0f831c5c5694 /fs
parent		56aa72d0fcc9c4a3af4d0111d8d7f336b63adff9 (diff)
GFS2: Consolidate free block searching functions
With the recently added block reservation code, an additional function was added to search for free blocks. This had the restriction of only being able to search for aligned extents of free blocks. As a result, the allocation patterns when reserving blocks were suboptimal when the existing allocation of blocks for an inode was not aligned to the same boundary.

This patch resolves that problem by adding the ability for gfs2_rbm_find to search for extents of a particular minimum size. We can then use gfs2_rbm_find both when looking for reservations and when looking for free blocks on an individual basis when we actually come to do the allocation later on. As a result we only need a single set of code to deal with both situations.

The function gfs2_rbm_from_block() is moved up rgrp.c so that it occurs before all of its callers.

Many thanks are due to Bob for helping track down the final issue in this patch. That fix, to the rb_tree traversal and to not share block reservations from a directory to its children, is included here.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
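As background for the patch below: GFS2 tracks block state in per-resource-group bitmaps at two bits per block, so each bitmap byte covers GFS2_NBBY (= 4) blocks, and a struct gfs2_rbm names a position in those bitmaps as a (bitmap, offset) pair. The following is a minimal user-space sketch of the lookup that gfs2_rbm_from_block() performs in this patch; the struct and helper names are simplified stand-ins for illustration, not the kernel definitions.

/*
 * Sketch only: map a resource-group-relative block number to the bitmap
 * buffer that covers it plus the block offset within that buffer.
 */
#include <stdint.h>
#include <stdio.h>

#define GFS2_NBBY 4	/* four 2-bit block states per bitmap byte */

struct bitmap {			/* stand-in for struct gfs2_bitmap */
	uint32_t bi_start;	/* start of this bitmap, in bytes */
	uint32_t bi_len;	/* length of this bitmap, in bytes */
};

struct rbm {			/* stand-in for struct gfs2_rbm */
	const struct bitmap *bi;
	uint32_t offset;	/* block offset within bi */
};

/* Returns 0 on success, -1 if the block lies beyond the last bitmap */
static int rbm_from_block(struct rbm *rbm, const struct bitmap *bits,
			  unsigned int nbitmaps, uint32_t rgrp_block)
{
	unsigned int x;

	for (x = 0; x < nbitmaps; x++) {
		const struct bitmap *bi = &bits[x];

		if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
			rbm->bi = bi;
			rbm->offset = rgrp_block - bi->bi_start * GFS2_NBBY;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	/* Two bitmaps: 100 bytes (400 blocks) then 50 bytes (200 blocks) */
	const struct bitmap bits[] = {
		{ .bi_start = 0,   .bi_len = 100 },
		{ .bi_start = 100, .bi_len = 50 },
	};
	struct rbm rbm;

	if (rbm_from_block(&rbm, bits, 2, 450) == 0)
		printf("block 450 -> bitmap at byte %u, offset %u\n",
		       rbm.bi->bi_start, rbm.offset);	/* bitmap 1, offset 50 */
	return 0;
}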
Diffstat (limited to 'fs')
-rw-r--r--	fs/gfs2/incore.h	1
-rw-r--r--	fs/gfs2/inode.c		11
-rw-r--r--	fs/gfs2/rgrp.c		367
-rw-r--r--	fs/gfs2/rgrp.h		6
4 files changed, 195 insertions, 190 deletions
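Before the diff itself, a rough illustration of the new minimum-extent idea: gfs2_rbm_find() can now be asked for an extent of at least minext free blocks, and gfs2_free_extlen() measures the extent by walking whole zero bitmap bytes (four free blocks at a time; the kernel uses memchr_inv for this) and falling back to per-block checks at unaligned edges. The sketch below is user space only, with illustrative names and none of the kernel's buffer heads or clone bitmaps.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define GFS2_NBBY 4	/* blocks encoded per bitmap byte; free state == 0 */

/* State of one block: 2 bits at position (offset % 4) within its byte */
static unsigned int block_state(const uint8_t *bitmap, uint32_t offset)
{
	return (bitmap[offset / GFS2_NBBY] >> ((offset % GFS2_NBBY) * 2)) & 3;
}

/*
 * Count free blocks starting at 'offset', looking at most 'len' blocks
 * ahead: whole zero bytes are consumed four blocks at a time, anything
 * unaligned (or a partly used byte) is checked block by block.
 */
static uint32_t free_extlen(const uint8_t *bitmap, size_t bytes,
			    uint32_t offset, uint32_t len)
{
	uint32_t count = 0;

	while (count < len && offset < bytes * GFS2_NBBY) {
		if (offset % GFS2_NBBY == 0 && len - count >= GFS2_NBBY &&
		    bitmap[offset / GFS2_NBBY] == 0) {
			count += GFS2_NBBY;	/* a full byte of free blocks */
			offset += GFS2_NBBY;
			continue;
		}
		if (block_state(bitmap, offset) != 0)	/* not free: stop */
			break;
		count++;
		offset++;
	}
	return count;
}

int main(void)
{
	/* 8 bytes = 32 blocks; mark block 18 as non-free (state 1) */
	uint8_t bitmap[8] = { 0 };

	bitmap[18 / GFS2_NBBY] |= 1 << ((18 % GFS2_NBBY) * 2);
	printf("free extent from block 10: %u blocks\n",
	       free_extlen(bitmap, sizeof(bitmap), 10, 32));	/* prints 8 */
	return 0;
}

A caller reserving blocks would compare the returned length against its requested minimum and skip ahead when the extent is too short, which is what gfs2_reservation_check_and_update() does with minext in the patch.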
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6aaa07c7c731..3d469d37345e 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -99,7 +99,6 @@ struct gfs2_rgrpd {
 #define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
 	spinlock_t rd_rsspin;           /* protects reservation related vars */
 	struct rb_root rd_rstree;       /* multi-block reservation tree */
-	u32 rd_rs_cnt;                  /* count of current reservations */
 };
 
 struct gfs2_rbm {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index f2709ea887da..381893ceefa4 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -712,14 +712,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_gunlock2;
 
-	/* The newly created inode needs a reservation so it can allocate
-	   xattrs. At the same time, we want new blocks allocated to the new
-	   dinode to be as contiguous as possible. Since we allocated the
-	   dinode block under the directory's reservation, we transfer
-	   ownership of that reservation to the new inode. The directory
-	   doesn't need a reservation unless it needs a new allocation. */
-	ip->i_res = dip->i_res;
-	dip->i_res = NULL;
+	error = gfs2_rs_alloc(ip);
+	if (error)
+		goto fail_gunlock2;
 
 	error = gfs2_acl_create(dip, inode);
 	if (error)
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index defb8265ce52..b933cdcda7f4 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -35,9 +35,6 @@
 #define BFITNOENT ((u32)~0)
 #define NO_BLOCK ((u64)~0)
 
-#define RSRV_CONTENTION_FACTOR 4
-#define RGRP_RSRV_MAX_CONTENDERS 2
-
 #if BITS_PER_LONG == 32
 #define LBITMASK   (0x55555555UL)
 #define LBITSKIP55 (0x55555555UL)
@@ -67,6 +64,10 @@ static const char valid_change[16] = {
 	        1, 0, 0, 0
 };
 
+static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
+			 const struct gfs2_inode *ip, bool nowrap);
+
+
 /**
  * gfs2_setbit - Set a bit in the bitmaps
  * @rbm: The position of the bit to set
@@ -235,6 +236,130 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
 }
 
 /**
+ * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
+ * @rbm: The rbm with rgd already set correctly
+ * @block: The block number (filesystem relative)
+ *
+ * This sets the bi and offset members of an rbm based on a
+ * resource group and a filesystem relative block number. The
+ * resource group must be set in the rbm on entry, the bi and
+ * offset members will be set by this function.
+ *
+ * Returns: 0 on success, or an error code
+ */
+
+static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
+{
+	u64 rblock = block - rbm->rgd->rd_data0;
+	u32 goal = (u32)rblock;
+	int x;
+
+	if (WARN_ON_ONCE(rblock > UINT_MAX))
+		return -EINVAL;
+	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
+		return -E2BIG;
+
+	for (x = 0; x < rbm->rgd->rd_length; x++) {
+		rbm->bi = rbm->rgd->rd_bits + x;
+		if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
+			rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
+ * @rbm: Position to search (value/result)
+ * @n_unaligned: Number of unaligned blocks to check
+ * @len: Decremented for each block found (terminate on zero)
+ *
+ * Returns: true if a non-free block is encountered
+ */
+
+static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
+{
+	u64 block;
+	u32 n;
+	u8 res;
+
+	for (n = 0; n < n_unaligned; n++) {
+		res = gfs2_testbit(rbm);
+		if (res != GFS2_BLKST_FREE)
+			return true;
+		(*len)--;
+		if (*len == 0)
+			return true;
+		block = gfs2_rbm_to_block(rbm);
+		if (gfs2_rbm_from_block(rbm, block + 1))
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * gfs2_free_extlen - Return extent length of free blocks
+ * @rbm: Starting position
+ * @len: Max length to check
+ *
+ * Starting at the block specified by the rbm, see how many free blocks
+ * there are, not reading more than len blocks ahead. This can be done
+ * using memchr_inv when the blocks are byte aligned, but has to be done
+ * on a block by block basis in case of unaligned blocks. Also this
+ * function can cope with bitmap boundaries (although it must stop on
+ * a resource group boundary)
+ *
+ * Returns: Number of free blocks in the extent
+ */
+
+static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
+{
+	struct gfs2_rbm rbm = *rrbm;
+	u32 n_unaligned = rbm.offset & 3;
+	u32 size = len;
+	u32 bytes;
+	u32 chunk_size;
+	u8 *ptr, *start, *end;
+	u64 block;
+
+	if (n_unaligned &&
+	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
+		goto out;
+
+	/* Start is now byte aligned */
+	while (len > 3) {
+		start = rbm.bi->bi_bh->b_data;
+		if (rbm.bi->bi_clone)
+			start = rbm.bi->bi_clone;
+		end = start + rbm.bi->bi_bh->b_size;
+		start += rbm.bi->bi_offset;
+		BUG_ON(rbm.offset & 3);
+		start += (rbm.offset / GFS2_NBBY);
+		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
+		ptr = memchr_inv(start, 0, bytes);
+		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
+		chunk_size *= GFS2_NBBY;
+		BUG_ON(len < chunk_size);
+		len -= chunk_size;
+		block = gfs2_rbm_to_block(&rbm);
+		gfs2_rbm_from_block(&rbm, block + chunk_size);
+		n_unaligned = 3;
+		if (ptr)
+			break;
+		n_unaligned = len & 3;
+	}
+
+	/* Deal with any bits left over at the end */
+	if (n_unaligned)
+		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
+out:
+	return size - len;
+}
+
+/**
  * gfs2_bitcount - count the number of bits in a certain state
  * @rgd: the resource group descriptor
  * @buffer: the buffer that holds the bitmaps
@@ -472,8 +597,6 @@ static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
 	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
 	rb_erase(&rs->rs_node, &rgd->rd_rstree);
 	RB_CLEAR_NODE(&rs->rs_node);
-	BUG_ON(!rgd->rd_rs_cnt);
-	rgd->rd_rs_cnt--;
 
 	if (rs->rs_free) {
 		/* return reserved blocks to the rgrp and the ip */
@@ -1208,179 +1331,85 @@ out:
 
 /**
  * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
- * @bi: the bitmap with the blocks
  * @ip: the inode structure
- * @biblk: the 32-bit block number relative to the start of the bitmap
- * @amount: the number of blocks to reserve
  *
- * Returns: NULL - reservation was already taken, so not inserted
- *          pointer to the inserted reservation
  */
-static struct gfs2_blkreserv *rs_insert(struct gfs2_bitmap *bi,
-					struct gfs2_inode *ip, u32 biblk,
-					int amount)
+static void rs_insert(struct gfs2_inode *ip)
 {
 	struct rb_node **newn, *parent = NULL;
 	int rc;
 	struct gfs2_blkreserv *rs = ip->i_res;
 	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
-	u64 fsblock = gfs2_bi2rgd_blk(bi, biblk) + rgd->rd_data0;
+	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
 
-	spin_lock(&rgd->rd_rsspin);
-	newn = &rgd->rd_rstree.rb_node;
-	BUG_ON(!ip->i_res);
 	BUG_ON(gfs2_rs_active(rs));
-	/* Figure out where to put new node */
 
+	spin_lock(&rgd->rd_rsspin);
+	newn = &rgd->rd_rstree.rb_node;
 	while (*newn) {
 		struct gfs2_blkreserv *cur =
 			rb_entry(*newn, struct gfs2_blkreserv, rs_node);
 
 		parent = *newn;
-		rc = rs_cmp(fsblock, amount, cur);
+		rc = rs_cmp(fsblock, rs->rs_free, cur);
 		if (rc > 0)
 			newn = &((*newn)->rb_right);
 		else if (rc < 0)
 			newn = &((*newn)->rb_left);
 		else {
 			spin_unlock(&rgd->rd_rsspin);
-			return NULL; /* reservation already in use */
+			WARN_ON(1);
+			return;
 		}
 	}
 
-	/* Do our reservation work */
-	rs = ip->i_res;
-	rs->rs_free = amount;
-	rs->rs_rbm.offset = biblk;
-	rs->rs_rbm.bi = bi;
-	rs->rs_inum = ip->i_no_addr;
 	rb_link_node(&rs->rs_node, parent, newn);
 	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
 
 	/* Do our rgrp accounting for the reservation */
-	rgd->rd_reserved += amount; /* blocks reserved */
-	rgd->rd_rs_cnt++; /* number of in-tree reservations */
+	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
 	spin_unlock(&rgd->rd_rsspin);
 	trace_gfs2_rs(rs, TRACE_RS_INSERT);
-	return rs;
 }
 
 /**
- * unclaimed_blocks - return number of blocks that aren't spoken for
- */
-static u32 unclaimed_blocks(struct gfs2_rgrpd *rgd)
-{
-	return rgd->rd_free_clone - rgd->rd_reserved;
-}
-
-/**
- * rg_mblk_search - find a group of multiple free blocks
+ * rg_mblk_search - find a group of multiple free blocks to form a reservation
  * @rgd: the resource group descriptor
  * @ip: pointer to the inode for which we're reserving blocks
  * @requested: number of blocks required for this allocation
  *
- * This is very similar to rgblk_search, except we're looking for whole
- * 64-bit words that represent a chunk of 32 free blocks. I'm only focusing
- * on aligned dwords for speed's sake.
- *
  */
 
-static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, unsigned requested)
+static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
+			   unsigned requested)
 {
-	struct gfs2_bitmap *bi = rgd->rd_bits;
-	const u32 length = rgd->rd_length;
-	u32 blk;
-	unsigned int buf, x, search_bytes;
-	u8 *buffer = NULL;
-	u8 *ptr, *end, *nonzero;
-	u32 goal, rsv_bytes;
-	struct gfs2_blkreserv *rs;
-	u32 best_rs_bytes, unclaimed;
-	int best_rs_blocks;
+	struct gfs2_rbm rbm = { .rgd = rgd, };
+	u64 goal;
+	struct gfs2_blkreserv *rs = ip->i_res;
+	u32 extlen;
+	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
+	int ret;
 
-	if ((rgd->rd_free_clone < rgd->rd_reserved) ||
-	    (unclaimed_blocks(rgd) < max(requested, RGRP_RSRV_MINBLKS)))
+	extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+	extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
+	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
 		return;
 
 	/* Find bitmap block that contains bits for goal block */
 	if (rgrp_contains_block(rgd, ip->i_goal))
-		goal = ip->i_goal - rgd->rd_data0;
+		goal = ip->i_goal;
 	else
-		goal = rgd->rd_last_alloc;
+		goal = rgd->rd_last_alloc + rgd->rd_data0;
 
-	for (buf = 0; buf < length; buf++) {
-		bi = rgd->rd_bits + buf;
-		/* Convert scope of "goal" from rgrp-wide to within
-		   found bit block */
-		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
-			goal -= bi->bi_start * GFS2_NBBY;
-			goto do_search;
-		}
-	}
-	buf = 0;
-	goal = 0;
-
-do_search:
-	best_rs_blocks = max_t(int, atomic_read(&ip->i_res->rs_sizehint),
-			       (RGRP_RSRV_MINBLKS * rgd->rd_length));
-	best_rs_bytes = (best_rs_blocks *
-			 (1 + (RSRV_CONTENTION_FACTOR * rgd->rd_rs_cnt))) /
-		GFS2_NBBY; /* 1 + is for our not-yet-created reservation */
-	best_rs_bytes = ALIGN(best_rs_bytes, sizeof(u64));
-	unclaimed = unclaimed_blocks(rgd);
-	if (best_rs_bytes * GFS2_NBBY > unclaimed)
-		best_rs_bytes = unclaimed >> GFS2_BIT_SIZE;
-
-	for (x = 0; x <= length; x++) {
-		bi = rgd->rd_bits + buf;
-
-		if (test_bit(GBF_FULL, &bi->bi_flags))
-			goto skip;
+	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
+		return;
 
-		WARN_ON(!buffer_uptodate(bi->bi_bh));
-		if (bi->bi_clone)
-			buffer = bi->bi_clone + bi->bi_offset;
-		else
-			buffer = bi->bi_bh->b_data + bi->bi_offset;
-
-		/* We have to keep the reservations aligned on u64 boundaries
-		   otherwise we could get situations where a byte can't be
-		   used because it's after a reservation, but a free bit still
-		   is within the reservation's area. */
-		ptr = buffer + ALIGN(goal >> GFS2_BIT_SIZE, sizeof(u64));
-		end = (buffer + bi->bi_len);
-		while (ptr < end) {
-			rsv_bytes = 0;
-			if ((ptr + best_rs_bytes) <= end)
-				search_bytes = best_rs_bytes;
-			else
-				search_bytes = end - ptr;
-			BUG_ON(!search_bytes);
-			nonzero = memchr_inv(ptr, 0, search_bytes);
-			/* If the lot is all zeroes, reserve the whole size. If
-			   there's enough zeroes to satisfy the request, use
-			   what we can. If there's not enough, keep looking. */
-			if (nonzero == NULL)
-				rsv_bytes = search_bytes;
-			else if ((nonzero - ptr) * GFS2_NBBY >= requested)
-				rsv_bytes = (nonzero - ptr);
-
-			if (rsv_bytes) {
-				blk = ((ptr - buffer) * GFS2_NBBY);
-				BUG_ON(blk >= bi->bi_len * GFS2_NBBY);
-				rs = rs_insert(bi, ip, blk,
-					       rsv_bytes * GFS2_NBBY);
-				if (rs)
-					return;
-			}
-			ptr += ALIGN(search_bytes, sizeof(u64));
-		}
-skip:
-		/* Try next bitmap block (wrap back to rgrp header
-		   if at end) */
-		buf++;
-		buf %= length;
-		goal = 0;
+	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true);
+	if (ret == 0) {
+		rs->rs_rbm = rbm;
+		rs->rs_free = extlen;
+		rs->rs_inum = ip->i_no_addr;
+		rs_insert(ip);
 	}
 }
 
@@ -1388,6 +1417,7 @@ skip:
  * gfs2_next_unreserved_block - Return next block that is not reserved
  * @rgd: The resource group
  * @block: The starting block
+ * @length: The required length
  * @ip: Ignore any reservations for this inode
  *
  * If the block does not appear in any reservation, then return the
@@ -1397,6 +1427,7 @@ skip:
  */
 
 static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
+				      u32 length,
 				      const struct gfs2_inode *ip)
 {
 	struct gfs2_blkreserv *rs;
@@ -1404,10 +1435,10 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
 	int rc;
 
 	spin_lock(&rgd->rd_rsspin);
-	n = rb_first(&rgd->rd_rstree);
+	n = rgd->rd_rstree.rb_node;
 	while (n) {
 		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
-		rc = rs_cmp(block, 1, rs);
+		rc = rs_cmp(block, length, rs);
 		if (rc < 0)
 			n = n->rb_left;
 		else if (rc > 0)
@@ -1417,9 +1448,9 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
 	}
 
 	if (n) {
-		while ((rs_cmp(block, 1, rs) == 0) && (ip->i_res != rs)) {
+		while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
 			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
-			n = rb_next(&rs->rs_node);
+			n = n->rb_right;
 			if (n == NULL)
 				break;
 			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
@@ -1431,43 +1462,10 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
 }
 
 /**
- * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
- * @rbm: The rbm with rgd already set correctly
- * @block: The block number (filesystem relative)
- *
- * This sets the bi and offset members of an rbm based on a
- * resource group and a filesystem relative block number. The
- * resource group must be set in the rbm on entry, the bi and
- * offset members will be set by this function.
- *
- * Returns: 0 on success, or an error code
- */
-
-static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
-{
-	u64 rblock = block - rbm->rgd->rd_data0;
-	u32 goal = (u32)rblock;
-	int x;
-
-	if (WARN_ON_ONCE(rblock > UINT_MAX))
-		return -EINVAL;
-	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
-		return -E2BIG;
-
-	for (x = 0; x < rbm->rgd->rd_length; x++) {
-		rbm->bi = rbm->rgd->rd_bits + x;
-		if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
-			rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-/**
  * gfs2_reservation_check_and_update - Check for reservations during block alloc
  * @rbm: The current position in the resource group
+ * @ip: The inode for which we are searching for blocks
+ * @minext: The minimum extent length
  *
  * This checks the current position in the rgrp to see whether there is
  * a reservation covering this block. If not then this function is a
@@ -1479,15 +1477,33 @@ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
  */
 
 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
-					     const struct gfs2_inode *ip)
+					     const struct gfs2_inode *ip,
+					     u32 minext)
 {
 	u64 block = gfs2_rbm_to_block(rbm);
+	u32 extlen = 1;
 	u64 nblock;
 	int ret;
 
-	nblock = gfs2_next_unreserved_block(rbm->rgd, block, ip);
+	/*
+	 * If we have a minimum extent length, then skip over any extent
+	 * which is less than the min extent length in size.
+	 */
+	if (minext) {
+		extlen = gfs2_free_extlen(rbm, minext);
+		nblock = block + extlen;
+		if (extlen < minext)
+			goto fail;
+	}
+
+	/*
+	 * Check the extent which has been found against the reservations
+	 * and skip if parts of it are already reserved
+	 */
+	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
 	if (nblock == block)
 		return 0;
+fail:
 	ret = gfs2_rbm_from_block(rbm, nblock);
 	if (ret < 0)
 		return ret;
@@ -1498,6 +1514,7 @@ static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
  * gfs2_rbm_find - Look for blocks of a particular state
  * @rbm: Value/result starting position and final position
  * @state: The state which we want to find
+ * @minext: The requested extent length (0 for a single block)
  * @ip: If set, check for reservations
  * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
  *          around until we've reached the starting point.
@@ -1509,7 +1526,7 @@ static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
  * Returns: 0 on success, -ENOSPC if there is no block of the requested state
  */
 
-static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state,
+static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
 			 const struct gfs2_inode *ip, bool nowrap)
 {
 	struct buffer_head *bh;
@@ -1548,7 +1565,7 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state,
 			return 0;
 
 		initial_bi = rbm->bi;
-		ret = gfs2_reservation_check_and_update(rbm, ip);
+		ret = gfs2_reservation_check_and_update(rbm, ip, minext);
 		if (ret == 0)
 			return 0;
 		if (ret > 0) {
@@ -1608,7 +1625,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
 
 	while (1) {
 		down_write(&sdp->sd_log_flush_lock);
-		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, true);
+		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true);
 		up_write(&sdp->sd_log_flush_lock);
 		if (error == -ENOSPC)
 			break;
@@ -1988,11 +2005,11 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
 		goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
 
 	gfs2_rbm_from_block(&rbm, goal);
-	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, ip, false);
+	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
 
 	if (error == -ENOSPC) {
 		gfs2_rbm_from_block(&rbm, goal);
-		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, false);
+		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
 	}
 
 	/* Since all blocks are reserved in advance, this shouldn't happen */
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index c98f6af07e1c..24077958dcf6 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -79,10 +79,4 @@ static inline bool gfs2_rs_active(struct gfs2_blkreserv *rs)
 	return rs && !RB_EMPTY_NODE(&rs->rs_node);
 }
 
-
-static inline u32 gfs2_bi2rgd_blk(const struct gfs2_bitmap *bi, u32 blk)
-{
-	return (bi->bi_start * GFS2_NBBY) + blk;
-}
-
 #endif /* __RGRP_DOT_H__ */