author:    David Chinner <dgc@sgi.com>  2007-02-10 02:36:17 -0500
committer: Tim Shimmin <tes@sgi.com>    2007-02-10 02:36:17 -0500
commit:    dbcabad19aa91dc9bc7176fd2853fa74f724cd2f (patch)
tree:      b65139c6c19541503444817af740ba265f8b838f /fs/xfs/xfs_fsops.c
parent:    20f4ebf2bf2f57c1a9abb3655391336cc90314b3 (diff)
[XFS] Fix block reservation mechanism.
The block reservation mechanism has been broken since the per-cpu superblock
counters were introduced. Make the block reservation code work with the
per-cpu counters by syncing the counters, snapshotting the amount of
available space, and then modifying the counter state according to the
result. Continue in a loop until we either have no space available or we
reserve some space.

SGI-PV: 956323
SGI-Modid: xfs-linux-melb:xfs-kern:27895a
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_fsops.c')
 fs/xfs/xfs_fsops.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 49 insertions(+), 5 deletions(-)
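
In outline, the patched xfs_reserve_blocks() now follows the pattern sketched
below. This is a condensed restatement of the hunks in the diff that follows,
not standalone-compilable code: the release path, the m_resblks/m_resblks_avail
bookkeeping and the early-ENOSPC short cut are trimmed for brevity.

retry:
	s = XFS_SB_LOCK(mp);
	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);

	/* snapshot free space under the lock and decide how much to take */
	free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	delta = request - mp->m_resblks;
	fdblks_delta = (free < delta) ? -free : -delta;
	/* ... update mp->m_resblks and mp->m_resblks_avail to match ... */
	XFS_SB_UNLOCK(mp, s);

	/*
	 * Apply the change through xfs_mod_incore_sb() so it works whether
	 * or not per-cpu counters are in use; if the free space moved
	 * underneath us we get ENOSPC and go around again.
	 */
	if (fdblks_delta) {
		error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
					  fdblks_delta, 0);
		if (error == ENOSPC)
			goto retry;
	}

The superblock lock therefore only covers taking the snapshot and computing
the delta; the actual counter modification can still fail, which is why the
sequence loops until it either reserves something or finds no space left.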
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index c064e72ada9e..bfde9e6d67e6 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -460,7 +460,7 @@ xfs_fs_counts(
 {
 	unsigned long		s;
 
-	xfs_icsb_sync_counters_lazy(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
 	s = XFS_SB_LOCK(mp);
 	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 	cnt->freertx = mp->m_sb.sb_frextents;
@@ -491,7 +491,7 @@ xfs_reserve_blocks(
 	__uint64_t		*inval,
 	xfs_fsop_resblks_t	*outval)
 {
-	__int64_t		lcounter, delta;
+	__int64_t		lcounter, delta, fdblks_delta;
 	__uint64_t		request;
 	unsigned long		s;
 
@@ -504,17 +504,35 @@ xfs_reserve_blocks(
 	}
 
 	request = *inval;
+
+	/*
+	 * With per-cpu counters, this becomes an interesting
+	 * problem. We need to work out if we are freeing or allocating
+	 * blocks first, then we can do the modification as necessary.
+	 *
+	 * We do this under the XFS_SB_LOCK so that if we are near
+	 * ENOSPC, we will hold out any changes while we work out
+	 * what to do. This means that the amount of free space can
+	 * change while we do this, so we need to retry if we end up
+	 * trying to reserve more space than is available.
+	 *
+	 * We also use the xfs_mod_incore_sb() interface so that we
+	 * don't have to care about whether per-cpu counters are
+	 * enabled, disabled or even compiled in....
+	 */
+retry:
 	s = XFS_SB_LOCK(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);
 
 	/*
 	 * If our previous reservation was larger than the current value,
 	 * then move any unused blocks back to the free pool.
 	 */
-
+	fdblks_delta = 0;
 	if (mp->m_resblks > request) {
 		lcounter = mp->m_resblks_avail - request;
 		if (lcounter > 0) {		/* release unused blocks */
-			mp->m_sb.sb_fdblocks += lcounter;
+			fdblks_delta = lcounter;
 			mp->m_resblks_avail -= lcounter;
 		}
 		mp->m_resblks = request;
@@ -522,24 +540,50 @@ xfs_reserve_blocks(
 		__int64_t	free;
 
 		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+		if (!free)
+			goto out; /* ENOSPC and fdblks_delta = 0 */
+
 		delta = request - mp->m_resblks;
 		lcounter = free - delta;
 		if (lcounter < 0) {
 			/* We can't satisfy the request, just get what we can */
 			mp->m_resblks += free;
 			mp->m_resblks_avail += free;
+			fdblks_delta = -free;
 			mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
 		} else {
+			fdblks_delta = -delta;
 			mp->m_sb.sb_fdblocks =
 				lcounter + XFS_ALLOC_SET_ASIDE(mp);
 			mp->m_resblks = request;
 			mp->m_resblks_avail += delta;
 		}
 	}
-
+out:
 	outval->resblks = mp->m_resblks;
 	outval->resblks_avail = mp->m_resblks_avail;
 	XFS_SB_UNLOCK(mp, s);
+
+	if (fdblks_delta) {
+		/*
+		 * If we are putting blocks back here, m_resblks_avail is
+		 * already at its max so this will put it in the free pool.
+		 *
+		 * If we need space, we'll either succeed in getting it
+		 * from the free block count or we'll get an ENOSPC. If
+		 * we get an ENOSPC, it means things changed while we were
+		 * calculating fdblks_delta and so we should try again to
+		 * see if there is anything left to reserve.
+		 *
+		 * Don't set the reserved flag here - we don't want to reserve
+		 * the extra reserve blocks from the reserve.....
+		 */
+		int error;
+		error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0);
+		if (error == ENOSPC)
+			goto retry;
+	}
+
 	return 0;
 }
 
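
For context on how this code path is exercised: xfs_reserve_blocks() is
normally driven from user space through the XFS_IOC_GET_RESBLKS and
XFS_IOC_SET_RESBLKS ioctls. A minimal caller is sketched below; it assumes
the xfsprogs development headers (the <xfs/xfs.h> include and its install
path can vary by distribution) and needs CAP_SYS_ADMIN.

/* set_resblks.c - request a reserved block pool size on an XFS filesystem */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>		/* xfs_fsop_resblks_t, XFS_IOC_SET_RESBLKS */

int
main(int argc, char **argv)
{
	xfs_fsop_resblks_t	res = { 0 };
	int			fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mountpoint> <blocks>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ask the kernel to resize the reserved pool; it loops as above */
	res.resblks = strtoull(argv[2], NULL, 0);
	if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) < 0) {
		perror("XFS_IOC_SET_RESBLKS");
		close(fd);
		return 1;
	}

	/* the kernel copies back what it actually managed to reserve */
	printf("reserved %llu blocks, %llu available\n",
	       (unsigned long long)res.resblks,
	       (unsigned long long)res.resblks_avail);
	close(fd);
	return 0;
}

Near ENOSPC the returned resblks_avail can be smaller than what was asked
for, reflecting the "get what we can" branch in the patched function.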