author     David Chinner <dgc@sgi.com>    2007-02-10 02:36:17 -0500
committer  Tim Shimmin <tes@sgi.com>      2007-02-10 02:36:17 -0500
commit     dbcabad19aa91dc9bc7176fd2853fa74f724cd2f (patch)
tree       b65139c6c19541503444817af740ba265f8b838f
parent     20f4ebf2bf2f57c1a9abb3655391336cc90314b3 (diff)
[XFS] Fix block reservation mechanism.
The block reservation mechanism has been broken since the per-cpu
superblock counters were introduced. Make the block reservation code work
with the per-cpu counters by syncing the counters, snapshotting the amount
of available space and then doing a modification of the counter state
according to the result. Continue in a loop until we either have no space
available or we reserve some space.
SGI-PV: 956323
SGI-Modid: xfs-linux-melb:xfs-kern:27895a
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Tim Shimmin <tes@sgi.com>
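
The following is a minimal, self-contained C sketch of the retry loop the commit message describes: snapshot the free space, compute a delta, and retry if applying that delta fails with ENOSPC because the free space changed in the meantime. It is not kernel code; the global counter, mock_mod_fdblocks(), and the simplified reserve_blocks() below are hypothetical stand-ins for the XFS per-cpu superblock machinery, used only to illustrate the control flow added to xfs_reserve_blocks().

/*
 * Hypothetical stand-alone model of the snapshot-and-retry pattern.
 * Nothing here is an XFS interface; it only mirrors the control flow
 * this commit adds to xfs_reserve_blocks().
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int64_t fdblocks = 1000;		/* pretend free-block counter */

/* Mock of "modify the free-block counter"; fails if it would go negative. */
static int mock_mod_fdblocks(int64_t delta)
{
	if (fdblocks + delta < 0)
		return ENOSPC;
	fdblocks += delta;
	return 0;
}

static void reserve_blocks(uint64_t request, uint64_t *resblks,
			   uint64_t *resblks_avail)
{
	for (;;) {
		int64_t free = fdblocks;	/* snapshot available space */
		int64_t delta, take;

		if (!free)
			return;			/* nothing left to reserve */

		delta = (int64_t)request - (int64_t)*resblks;
		if (delta <= 0)
			return;			/* reservation already satisfied */

		take = (free < delta) ? free : delta;

		/* Apply the modification; retry if space changed underneath us. */
		if (mock_mod_fdblocks(-take) == ENOSPC)
			continue;

		*resblks += take;
		*resblks_avail += take;
		return;
	}
}

int main(void)
{
	uint64_t resblks = 0, resblks_avail = 0;

	reserve_blocks(512, &resblks, &resblks_avail);
	printf("reserved %llu blocks, %lld left in the free pool\n",
	       (unsigned long long)resblks_avail, (long long)fdblocks);
	return 0;
}

As in the kernel change, a partial reservation (free space smaller than the delta) is accepted as-is; the loop only repeats when the counter modification itself reports ENOSPC.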
-rw-r--r--  fs/xfs/xfs_fsops.c  | 54
-rw-r--r--  fs/xfs/xfs_mount.c  | 16
-rw-r--r--  fs/xfs/xfs_mount.h  |  2
-rw-r--r--  fs/xfs/xfs_vfsops.c |  2
4 files changed, 54 insertions, 20 deletions
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index c064e72ada9e..bfde9e6d67e6 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -460,7 +460,7 @@ xfs_fs_counts(
 {
 	unsigned long	s;
 
-	xfs_icsb_sync_counters_lazy(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
 	s = XFS_SB_LOCK(mp);
 	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 	cnt->freertx = mp->m_sb.sb_frextents;
@@ -491,7 +491,7 @@ xfs_reserve_blocks(
 	__uint64_t		*inval,
 	xfs_fsop_resblks_t	*outval)
 {
-	__int64_t		lcounter, delta;
+	__int64_t		lcounter, delta, fdblks_delta;
 	__uint64_t		request;
 	unsigned long		s;
 
@@ -504,17 +504,35 @@ xfs_reserve_blocks(
 	}
 
 	request = *inval;
+
+	/*
+	 * With per-cpu counters, this becomes an interesting
+	 * problem. we needto work out if we are freeing or allocation
+	 * blocks first, then we can do the modification as necessary.
+	 *
+	 * We do this under the XFS_SB_LOCK so that if we are near
+	 * ENOSPC, we will hold out any changes while we work out
+	 * what to do. This means that the amount of free space can
+	 * change while we do this, so we need to retry if we end up
+	 * trying to reserve more space than is available.
+	 *
+	 * We also use the xfs_mod_incore_sb() interface so that we
+	 * don't have to care about whether per cpu counter are
+	 * enabled, disabled or even compiled in....
+	 */
+retry:
 	s = XFS_SB_LOCK(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);
 
 	/*
 	 * If our previous reservation was larger than the current value,
 	 * then move any unused blocks back to the free pool.
 	 */
-
+	fdblks_delta = 0;
 	if (mp->m_resblks > request) {
 		lcounter = mp->m_resblks_avail - request;
 		if (lcounter > 0) {		/* release unused blocks */
-			mp->m_sb.sb_fdblocks += lcounter;
+			fdblks_delta = lcounter;
 			mp->m_resblks_avail -= lcounter;
 		}
 		mp->m_resblks = request;
@@ -522,24 +540,50 @@ xfs_reserve_blocks(
 		__int64_t	free;
 
 		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+		if (!free)
+			goto out; /* ENOSPC and fdblks_delta = 0 */
+
 		delta = request - mp->m_resblks;
 		lcounter = free - delta;
 		if (lcounter < 0) {
 			/* We can't satisfy the request, just get what we can */
 			mp->m_resblks += free;
 			mp->m_resblks_avail += free;
+			fdblks_delta = -free;
 			mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
 		} else {
+			fdblks_delta = -delta;
 			mp->m_sb.sb_fdblocks =
 				lcounter + XFS_ALLOC_SET_ASIDE(mp);
 			mp->m_resblks = request;
 			mp->m_resblks_avail += delta;
 		}
 	}
-
+out:
 	outval->resblks = mp->m_resblks;
 	outval->resblks_avail = mp->m_resblks_avail;
 	XFS_SB_UNLOCK(mp, s);
+
+	if (fdblks_delta) {
+		/*
+		 * If we are putting blocks back here, m_resblks_avail is
+		 * already at it's max so this will put it in the free pool.
+		 *
+		 * If we need space, we'll either succeed in getting it
+		 * from the free block count or we'll get an enospc. If
+		 * we get a ENOSPC, it means things changed while we were
+		 * calculating fdblks_delta and so we should try again to
+		 * see if there is anything left to reserve.
+		 *
+		 * Don't set the reserved flag here - we don't want to reserve
+		 * the extra reserve blocks from the reserve.....
+		 */
+		int error;
+		error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0);
+		if (error == ENOSPC)
+			goto retry;
+	}
+
 	return 0;
 }
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 0df07c1df76e..30a5781a46d4 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1979,8 +1979,8 @@ xfs_icsb_enable_counter(
 	xfs_icsb_unlock_all_counters(mp);
 }
 
-STATIC void
-xfs_icsb_sync_counters_int(
+void
+xfs_icsb_sync_counters_flags(
 	xfs_mount_t	*mp,
 	int		flags)
 {
@@ -2012,17 +2012,7 @@ STATIC void
 xfs_icsb_sync_counters(
 	xfs_mount_t	*mp)
 {
-	xfs_icsb_sync_counters_int(mp, 0);
-}
-
-/*
- * lazy addition used for things like df, background sb syncs, etc
- */
-void
-xfs_icsb_sync_counters_lazy(
-	xfs_mount_t	*mp)
-{
-	xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
+	xfs_icsb_sync_counters_flags(mp, 0);
 }
 
 /*
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 9a8e7151b65c..b65dae61eb8d 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -307,7 +307,7 @@ typedef struct xfs_icsb_cnts {
 #define XFS_ICSB_LAZY_COUNT	(1 << 1)	/* accuracy not needed */
 
 extern int	xfs_icsb_init_counters(struct xfs_mount *);
-extern void	xfs_icsb_sync_counters_lazy(struct xfs_mount *);
+extern void	xfs_icsb_sync_counters_flags(struct xfs_mount *, int);
 
 #else
 #define xfs_icsb_init_counters(mp)	(0)
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index aec4e8d9cba7..f5ea74b999b6 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -806,7 +806,7 @@ xfs_statvfs(
 
 	statp->f_type = XFS_SB_MAGIC;
 
-	xfs_icsb_sync_counters_lazy(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
 	s = XFS_SB_LOCK(mp);
 	statp->f_bsize = sbp->sb_blocksize;
 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
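
For context on how the fixed path gets exercised: xfs_reserve_blocks() sits behind the XFS reserved-blocks ioctls. Below is a hedged userspace sketch, assuming the XFS_IOC_SET_RESBLKS ioctl and the xfs_fsop_resblks_t layout exported via xfsprogs' <xfs/xfs.h>; the mount point and the requested block count are illustrative only, and the call typically requires root.

/*
 * Sketch: ask XFS to keep a pool of reserved blocks on a mounted
 * filesystem, then print what the kernel reports back (the ioctl
 * fills resblks/resblks_avail from xfs_reserve_blocks()'s outval).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>		/* XFS_IOC_SET_RESBLKS, xfs_fsop_resblks_t */

int main(void)
{
	xfs_fsop_resblks_t res = { .resblks = 8192 };	/* example request */
	int fd = open("/mnt/xfs", O_RDONLY);		/* any object on the fs */

	if (fd < 0)
		return 1;
	if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) == 0)
		printf("reserved %llu, available %llu\n",
		       (unsigned long long)res.resblks,
		       (unsigned long long)res.resblks_avail);
	close(fd);
	return 0;
}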