Diffstat (limited to 'fs/xfs/xfs_fsops.c')
-rw-r--r--  fs/xfs/xfs_fsops.c  60
1 files changed, 51 insertions, 9 deletions
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index c064e72ada9e..32c37c1c47ab 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -250,8 +250,7 @@ xfs_growfs_data_private(
 		block->bb_numrecs = cpu_to_be16(1);
 		block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
 		block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
-		arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
-			block, 1, mp->m_alloc_mxr[0]);
+		arec = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
 		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
 		arec->ar_blockcount = cpu_to_be32(
 			agsize - be32_to_cpu(arec->ar_startblock));
@@ -272,8 +271,7 @@ xfs_growfs_data_private(
 		block->bb_numrecs = cpu_to_be16(1);
 		block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
 		block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
-		arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
-			block, 1, mp->m_alloc_mxr[0]);
+		arec = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
 		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
 		arec->ar_blockcount = cpu_to_be32(
 			agsize - be32_to_cpu(arec->ar_startblock));
@@ -460,7 +458,7 @@ xfs_fs_counts(
 {
 	unsigned long s;
 
-	xfs_icsb_sync_counters_lazy(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
 	s = XFS_SB_LOCK(mp);
 	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 	cnt->freertx = mp->m_sb.sb_frextents;
@@ -491,7 +489,7 @@ xfs_reserve_blocks(
 	__uint64_t *inval,
 	xfs_fsop_resblks_t *outval)
 {
-	__int64_t lcounter, delta;
+	__int64_t lcounter, delta, fdblks_delta;
 	__uint64_t request;
 	unsigned long s;
 
@@ -504,17 +502,35 @@ xfs_reserve_blocks(
 	}
 
 	request = *inval;
+
+	/*
+	 * With per-cpu counters, this becomes an interesting
+	 * problem. We need to work out if we are freeing or allocating
+	 * blocks first, then we can do the modification as necessary.
+	 *
+	 * We do this under the XFS_SB_LOCK so that if we are near
+	 * ENOSPC, we will hold out any changes while we work out
+	 * what to do. This means that the amount of free space can
+	 * change while we do this, so we need to retry if we end up
+	 * trying to reserve more space than is available.
+	 *
+	 * We also use the xfs_mod_incore_sb() interface so that we
+	 * don't have to care about whether per-cpu counters are
+	 * enabled, disabled or even compiled in....
+	 */
+retry:
 	s = XFS_SB_LOCK(mp);
+	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);
 
 	/*
 	 * If our previous reservation was larger than the current value,
 	 * then move any unused blocks back to the free pool.
 	 */
-
+	fdblks_delta = 0;
 	if (mp->m_resblks > request) {
 		lcounter = mp->m_resblks_avail - request;
 		if (lcounter > 0) {	/* release unused blocks */
-			mp->m_sb.sb_fdblocks += lcounter;
+			fdblks_delta = lcounter;
 			mp->m_resblks_avail -= lcounter;
 		}
 		mp->m_resblks = request;
@@ -522,24 +538,50 @@ xfs_reserve_blocks(
 		__int64_t free;
 
 		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+		if (!free)
+			goto out; /* ENOSPC and fdblks_delta = 0 */
+
 		delta = request - mp->m_resblks;
 		lcounter = free - delta;
 		if (lcounter < 0) {
 			/* We can't satisfy the request, just get what we can */
 			mp->m_resblks += free;
 			mp->m_resblks_avail += free;
+			fdblks_delta = -free;
 			mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
 		} else {
+			fdblks_delta = -delta;
 			mp->m_sb.sb_fdblocks =
 				lcounter + XFS_ALLOC_SET_ASIDE(mp);
 			mp->m_resblks = request;
 			mp->m_resblks_avail += delta;
 		}
 	}
-
+out:
 	outval->resblks = mp->m_resblks;
 	outval->resblks_avail = mp->m_resblks_avail;
 	XFS_SB_UNLOCK(mp, s);
+
+	if (fdblks_delta) {
+		/*
+		 * If we are putting blocks back here, m_resblks_avail is
+		 * already at its max so this will put it in the free pool.
+		 *
+		 * If we need space, we'll either succeed in getting it
+		 * from the free block count or we'll get an ENOSPC. If
+		 * we get an ENOSPC, it means things changed while we were
+		 * calculating fdblks_delta and so we should try again to
+		 * see if there is anything left to reserve.
+		 *
+		 * Don't set the reserved flag here - we don't want to reserve
+		 * the extra reserve blocks from the reserve.....
+		 */
+		int error;
+		error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0);
+		if (error == ENOSPC)
+			goto retry;
+	}
+
 	return 0;
 }
 
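Note: the xfs_reserve_blocks() part of this patch computes a free-block delta (fdblks_delta) under XFS_SB_LOCK but applies it only after dropping the lock, via xfs_mod_incore_sb(); if that call returns ENOSPC because the free space moved in the meantime, it jumps back to retry and recomputes. The standalone C sketch below models only that compute-then-apply-then-retry pattern against a mock counter; mock_sb_lock, mock_fdblocks, mock_mod_fdblocks and take_blocks are illustrative names and are not part of XFS.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mock_sb_lock = PTHREAD_MUTEX_INITIALIZER;
static long long mock_fdblocks = 1000;		/* stand-in for sb_fdblocks */

/*
 * Apply a signed delta to the free-block counter, failing with ENOSPC if it
 * would go negative -- roughly the contract the patch relies on from
 * xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, delta, 0).
 */
static int mock_mod_fdblocks(long long delta)
{
	int error = 0;

	pthread_mutex_lock(&mock_sb_lock);
	if (mock_fdblocks + delta < 0)
		error = ENOSPC;
	else
		mock_fdblocks += delta;
	pthread_mutex_unlock(&mock_sb_lock);
	return error;
}

/*
 * Reserve up to "want" blocks.  The snapshot of the free count and the
 * application of the delta happen in separate critical sections, so the
 * count can change in between; an ENOSPC from the apply step just means
 * "recompute and try again", mirroring the goto retry in the patch.
 */
static long long take_blocks(long long want)
{
	long long avail, delta;

retry:
	pthread_mutex_lock(&mock_sb_lock);
	avail = mock_fdblocks;			/* snapshot under the lock */
	pthread_mutex_unlock(&mock_sb_lock);

	delta = want < avail ? want : avail;	/* take what we can get */
	if (delta <= 0)
		return 0;			/* nothing left to reserve */

	if (mock_mod_fdblocks(-delta) == ENOSPC)
		goto retry;			/* counter moved underneath us */
	return delta;
}

int main(void)
{
	long long got = take_blocks(64);

	printf("reserved %lld blocks, %lld left free\n", got, mock_fdblocks);
	return 0;
}

The real patch expresses the same idea with XFS's own primitives so that, as the added comment says, the code does not care whether the per-cpu superblock counters are enabled, disabled or compiled out.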