diff options
author | Brian Foster <bfoster@redhat.com> | 2014-07-24 05:56:08 -0400 |
---|---|---|
committer | Dave Chinner <david@fromorbit.com> | 2014-07-24 05:56:08 -0400 |
commit | f074051ff550f9f1f1a8ab4868277d049a7fd7aa (patch) | |
tree | 9009e61e9af493336bc869535426c286960eaf7e /fs/xfs | |
parent | dc06f398f00059707236d456d954a3a9d2a829db (diff) |
xfs: squash prealloc while over quota free space as well
From: Brian Foster <bfoster@redhat.com>
Commit 4d559a3b introduced heavy prealloc squashing to catch the case
of requesting too large a prealloc on smaller filesystems, leading to
repeated flush and retry cycles that occur on ENOSPC. Now that we issue
eofblocks scans on EDQUOT/ENOSPC, squash the prealloc against the
minimum available free space across all applicable quotas as well to
avoid a similar problem of repeated eofblocks scans.
Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r-- | fs/xfs/xfs_iomap.c | 20 |
1 file changed, 14 insertions, 6 deletions
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 923c044bd26f..783b3b1b0684 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -397,7 +397,8 @@ xfs_quota_calc_throttle( | |||
397 | struct xfs_inode *ip, | 397 | struct xfs_inode *ip, |
398 | int type, | 398 | int type, |
399 | xfs_fsblock_t *qblocks, | 399 | xfs_fsblock_t *qblocks, |
400 | int *qshift) | 400 | int *qshift, |
401 | int64_t *qfreesp) | ||
401 | { | 402 | { |
402 | int64_t freesp; | 403 | int64_t freesp; |
403 | int shift = 0; | 404 | int shift = 0; |
@@ -406,6 +407,7 @@ xfs_quota_calc_throttle( | |||
406 | /* over hi wmark, squash the prealloc completely */ | 407 | /* over hi wmark, squash the prealloc completely */ |
407 | if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) { | 408 | if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) { |
408 | *qblocks = 0; | 409 | *qblocks = 0; |
410 | *qfreesp = 0; | ||
409 | return; | 411 | return; |
410 | } | 412 | } |
411 | 413 | ||
@@ -418,6 +420,9 @@ xfs_quota_calc_throttle( | |||
418 | shift += 2; | 420 | shift += 2; |
419 | } | 421 | } |
420 | 422 | ||
423 | if (freesp < *qfreesp) | ||
424 | *qfreesp = freesp; | ||
425 | |||
421 | /* only overwrite the throttle values if we are more aggressive */ | 426 | /* only overwrite the throttle values if we are more aggressive */ |
422 | if ((freesp >> shift) < (*qblocks >> *qshift)) { | 427 | if ((freesp >> shift) < (*qblocks >> *qshift)) { |
423 | *qblocks = freesp; | 428 | *qblocks = freesp; |
@@ -476,15 +481,18 @@ xfs_iomap_prealloc_size( | |||
476 | } | 481 | } |
477 | 482 | ||
478 | /* | 483 | /* |
479 | * Check each quota to cap the prealloc size and provide a shift | 484 | * Check each quota to cap the prealloc size, provide a shift value to |
480 | * value to throttle with. | 485 | * throttle with and adjust amount of available space. |
481 | */ | 486 | */ |
482 | if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks)) | 487 | if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks)) |
483 | xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift); | 488 | xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift, |
489 | &freesp); | ||
484 | if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks)) | 490 | if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks)) |
485 | xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift); | 491 | xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift, |
492 | &freesp); | ||
486 | if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks)) | 493 | if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks)) |
487 | xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift); | 494 | xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift, |
495 | &freesp); | ||
488 | 496 | ||
489 | /* | 497 | /* |
490 | * The final prealloc size is set to the minimum of free space available | 498 | * The final prealloc size is set to the minimum of free space available |