author     Qu Wenruo <wqu@suse.com>          2017-12-21 00:42:04 -0500
committer  David Sterba <dsterba@suse.com>   2018-04-18 10:46:51 -0400
commit     ff6bc37eb7f6e7b052e50c13a480e1080b3ec07a (patch)
tree       9a583583fbd29c65d866562c3f54daf6f9de445d /fs/btrfs/ctree.h
parent     a514d63882c3d2063b21b865447266ebcb18b04c (diff)
btrfs: qgroup: Use independent and accurate per inode qgroup rsv
Unlike the reservation calculation used for the inode metadata rsv, qgroup doesn't really need to care about things like csum size or extent usage for the whole tree COW. Qgroups care more about the net change in extent usage.

That is to say, if we're going to insert one file extent, it will mostly find its place in an already-COWed tree block, leaving no change in extent usage. Or it causes a leaf split, resulting in one net new extent and increasing the qgroup number by nodesize. Or, in an even rarer case, it increases the tree level, increasing the qgroup number by 2 * nodesize.

So instead of the complicated calculation used by the extent allocator, which cares more about accuracy and never under-reserving, qgroup doesn't need that over-estimated reservation.

This patch maintains two new members in the btrfs_block_rsv structure for qgroup, using a much smaller calculation for the qgroup rsv and thereby reducing false EDQUOT.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
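To make the arithmetic above concrete, here is a minimal sketch of the per-extent estimate the message describes; it is not code from this patch, and the helper name and signature are hypothetical:

/*
 * Hypothetical sketch of the qgroup-side estimate described above.
 * One file extent insert usually changes net extent usage by 0; at
 * worst it splits a leaf (one new block, nodesize bytes) or, far
 * more rarely, raises the tree level (2 * nodesize). Reserving
 * nodesize per extent is therefore a practical upper bound, far
 * below the tree-COW worst case used for the normal metadata rsv.
 */
static u64 qgroup_rsv_for_extents(u64 nodesize, u64 nr_extents)
{
	return nr_extents * nodesize;
}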
Diffstat (limited to 'fs/btrfs/ctree.h')
-rw-r--r--   fs/btrfs/ctree.h   19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index ec84e2dabb04..2771cc56a622 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -459,6 +459,25 @@ struct btrfs_block_rsv {
 	unsigned short full;
 	unsigned short type;
 	unsigned short failfast;
+
+	/*
+	 * Qgroup equivalent of @size and @reserved.
+	 *
+	 * Unlike the normal @size/@reserved for inode rsv, qgroup doesn't care
+	 * about things like csum size nor how many tree blocks it will need to
+	 * reserve.
+	 *
+	 * Qgroup cares more about the net change of the extent usage.
+	 *
+	 * So even if one newly inserted file extent causes a leaf split and a
+	 * level increase in the worst case, nodesize for each file extent is
+	 * already an over-estimate.
+	 *
+	 * In short, qgroup_rsv_size/qgroup_rsv_reserved is the upper limit of
+	 * the qgroup metadata reservation possibly needed.
+	 */
+	u64 qgroup_rsv_size;
+	u64 qgroup_rsv_reserved;
 };
 
 /*
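For illustration only, a sketch of how the two new members could be kept in lockstep with the normal rsv accounting. The helper below is hypothetical and not part of this patch; it assumes the existing spinlock in struct btrfs_block_rsv and a caller that has already computed @qgroup_bytes with the cheaper net-change estimate (e.g. nodesize per file extent) rather than the tree-COW worst case used for @size:

/*
 * Hypothetical helper, not in this patch: grow the qgroup-side
 * reservation alongside the normal @size/@reserved bookkeeping.
 */
static void block_rsv_add_qgroup_bytes(struct btrfs_block_rsv *rsv,
				       u64 qgroup_bytes)
{
	spin_lock(&rsv->lock);
	rsv->qgroup_rsv_size += qgroup_bytes;
	rsv->qgroup_rsv_reserved += qgroup_bytes;
	spin_unlock(&rsv->lock);
}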