diff options
author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2008-10-10 09:39:00 -0400 |
---|---|---|
committer | Theodore Ts'o <tytso@mit.edu> | 2008-10-10 09:39:00 -0400 |
commit | 6bc6e63fcd7dac9e633ea29f1fddd9580ab28f3f (patch) | |
tree | 144d53023af5faeb94b9b3aa28e186a33e6c5b98 /fs/ext4/mballoc.c | |
parent | 030ba6bc67b4f2bc5cd174f57785a1745c929abe (diff) |
ext4: Add percpu dirty block accounting.
This patch adds dirty block accounting using percpu_counters. Delayed
allocation block reservation is now done by updating the dirty block
counter. In a later patch we switch to non-delalloc mode if the
filesystem's free block count is greater than 150% of the total
filesystem dirty blocks.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/mballoc.c')
-rw-r--r-- | fs/ext4/mballoc.c | 31 |
1 files changed, 14 insertions, 17 deletions
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index e4f30de11a9d..585c25950184 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -2880,7 +2880,7 @@ void exit_ext4_mballoc(void) | |||
2880 | */ | 2880 | */ |
2881 | static noinline_for_stack int | 2881 | static noinline_for_stack int |
2882 | ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | 2882 | ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, |
2883 | handle_t *handle) | 2883 | handle_t *handle, unsigned long reserv_blks) |
2884 | { | 2884 | { |
2885 | struct buffer_head *bitmap_bh = NULL; | 2885 | struct buffer_head *bitmap_bh = NULL; |
2886 | struct ext4_super_block *es; | 2886 | struct ext4_super_block *es; |
@@ -2969,21 +2969,16 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
2969 | le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len); | 2969 | le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len); |
2970 | gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp); | 2970 | gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp); |
2971 | spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group)); | 2971 | spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group)); |
2972 | 2972 | percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len); | |
2973 | /* | 2973 | /* |
2974 | * free blocks account has already be reduced/reserved | 2974 | * Now reduce the dirty block count also. Should not go negative |
2975 | * at write_begin() time for delayed allocation | ||
2976 | * do not double accounting | ||
2977 | */ | 2975 | */ |
2978 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED) && | 2976 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) |
2979 | ac->ac_o_ex.fe_len != ac->ac_b_ex.fe_len) { | 2977 | /* release all the reserved blocks if non delalloc */ |
2980 | /* | 2978 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); |
2981 | * we allocated less blocks than we calimed | 2979 | else |
2982 | * Add the difference back | 2980 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, |
2983 | */ | 2981 | ac->ac_b_ex.fe_len); |
2984 | percpu_counter_add(&sbi->s_freeblocks_counter, | ||
2985 | ac->ac_o_ex.fe_len - ac->ac_b_ex.fe_len); | ||
2986 | } | ||
2987 | 2982 | ||
2988 | if (sbi->s_log_groups_per_flex) { | 2983 | if (sbi->s_log_groups_per_flex) { |
2989 | ext4_group_t flex_group = ext4_flex_group(sbi, | 2984 | ext4_group_t flex_group = ext4_flex_group(sbi, |
@@ -4376,12 +4371,13 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) | |||
4376 | ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | 4371 | ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, |
4377 | struct ext4_allocation_request *ar, int *errp) | 4372 | struct ext4_allocation_request *ar, int *errp) |
4378 | { | 4373 | { |
4374 | int freed; | ||
4379 | struct ext4_allocation_context *ac = NULL; | 4375 | struct ext4_allocation_context *ac = NULL; |
4380 | struct ext4_sb_info *sbi; | 4376 | struct ext4_sb_info *sbi; |
4381 | struct super_block *sb; | 4377 | struct super_block *sb; |
4382 | ext4_fsblk_t block = 0; | 4378 | ext4_fsblk_t block = 0; |
4383 | int freed; | 4379 | unsigned long inquota; |
4384 | int inquota; | 4380 | unsigned long reserv_blks = 0; |
4385 | 4381 | ||
4386 | sb = ar->inode->i_sb; | 4382 | sb = ar->inode->i_sb; |
4387 | sbi = EXT4_SB(sb); | 4383 | sbi = EXT4_SB(sb); |
@@ -4404,6 +4400,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |||
4404 | *errp = -ENOSPC; | 4400 | *errp = -ENOSPC; |
4405 | return 0; | 4401 | return 0; |
4406 | } | 4402 | } |
4403 | reserv_blks = ar->len; | ||
4407 | } | 4404 | } |
4408 | while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) { | 4405 | while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) { |
4409 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; | 4406 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; |
@@ -4450,7 +4447,7 @@ repeat: | |||
4450 | } | 4447 | } |
4451 | 4448 | ||
4452 | if (likely(ac->ac_status == AC_STATUS_FOUND)) { | 4449 | if (likely(ac->ac_status == AC_STATUS_FOUND)) { |
4453 | *errp = ext4_mb_mark_diskspace_used(ac, handle); | 4450 | *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks); |
4454 | if (*errp == -EAGAIN) { | 4451 | if (*errp == -EAGAIN) { |
4455 | ac->ac_b_ex.fe_group = 0; | 4452 | ac->ac_b_ex.fe_group = 0; |
4456 | ac->ac_b_ex.fe_start = 0; | 4453 | ac->ac_b_ex.fe_start = 0; |