author		Jose R. Santos <jrs@us.ibm.com>	2008-07-11 19:27:31 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2008-07-11 19:27:31 -0400
commit		772cb7c83ba256a11c7bf99a11bef3858d23767c (patch)
tree		a42b97e5cbd870a76b2646c2dcb658a92c53f637 /fs/ext4/balloc.c
parent		736603ab297506f4396cb5af592004499950fcfd (diff)
ext4: New inode allocation for FLEX_BG meta-data groups.
This patch mostly controls the way inodes are allocated in order to
make ialloc aware of flex_bg block group grouping. It achieves this
by bypassing the Orlov allocator when block group meta-data are packed
together through mke2fs. Since the impact on the block allocator is
minimal, this patch should have little or no effect on other block
allocation algorithms. By controlling the inode allocation, it can
basically control where the initial search for new blocks begins and
thus indirectly manipulate the block allocator.
This allocator favors data and meta-data locality, so the disk will
gradually be filled from block group zero upward. This helps improve
performance by reducing seek time. Since the group of inode tables
within one flex_bg is treated as one giant inode table, uninitialized
block groups would not need to partially initialize as many inode
tables as with Orlov, which helps fsck time as filesystem usage
goes up.
Signed-off-by: Jose R. Santos <jrs@us.ibm.com>
Signed-off-by: Valerie Clement <valerie.clement@bull.net>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/balloc.c')
-rw-r--r--	fs/ext4/balloc.c	14
1 file changed, 14 insertions, 0 deletions
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ba411233cc25..0b2b7549ac63 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -809,6 +809,13 @@ do_more:
 	spin_unlock(sb_bgl_lock(sbi, block_group));
 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
 
+	if (sbi->s_log_groups_per_flex) {
+		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+		spin_lock(sb_bgl_lock(sbi, flex_group));
+		sbi->s_flex_groups[flex_group].free_blocks += count;
+		spin_unlock(sb_bgl_lock(sbi, flex_group));
+	}
+
 	/* We dirtied the bitmap block */
 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
 	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
@@ -1883,6 +1890,13 @@ allocated:
 	spin_unlock(sb_bgl_lock(sbi, group_no));
 	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
 
+	if (sbi->s_log_groups_per_flex) {
+		ext4_group_t flex_group = ext4_flex_group(sbi, group_no);
+		spin_lock(sb_bgl_lock(sbi, flex_group));
+		sbi->s_flex_groups[flex_group].free_blocks -= num;
+		spin_unlock(sb_bgl_lock(sbi, flex_group));
+	}
+
 	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
 	err = ext4_journal_dirty_metadata(handle, gdp_bh);
 	if (!fatal)
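
Both hunks apply the same pattern at the two places balloc.c changes
block counts: the do_more: hunk runs when blocks are freed
(free_blocks += count), the allocated: hunk when blocks are claimed
(free_blocks -= num), and each update takes sb_bgl_lock() keyed by the
flex group number instead of the block group number. As a sketch of
what is being indexed, assuming one counter pair per flex group;
free_blocks appears in the diff, while free_inodes is an assumption
drawn from the patch's inode-allocation purpose:

	struct flex_groups {
		__u32 free_inodes;	/* assumed: read by the flex_bg-aware ialloc */
		__u32 free_blocks;	/* adjusted by the two hunks above */
	};

Reusing the existing sb_bgl_lock() hash for flex group numbers avoids
introducing a new lock, at the cost that a flex group can occasionally
share a spinlock bucket with an unrelated block group.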