author		Theodore Ts'o <tytso@mit.edu>	2009-08-09 22:01:13 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2009-08-09 22:01:13 -0400
commit		4ba74d00a20256e22f159cb288ff34b587608917 (patch)
tree		6859cf49f8043ec11846e2c8ebc836499e1048c4 /fs
parent		0ef90db93a4ddfc300af288c2a1bfc1e6c79da64 (diff)
ext4: Fix bugs in mballoc's stream allocation mode
The logic around sbi->s_mb_last_group and sbi->s_mb_last_start was all
screwed up.  These fields were getting set unconditionally, even when
stream allocation had not taken place, and they were being used even when
the file was smaller than s_mb_stream_request, which is when the
allocation should _not_ be doing stream allocation.

Fix this by determining once, in ext4_mb_group_or_file(), whether or not
stream allocation should take place, and setting a flag which gets used
in ext4_mb_regular_allocator() and ext4_mb_use_best_found().  This
simplifies the code and assures that we are consistently using (or not
using) the stream allocation logic.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
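As an illustration of the shape of the fix, below is a minimal userspace sketch
(not ext4 code) of the decide-once-then-check-a-flag pattern the patch adopts:
the size threshold is compared in exactly one place, and the two later consumers
test only the recorded flag.  Names such as alloc_ctx, fs_info, STREAM_THRESHOLD
and the helper functions are hypothetical stand-ins for ext4_allocation_context,
ext4_sb_info, s_mb_stream_request and the ext4 functions named above.

/*
 * Hypothetical sketch only: mirrors the pattern of the patch, not the
 * ext4 implementation.  Build with: cc -o stream_flag stream_flag.c
 */
#include <stdio.h>

#define HINT_DATA        0x0001  /* request is for file data */
#define STREAM_ALLOC     0x0800  /* mirrors EXT4_MB_STREAM_ALLOC */

#define STREAM_THRESHOLD 16      /* stand-in for sbi->s_mb_stream_request */

struct fs_info {                      /* stand-in for ext4_sb_info */
        unsigned long last_group;     /* like s_mb_last_group */
        unsigned long last_start;     /* like s_mb_last_start */
};

struct alloc_ctx {                    /* stand-in for ext4_allocation_context */
        unsigned int flags;
        unsigned long file_blocks;    /* like max(request end, i_size) in blocks */
};

/* Decide once, as ext4_mb_group_or_file() now does: large data files stream. */
static void decide_stream(struct alloc_ctx *ac)
{
        if ((ac->flags & HINT_DATA) && ac->file_blocks >= STREAM_THRESHOLD)
                ac->flags |= STREAM_ALLOC;
}

/* Consumer 1, like ext4_mb_regular_allocator(): reuse the saved global goal. */
static void pick_goal(const struct alloc_ctx *ac, const struct fs_info *fi,
                      unsigned long *group, unsigned long *start)
{
        if (ac->flags & STREAM_ALLOC) {
                *group = fi->last_group;
                *start = fi->last_start;
        } else {
                *group = 0;
                *start = 0;
        }
}

/* Consumer 2, like ext4_mb_use_best_found(): save the result only if streaming. */
static void record_result(const struct alloc_ctx *ac, struct fs_info *fi,
                          unsigned long group, unsigned long start)
{
        if (ac->flags & STREAM_ALLOC) {
                fi->last_group = group;
                fi->last_start = start;
        }
}

int main(void)
{
        struct fs_info fi = { .last_group = 7, .last_start = 120 };
        struct alloc_ctx ac = { .flags = HINT_DATA, .file_blocks = 64 };
        unsigned long group, start;

        decide_stream(&ac);                  /* the single point of decision */
        pick_goal(&ac, &fi, &group, &start);
        printf("goal: group %lu start %lu\n", group, start);
        record_result(&ac, &fi, group + 1, 0);
        printf("saved: group %lu start %lu\n", fi.last_group, fi.last_start);
        return 0;
}

Because the decision is recorded once in ac->flags, the two consumers can never
disagree about whether stream allocation is in effect, which is exactly the
consistency the message above describes for EXT4_MB_STREAM_ALLOC.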
Diffstat (limited to 'fs')
-rw-r--r--	fs/ext4/ext4.h		2
-rw-r--r--	fs/ext4/mballoc.c	23
2 files changed, 12 insertions, 13 deletions
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index e267727cc62d..70aa951ecb3c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -88,6 +88,8 @@ typedef unsigned int ext4_group_t;
 #define EXT4_MB_HINT_TRY_GOAL		0x0200
 /* blocks already pre-reserved by delayed allocation */
 #define EXT4_MB_DELALLOC_RESERVED	0x0400
+/* We are doing stream allocation */
+#define EXT4_MB_STREAM_ALLOC		0x0800
 
 
 struct ext4_allocation_request {
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 3434c603432d..90a30ce822fc 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1361,7 +1361,7 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
 	ac->alloc_semp = e4b->alloc_semp;
 	e4b->alloc_semp = NULL;
 	/* store last allocated for subsequent stream allocation */
-	if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
+	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
 		spin_lock(&sbi->s_md_lock);
 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
@@ -1939,7 +1939,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 	struct ext4_sb_info *sbi;
 	struct super_block *sb;
 	struct ext4_buddy e4b;
-	loff_t size, isize;
 
 	sb = ac->ac_sb;
 	sbi = EXT4_SB(sb);
@@ -1975,20 +1974,16 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 	}
 
 	bsbits = ac->ac_sb->s_blocksize_bits;
-	/* if stream allocation is enabled, use global goal */
-	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
-	isize = i_size_read(ac->ac_inode) >> bsbits;
-	if (size < isize)
-		size = isize;
 
-	if (size < sbi->s_mb_stream_request &&
-			(ac->ac_flags & EXT4_MB_HINT_DATA)) {
+	/* if stream allocation is enabled, use global goal */
+	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
 		/* TBD: may be hot point */
 		spin_lock(&sbi->s_md_lock);
 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
 		spin_unlock(&sbi->s_md_lock);
 	}
+
 	/* Let's just scan groups to find more-less suitable blocks */
 	cr = ac->ac_2order ? 0 : 1;
 	/*
@@ -4192,16 +4187,18 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
 		return;
 
+	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
+		return;
+
 	size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
 	isize = i_size_read(ac->ac_inode) >> bsbits;
 	size = max(size, isize);
 
 	/* don't use group allocation for large files */
-	if (size >= sbi->s_mb_stream_request)
-		return;
-
-	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
+	if (size >= sbi->s_mb_stream_request) {
+		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
 		return;
+	}
 
 	BUG_ON(ac->ac_lg != NULL);
 	/*