author		Chao Yu <chao2.yu@samsung.com>	2014-05-08 05:00:35 -0400
committer	Jaegeuk Kim <jaegeuk.kim@samsung.com>	2014-05-08 05:23:21 -0400
commit		adf8d90b6a949dc80e827263fccb31f8eb08a55d (patch)
tree		a7bc2e33fdd4ad08981c35b04ec94c0ee57093a8 /fs/f2fs/segment.c
parent		c20e89cde669799eff62bf8c00ca9a4819c4e11f (diff)
f2fs: avoid to use slab memory in f2fs_issue_flush for efficiency
If we use slab memory in f2fs_issue_flush(), we face memory pressure and extra
latency caused by racing on kmem_cache_{alloc,free}. Let's allocate the memory
on the stack instead of from the slab.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Diffstat (limited to 'fs/f2fs/segment.c')
-rw-r--r--	fs/f2fs/segment.c	28
1 file changed, 9 insertions(+), 19 deletions(-)
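
As a reading aid for the hunks below: the fields the patch touches (cmd.wait, cmd.next, cmd.ret and the fcc->issue_* members) suggest a layout roughly like the sketch here. The real definitions live elsewhere in the f2fs sources and are not part of this diff; the field names come from the diff, everything else is an assumption.

/*
 * Reading aid only -- a sketch of the structures implied by this diff,
 * not the verbatim f2fs definitions (those live outside segment.c).
 */
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct flush_cmd {
	struct completion wait;		/* completed by the flush thread */
	struct flush_cmd *next;		/* singly linked issue list */
	int ret;			/* flush result, read by the waiter */
};

struct flush_cmd_control {
	wait_queue_head_t flush_wait_queue;	/* where the flush thread sleeps */
	struct flush_cmd *issue_list;		/* head of the pending commands */
	struct flush_cmd *issue_tail;		/* tail, for O(1) append */
	struct flush_cmd *dispatch_list;	/* batch currently being flushed */
	spinlock_t issue_lock;			/* protects issue_list/issue_tail */
};

After this patch, struct flush_cmd is never heap-allocated: each caller of f2fs_issue_flush() builds one on its own stack, links it into issue_list under issue_lock, and sleeps on the embedded completion.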
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 2ecbffb91f37..f25f0e07e26f 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -25,7 +25,6 @@
 #define __reverse_ffz(x)	__reverse_ffs(~(x))
 
 static struct kmem_cache *discard_entry_slab;
-static struct kmem_cache *flush_cmd_slab;
 
 /*
  * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
@@ -238,30 +237,28 @@ repeat:
 int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 {
 	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
-	struct flush_cmd *cmd;
-	int ret;
+	struct flush_cmd cmd;
 
 	if (!test_opt(sbi, FLUSH_MERGE))
 		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);
 
-	cmd = f2fs_kmem_cache_alloc(flush_cmd_slab, GFP_ATOMIC | __GFP_ZERO);
-	init_completion(&cmd->wait);
+	init_completion(&cmd.wait);
+	cmd.next = NULL;
 
 	spin_lock(&fcc->issue_lock);
 	if (fcc->issue_list)
-		fcc->issue_tail->next = cmd;
+		fcc->issue_tail->next = &cmd;
 	else
-		fcc->issue_list = cmd;
-	fcc->issue_tail = cmd;
+		fcc->issue_list = &cmd;
+	fcc->issue_tail = &cmd;
 	spin_unlock(&fcc->issue_lock);
 
 	if (!fcc->dispatch_list)
 		wake_up(&fcc->flush_wait_queue);
 
-	wait_for_completion(&cmd->wait);
-	ret = cmd->ret;
-	kmem_cache_free(flush_cmd_slab, cmd);
-	return ret;
+	wait_for_completion(&cmd.wait);
+
+	return cmd.ret;
 }
 
 int create_flush_cmd_control(struct f2fs_sb_info *sbi)
@@ -2036,17 +2033,10 @@ int __init create_segment_manager_caches(void)
 			sizeof(struct discard_entry));
 	if (!discard_entry_slab)
 		return -ENOMEM;
-	flush_cmd_slab = f2fs_kmem_cache_create("flush_command",
-			sizeof(struct flush_cmd));
-	if (!flush_cmd_slab) {
-		kmem_cache_destroy(discard_entry_slab);
-		return -ENOMEM;
-	}
 	return 0;
 }
 
 void destroy_segment_manager_caches(void)
 {
 	kmem_cache_destroy(discard_entry_slab);
-	kmem_cache_destroy(flush_cmd_slab);
 }
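
The reason a stack-allocated flush_cmd is safe here is that f2fs_issue_flush() does not return until the flush thread has completed &cmd.wait, so the object cannot go away while it is still linked on fcc->issue_list. A simplified, hypothetical sketch of that consumer side, built only from the fcc fields visible in this diff rather than the verbatim issue_flush_thread code, could look like this:

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include "f2fs.h"	/* assumed: struct f2fs_sb_info, SM_I(), flush_cmd_control */

/* Hypothetical consumer loop: drain the issue list, do one device flush
 * for the whole batch, then wake every waiter.  Sketch only. */
static int flush_thread_sketch(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;

	while (!kthread_should_stop()) {
		struct flush_cmd *cmd, *next;
		int ret;

		spin_lock(&fcc->issue_lock);
		fcc->dispatch_list = fcc->issue_list;
		fcc->issue_list = fcc->issue_tail = NULL;
		spin_unlock(&fcc->issue_lock);

		if (fcc->dispatch_list) {
			/* one flush of the device satisfies every queued request */
			ret = blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);

			for (cmd = fcc->dispatch_list; cmd; cmd = next) {
				/* read ->next and store ->ret before complete():
				 * once the waiter wakes, its stack frame (and
				 * this cmd) may vanish */
				next = cmd->next;
				cmd->ret = ret;
				complete(&cmd->wait);
			}
			fcc->dispatch_list = NULL;
		}

		wait_event_interruptible(fcc->flush_wait_queue,
				kthread_should_stop() || fcc->issue_list);
	}
	return 0;
}

In short, the patch trades per-call kmem_cache_{alloc,free} churn for a stack object whose lifetime is bounded by wait_for_completion(); the only ordering constraint is on the consumer, which must finish touching a command before completing it.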