Diffstat (limited to 'fs/f2fs/segment.c')
-rw-r--r--  fs/f2fs/segment.c | 46
1 file changed, 6 insertions(+), 40 deletions(-)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1b26e4ea1016..de6240922b0a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -12,54 +12,23 @@
 #include <linux/f2fs_fs.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/prefetch.h>
 #include <linux/vmalloc.h>
 
 #include "f2fs.h"
 #include "segment.h"
 #include "node.h"
 
-static int need_to_flush(struct f2fs_sb_info *sbi)
-{
-	unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) *
-			sbi->segs_per_sec;
-	int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
-		>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
-	int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
-		>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
-
-	if (sbi->por_doing)
-		return 0;
-
-	if (free_sections(sbi) <= (node_secs + 2 * dent_secs +
-						reserved_sections(sbi)))
-		return 1;
-	return 0;
-}
-
 /*
  * This function balances dirty node and dentry pages.
  * In addition, it controls garbage collection.
  */
 void f2fs_balance_fs(struct f2fs_sb_info *sbi)
 {
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
-		.for_reclaim = 0,
-	};
-
-	if (sbi->por_doing)
-		return;
-
 	/*
-	 * We should do checkpoint when there are so many dirty node pages
-	 * with enough free segments. After then, we should do GC.
+	 * We should do GC or end up with checkpoint, if there are so many dirty
+	 * dir/node pages without enough free segments.
 	 */
-	if (need_to_flush(sbi)) {
-		sync_dirty_dir_inodes(sbi);
-		sync_node_pages(sbi, 0, &wbc);
-	}
-
 	if (has_not_enough_free_secs(sbi)) {
 		mutex_lock(&sbi->gc_mutex);
 		f2fs_gc(sbi, 1);
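[Note] The removed need_to_flush() rounded the dirty node and dentry page counts up to whole sections and triggered a flush when free sections ran short; after this hunk f2fs_balance_fs() relies on has_not_enough_free_secs() alone, which presumably absorbs that accounting elsewhere (not part of this diff). The stand-alone C sketch below only illustrates the arithmetic of the removed helper, with plain integers and made-up values in place of the sbi fields:

/* Sketch of the section accounting the removed need_to_flush() performed:
 * dirty pages are converted to sections by ceiling division, dentry
 * sections are weighted twice, and the sum is checked against the free
 * sections minus the reserved ones. Values are illustrative only. */
#include <stdio.h>

static int pages_to_secs(long dirty_pages, int log_blocks_per_seg, int segs_per_sec)
{
	long pages_per_sec = (1L << log_blocks_per_seg) * segs_per_sec;

	/* ceiling division: pages -> segments -> sections */
	return ((dirty_pages + pages_per_sec - 1) >> log_blocks_per_seg) / segs_per_sec;
}

int main(void)
{
	int log_blocks_per_seg = 9;	/* 512 x 4KB blocks = one 2MB segment */
	int segs_per_sec = 1;
	long dirty_nodes = 700, dirty_dents = 300;
	int free_secs = 5, reserved_secs = 4;

	int node_secs = pages_to_secs(dirty_nodes, log_blocks_per_seg, segs_per_sec);
	int dent_secs = pages_to_secs(dirty_dents, log_blocks_per_seg, segs_per_sec);

	/* same predicate the removed need_to_flush() used */
	if (free_secs <= node_secs + 2 * dent_secs + reserved_secs)
		printf("flush needed: %d node + 2*%d dent + %d reserved sections\n",
		       node_secs, dent_secs, reserved_secs);
	return 0;
}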
@@ -631,7 +600,6 @@ static void f2fs_end_io_write(struct bio *bio, int err)
 		if (page->mapping)
 			set_bit(AS_EIO, &page->mapping->flags);
 		set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
-		set_page_dirty(page);
 	}
 	end_page_writeback(page);
 	dec_page_count(p->sbi, F2FS_WRITEBACK);
@@ -791,11 +759,10 @@ static int __get_segment_type(struct page *page, enum page_type p_type)
 		return __get_segment_type_2(page, p_type);
 	case 4:
 		return __get_segment_type_4(page, p_type);
-	case 6:
-		return __get_segment_type_6(page, p_type);
-	default:
-		BUG();
 	}
+	/* NR_CURSEG_TYPE(6) logs by default */
+	BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
+	return __get_segment_type_6(page, p_type);
 }
 
 static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
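[Note] With the default case dropped, __get_segment_type() now treats six active logs as the fall-through case: only 2 and 4 get explicit switch arms, and anything else must equal NR_CURSEG_TYPE (6), enforced with BUG_ON(). This matches f2fs exposing 2, 4 and 6 as the valid values of the active_logs mount option. A minimal stand-alone sketch of the new control flow, with hypothetical names standing in for the f2fs symbols:

/* Illustration of the reworked flow: 2 and 4 logs are explicit cases,
 * everything else must be the default of 6 (NR_CURSEG_TYPE). Names and
 * return values are stand-ins, not f2fs code. */
#include <assert.h>
#include <stdio.h>

#define NR_CURSEG_TYPE	6

static const char *pick_log_policy(int active_logs)
{
	switch (active_logs) {
	case 2:
		return "2-log policy";
	case 4:
		return "4-log policy";
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	assert(active_logs == NR_CURSEG_TYPE);	/* BUG_ON() in the kernel */
	return "6-log policy";
}

int main(void)
{
	printf("%s\n", pick_log_policy(6));	/* falls through to the 6-log default */
	return 0;
}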
@@ -1608,7 +1575,6 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
 
 	for (i = 0; i < NR_DIRTY_TYPE; i++) {
 		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
-		dirty_i->nr_dirty[i] = 0;
 		if (!dirty_i->dirty_segmap[i])
 			return -ENOMEM;
 	}
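[Note] Dropping dirty_i->nr_dirty[i] = 0 works presumably because dirty_i itself is allocated with kzalloc() earlier in build_dirty_segmap(), so the counters are already zeroed. A small userspace analogue, using calloc() in place of kzalloc() and an illustrative struct rather than the real dirty_seglist_info:

/* calloc(), like kzalloc(), returns zeroed memory, so resetting a counter
 * right after allocation is redundant. The struct below is illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct dirty_info {
	unsigned long *segmap[4];
	int nr_dirty[4];
};

int main(void)
{
	struct dirty_info *di = calloc(1, sizeof(*di));

	if (!di)
		return 1;
	/* no need for di->nr_dirty[i] = 0; calloc already cleared it */
	printf("nr_dirty[0] = %d\n", di->nr_dirty[0]);
	free(di);
	return 0;
}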