Diffstat (limited to 'fs/f2fs/segment.c')
 -rw-r--r--  fs/f2fs/segment.c | 194
 1 file changed, 112 insertions(+), 82 deletions(-)

diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 42607a679923..daee4ab913da 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -20,6 +20,7 @@
 #include "f2fs.h"
 #include "segment.h"
 #include "node.h"
+#include "trace.h"
 #include <trace/events/f2fs.h>
 
 #define __reverse_ffz(x) __reverse_ffs(~(x))
@@ -181,6 +182,7 @@ void register_inmem_page(struct inode *inode, struct page *page)
 	int err;
 
 	SetPagePrivate(page);
+	f2fs_trace_pid(page);
 
 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
 
@@ -205,23 +207,6 @@ retry:
 	mutex_unlock(&fi->inmem_lock);
 }
 
-void invalidate_inmem_page(struct inode *inode, struct page *page)
-{
-	struct f2fs_inode_info *fi = F2FS_I(inode);
-	struct inmem_pages *cur;
-
-	mutex_lock(&fi->inmem_lock);
-	cur = radix_tree_lookup(&fi->inmem_root, page->index);
-	if (cur) {
-		radix_tree_delete(&fi->inmem_root, cur->page->index);
-		f2fs_put_page(cur->page, 0);
-		list_del(&cur->list);
-		kmem_cache_free(inmem_entry_slab, cur);
-		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
-	}
-	mutex_unlock(&fi->inmem_lock);
-}
-
 void commit_inmem_pages(struct inode *inode, bool abort)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -230,7 +215,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
 	bool submit_bio = false;
 	struct f2fs_io_info fio = {
 		.type = DATA,
-		.rw = WRITE_SYNC,
+		.rw = WRITE_SYNC | REQ_PRIO,
 	};
 
 	/*
@@ -240,33 +225,38 @@ void commit_inmem_pages(struct inode *inode, bool abort)
 	 * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this
 	 * inode becomes free by iget_locked in f2fs_iget.
 	 */
-	if (!abort)
+	if (!abort) {
 		f2fs_balance_fs(sbi);
-
-	f2fs_lock_op(sbi);
+		f2fs_lock_op(sbi);
+	}
 
 	mutex_lock(&fi->inmem_lock);
 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
-		lock_page(cur->page);
-		if (!abort && cur->page->mapping == inode->i_mapping) {
-			f2fs_wait_on_page_writeback(cur->page, DATA);
-			if (clear_page_dirty_for_io(cur->page))
-				inode_dec_dirty_pages(inode);
-			do_write_data_page(cur->page, &fio);
-			submit_bio = true;
+		if (!abort) {
+			lock_page(cur->page);
+			if (cur->page->mapping == inode->i_mapping) {
+				f2fs_wait_on_page_writeback(cur->page, DATA);
+				if (clear_page_dirty_for_io(cur->page))
+					inode_dec_dirty_pages(inode);
+				do_write_data_page(cur->page, &fio);
+				submit_bio = true;
+			}
+			f2fs_put_page(cur->page, 1);
+		} else {
+			put_page(cur->page);
 		}
 		radix_tree_delete(&fi->inmem_root, cur->page->index);
-		f2fs_put_page(cur->page, 1);
 		list_del(&cur->list);
 		kmem_cache_free(inmem_entry_slab, cur);
 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
 	}
-	if (submit_bio)
-		f2fs_submit_merged_bio(sbi, DATA, WRITE);
 	mutex_unlock(&fi->inmem_lock);
 
-	filemap_fdatawait_range(inode->i_mapping, 0, LLONG_MAX);
-	f2fs_unlock_op(sbi);
+	if (!abort) {
+		f2fs_unlock_op(sbi);
+		if (submit_bio)
+			f2fs_submit_merged_bio(sbi, DATA, WRITE);
+	}
 }
 
 /*
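Note on the commit_inmem_pages() rework above: the balance/lock_op step and the final submit of the merged DATA bio now happen only on the commit path (!abort), the abort path just drops its page references with put_page(), and the filemap_fdatawait_range() call is gone. A rough userspace analogue of the commit-or-drop pattern, using toy types rather than the kernel structures:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct staged {                 /* stand-in for struct inmem_pages */
        int blk;
        struct staged *next;
    };

    /* Commit (write back) or abort (drop) everything staged so far. */
    static void commit_staged(struct staged **head, bool abort)
    {
        struct staged *cur = *head, *next;

        for (; cur; cur = next) {
            next = cur->next;
            if (!abort)
                printf("write back staged block %d\n", cur->blk);
            /* on abort the staged data is simply thrown away */
            free(cur);
        }
        *head = NULL;
    }

    int main(void)
    {
        struct staged *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct staged *s = malloc(sizeof(*s));
            if (!s)
                return 1;
            s->blk = i;
            s->next = head;
            head = s;
        }
        commit_staged(&head, false);    /* pass true to abort instead */
        return 0;
    }

In the kernel the same walk also unhooks each entry from the inode's radix tree and decrements the F2FS_INMEM_PAGES counter, as the hunk shows.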
@@ -290,7 +280,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 	/* check the # of cached NAT entries and prefree segments */
 	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
 			excess_prefree_segs(sbi) ||
-			available_free_memory(sbi, INO_ENTRIES))
+			!available_free_memory(sbi, INO_ENTRIES))
 		f2fs_sync_fs(sbi->sb, true);
 }
 
@@ -515,12 +505,13 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
-	unsigned long dmap[entries];
+	unsigned long *dmap = SIT_I(sbi)->tmp_map;
 	unsigned int start = 0, end = -1;
 	bool force = (cpc->reason == CP_DISCARD);
 	int i;
 
-	if (!force && !test_opt(sbi, DISCARD))
+	if (!force && (!test_opt(sbi, DISCARD) ||
+			SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards))
 		return;
 
 	if (force && !se->valid_blocks) {
@@ -548,7 +539,8 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
 	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
 	for (i = 0; i < entries; i++)
-		dmap[i] = ~(cur_map[i] | ckpt_map[i]);
+		dmap[i] = force ? ~ckpt_map[i] :
+				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
 
 	while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
 		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
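The rewritten dmap calculation above selects discard candidates per mode: under CP_DISCARD (fstrim), every block that is not valid in the checkpointed bitmap is a candidate (~ckpt_map); on a regular checkpoint, only blocks that were valid at the last checkpoint but have been freed since ((cur_map ^ ckpt_map) & ckpt_map) are queued. A minimal userspace sketch of the same bit math, with made-up bitmaps:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* one 8-block segment; bit i set == block i holds valid data */
        uint8_t cur_map  = 0x2d;    /* valid right now:      0010 1101 */
        uint8_t ckpt_map = 0x3c;    /* valid at last ckpt:   0011 1100 */

        /* regular checkpoint: blocks freed since the last checkpoint */
        uint8_t dmap_cp    = (cur_map ^ ckpt_map) & ckpt_map;   /* 0x10 */

        /* CP_DISCARD (fstrim): anything not in use on disk */
        uint8_t dmap_force = (uint8_t)~ckpt_map;                /* 0xc3 */

        printf("checkpoint discard candidates: 0x%02x\n", dmap_cp);
        printf("fstrim discard candidates:     0x%02x\n", dmap_force);
        return 0;
    }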
@@ -735,7 +727,7 @@ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
 /*
  * Calculate the number of current summary pages for writing
  */
-int npages_for_summary_flush(struct f2fs_sb_info *sbi)
+int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
 {
 	int valid_sum_count = 0;
 	int i, sum_in_page;
@@ -743,8 +735,13 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi)
 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
 		if (sbi->ckpt->alloc_type[i] == SSR)
 			valid_sum_count += sbi->blocks_per_seg;
-		else
-			valid_sum_count += curseg_blkoff(sbi, i);
+		else {
+			if (for_ra)
+				valid_sum_count += le16_to_cpu(
+					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
+			else
+				valid_sum_count += curseg_blkoff(sbi, i);
+		}
 	}
 
 	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
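The new for_ra argument above lets a caller size a readahead from the on-disk checkpoint (cur_data_blkoff) instead of the in-memory curseg, which restore_curseg_summaries() uses later in this diff before the current segments have been rebuilt. The page count itself is a packing calculation in the style of the sum_in_page line above; a hedged sketch with placeholder sizes (not f2fs's on-disk constants):

    #include <stdio.h>

    /*
     * How many pages are needed to hold n summary entries when each page
     * also reserves room for two journal areas and a footer?  The sizes
     * passed in below are placeholders chosen to make the arithmetic visible.
     */
    static int pages_for_summaries(int n_entries, int page_size,
                                   int journal_size, int footer_size,
                                   int entry_size)
    {
        int per_page = (page_size - 2 * journal_size - footer_size) / entry_size;

        return (n_entries + per_page - 1) / per_page;   /* round up */
    }

    int main(void)
    {
        printf("need %d summary page(s)\n",
               pages_for_summaries(900, 4096, 500, 20, 7));
        return 0;
    }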
@@ -803,7 +800,7 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 	int go_left = 0;
 	int i;
 
-	write_lock(&free_i->segmap_lock);
+	spin_lock(&free_i->segmap_lock);
 
 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
 		segno = find_next_zero_bit(free_i->free_segmap,
@@ -876,7 +873,7 @@ got_it:
 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
 	__set_inuse(sbi, segno);
 	*newseg = segno;
-	write_unlock(&free_i->segmap_lock);
+	spin_unlock(&free_i->segmap_lock);
 }
 
 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
@@ -927,7 +924,7 @@ static void __next_free_blkoff(struct f2fs_sb_info *sbi,
 {
 	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
 	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
-	unsigned long target_map[entries];
+	unsigned long *target_map = SIT_I(sbi)->tmp_map;
 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
 	int i, pos;
@@ -1027,18 +1024,22 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
 	stat_inc_seg_type(sbi, curseg);
 }
 
+static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
+{
+	struct curseg_info *curseg = CURSEG_I(sbi, type);
+	unsigned int old_segno;
+
+	old_segno = curseg->segno;
+	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
+	locate_dirty_segment(sbi, old_segno);
+}
+
 void allocate_new_segments(struct f2fs_sb_info *sbi)
 {
-	struct curseg_info *curseg;
-	unsigned int old_curseg;
 	int i;
 
-	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
-		curseg = CURSEG_I(sbi, i);
-		old_curseg = curseg->segno;
-		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
-		locate_dirty_segment(sbi, old_curseg);
-	}
+	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
+		__allocate_new_segments(sbi, i);
 }
 
 static const struct segment_allocation default_salloc_ops = {
@@ -1047,8 +1048,8 @@ static const struct segment_allocation default_salloc_ops = {
 
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 {
-	__u64 start = range->start >> sbi->log_blocksize;
-	__u64 end = start + (range->len >> sbi->log_blocksize) - 1;
+	__u64 start = F2FS_BYTES_TO_BLK(range->start);
+	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
 	unsigned int start_segno, end_segno;
 	struct cp_control cpc;
 
@@ -1065,16 +1066,21 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
 						GET_SEGNO(sbi, end);
 	cpc.reason = CP_DISCARD;
-	cpc.trim_start = start_segno;
-	cpc.trim_end = end_segno;
-	cpc.trim_minlen = range->minlen >> sbi->log_blocksize;
+	cpc.trim_minlen = F2FS_BYTES_TO_BLK(range->minlen);
 
 	/* do checkpoint to issue discard commands safely */
-	mutex_lock(&sbi->gc_mutex);
-	write_checkpoint(sbi, &cpc);
-	mutex_unlock(&sbi->gc_mutex);
+	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
+		cpc.trim_start = start_segno;
+		cpc.trim_end = min_t(unsigned int, rounddown(start_segno +
+				BATCHED_TRIM_SEGMENTS(sbi),
+				sbi->segs_per_sec) - 1, end_segno);
+
+		mutex_lock(&sbi->gc_mutex);
+		write_checkpoint(sbi, &cpc);
+		mutex_unlock(&sbi->gc_mutex);
+	}
 out:
-	range->len = cpc.trimmed << sbi->log_blocksize;
+	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
 	return 0;
 }
 
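The loop above turns one huge FITRIM checkpoint into a series of smaller ones: each pass trims at most BATCHED_TRIM_SEGMENTS(sbi) segments, with the batch end rounded down to a section boundary and clamped to end_segno, and each batch gets its own checkpoint under gc_mutex. A small sketch of just the batch arithmetic; the geometry values are invented, and BATCHED_TRIM_SEGMENTS presumably derives from the sm_info->trim_sections field added later in this diff:

    #include <stdio.h>

    /* kernel-style rounddown for unsigned values */
    static unsigned int rounddown_u(unsigned int x, unsigned int y)
    {
        return x - (x % y);
    }

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        /* invented geometry: 4 segments per section, 2 sections per batch */
        unsigned int segs_per_sec = 4;
        unsigned int batch_segs = 8;
        unsigned int start_segno = 3, end_segno = 21;
        unsigned int trim_start, trim_end;

        for (trim_start = start_segno; trim_start <= end_segno;
                        trim_start = trim_end + 1) {
            trim_end = min_u(rounddown_u(trim_start + batch_segs,
                                segs_per_sec) - 1, end_segno);
            printf("checkpoint discards segments [%u, %u]\n",
                                trim_start, trim_end);
        }
        return 0;
    }

Splitting the work this way keeps each checkpoint, and the time spent holding gc_mutex, bounded even for a whole-device trim.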
@@ -1151,11 +1157,18 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 {
 	struct sit_info *sit_i = SIT_I(sbi);
 	struct curseg_info *curseg;
+	bool direct_io = (type == CURSEG_DIRECT_IO);
+
+	type = direct_io ? CURSEG_WARM_DATA : type;
 
 	curseg = CURSEG_I(sbi, type);
 
 	mutex_lock(&curseg->curseg_mutex);
 
+	/* direct_io'ed data is aligned to the segment for better performance */
+	if (direct_io && curseg->next_blkoff)
+		__allocate_new_segments(sbi, type);
+
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
 	/*
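The direct_io handling added above routes CURSEG_DIRECT_IO allocations into the warm data log and, when the current segment is already partially used (next_blkoff != 0), opens a fresh segment first so the direct write starts segment-aligned, as the in-line comment says. The decision itself is a one-liner; a trivial sketch with an illustrative segment size:

    #include <stdbool.h>
    #include <stdio.h>

    /* Should a direct-I/O allocation open a fresh segment first? */
    static bool direct_io_needs_new_segment(unsigned int next_blkoff)
    {
        return next_blkoff != 0;    /* current segment already partly used */
    }

    int main(void)
    {
        unsigned int blocks_per_seg = 512;  /* illustrative, not from the patch */
        unsigned int next_blkoff = 37;

        if (direct_io_needs_new_segment(next_blkoff))
            printf("start a fresh %u-block segment instead of continuing at offset %u\n",
                   blocks_per_seg, next_blkoff);
        return 0;
    }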
@@ -1187,39 +1200,39 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 }
 
 static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
-			block_t old_blkaddr, block_t *new_blkaddr,
-			struct f2fs_summary *sum, struct f2fs_io_info *fio)
+			struct f2fs_summary *sum,
+			struct f2fs_io_info *fio)
 {
 	int type = __get_segment_type(page, fio->type);
 
-	allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type);
+	allocate_data_block(sbi, page, fio->blk_addr, &fio->blk_addr, sum, type);
 
 	/* writeout dirty page into bdev */
-	f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio);
+	f2fs_submit_page_mbio(sbi, page, fio);
 }
 
 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
 {
 	struct f2fs_io_info fio = {
 		.type = META,
-		.rw = WRITE_SYNC | REQ_META | REQ_PRIO
+		.rw = WRITE_SYNC | REQ_META | REQ_PRIO,
+		.blk_addr = page->index,
 	};
 
 	set_page_writeback(page);
-	f2fs_submit_page_mbio(sbi, page, page->index, &fio);
+	f2fs_submit_page_mbio(sbi, page, &fio);
 }
 
 void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
-			struct f2fs_io_info *fio,
-			unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
+			unsigned int nid, struct f2fs_io_info *fio)
 {
 	struct f2fs_summary sum;
 	set_summary(&sum, nid, 0, 0);
-	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio);
+	do_write_page(sbi, page, &sum, fio);
 }
 
 void write_data_page(struct page *page, struct dnode_of_data *dn,
-			block_t *new_blkaddr, struct f2fs_io_info *fio)
+			struct f2fs_io_info *fio)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct f2fs_summary sum;
@@ -1228,14 +1241,14 @@ void write_data_page(struct page *page, struct dnode_of_data *dn,
 	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
 	get_node_info(sbi, dn->nid, &ni);
 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
-
-	do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio);
+	do_write_page(sbi, page, &sum, fio);
+	dn->data_blkaddr = fio->blk_addr;
 }
 
-void rewrite_data_page(struct page *page, block_t old_blkaddr,
-					struct f2fs_io_info *fio)
+void rewrite_data_page(struct page *page, struct f2fs_io_info *fio)
 {
-	f2fs_submit_page_mbio(F2FS_P_SB(page), page, old_blkaddr, fio);
+	stat_inc_inplace_blocks(F2FS_P_SB(page));
+	f2fs_submit_page_mbio(F2FS_P_SB(page), page, fio);
 }
 
 void recover_data_page(struct f2fs_sb_info *sbi,
@@ -1393,7 +1406,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
 		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
 		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
 							CURSEG_HOT_DATA]);
-		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
+		if (__exist_node_summaries(sbi))
 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
 		else
 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
@@ -1402,7 +1415,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
 							CURSEG_HOT_NODE]);
 		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
 							CURSEG_HOT_NODE]);
-		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
+		if (__exist_node_summaries(sbi))
 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
 							type - CURSEG_HOT_NODE);
 		else
@@ -1413,7 +1426,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
 	sum = (struct f2fs_summary_block *)page_address(new);
 
 	if (IS_NODESEG(type)) {
-		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
+		if (__exist_node_summaries(sbi)) {
 			struct f2fs_summary *ns = &sum->entries[0];
 			int i;
 			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
@@ -1450,12 +1463,22 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
 	int err;
 
 	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
+		int npages = npages_for_summary_flush(sbi, true);
+
+		if (npages >= 2)
+			ra_meta_pages(sbi, start_sum_block(sbi), npages,
+								META_CP);
+
 		/* restore for compacted data summary */
 		if (read_compacted_summaries(sbi))
 			return -EINVAL;
 		type = CURSEG_HOT_NODE;
 	}
 
+	if (__exist_node_summaries(sbi))
+		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
+					NR_CURSEG_TYPE - type, META_CP);
+
 	for (; type <= CURSEG_COLD_NODE; type++) {
 		err = read_normal_summaries(sbi, type);
 		if (err)
@@ -1549,8 +1572,7 @@ void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 
 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 {
-	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
-		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
+	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
 }
 
 int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
@@ -1754,7 +1776,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 		se = get_seg_entry(sbi, segno);
 
 		/* add discard candidates */
-		if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards) {
+		if (cpc->reason != CP_DISCARD) {
 			cpc->trim_start = segno;
 			add_discard_addrs(sbi, cpc);
 		}
@@ -1833,6 +1855,10 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
 			return -ENOMEM;
 	}
 
+	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+	if (!sit_i->tmp_map)
+		return -ENOMEM;
+
 	if (sbi->segs_per_sec > 1) {
 		sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) *
 					sizeof(struct sec_entry));
@@ -1897,7 +1923,7 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
 	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
 	free_i->free_segments = 0;
 	free_i->free_sections = 0;
-	rwlock_init(&free_i->segmap_lock);
+	spin_lock_init(&free_i->segmap_lock);
 	return 0;
 }
 
@@ -2110,6 +2136,8 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
 	sm_info->nr_discards = 0;
 	sm_info->max_discards = 0;
 
+	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
+
 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
 
 	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
@@ -2212,6 +2240,8 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
 			kfree(sit_i->sentries[start].ckpt_valid_map);
 		}
 	}
+	kfree(sit_i->tmp_map);
+
 	vfree(sit_i->sentries);
 	vfree(sit_i->sec_entries);
 	kfree(sit_i->dirty_sentries_bitmap);
