author | Jiri Kosina <jkosina@suse.cz> | 2013-01-29 04:48:30 -0500
committer | Jiri Kosina <jkosina@suse.cz> | 2013-01-29 04:48:30 -0500
commit | 617677295b53a40d0e54aac4cbbc216ffbc755dd (patch)
tree | 51b9e87213243ed5efff252c8e8d8fec4eebc588 /fs/f2fs/segment.c
parent | 5c8d1b68e01a144813e38795fe6dbe7ebb506131 (diff)
parent | 6abb7c25775b7fb2225ad0508236d63ca710e65f (diff)
Merge branch 'master' into for-next
Conflicts:
drivers/devfreq/exynos4_bus.c
Sync with Linus' tree to be able to apply patches that are
against newer code (mvneta).
Diffstat (limited to 'fs/f2fs/segment.c')
-rw-r--r-- | fs/f2fs/segment.c | 1757 |
1 file changed, 1757 insertions, 0 deletions
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
new file mode 100644
index 000000000000..4b0099066582
--- /dev/null
+++ b/fs/f2fs/segment.c
@@ -0,0 +1,1757 @@
1 | /* | ||
2 | * fs/f2fs/segment.c | ||
3 | * | ||
4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | ||
5 | * http://www.samsung.com/ | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/f2fs_fs.h> | ||
13 | #include <linux/bio.h> | ||
14 | #include <linux/blkdev.h> | ||
15 | #include <linux/prefetch.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | |||
18 | #include "f2fs.h" | ||
19 | #include "segment.h" | ||
20 | #include "node.h" | ||
21 | |||
22 | /* | ||
23 | * This function balances dirty node and dentry pages. | ||
24 | * In addition, it controls garbage collection. | ||
25 | */ | ||
26 | void f2fs_balance_fs(struct f2fs_sb_info *sbi) | ||
27 | { | ||
28 | /* | ||
29 | * We should do GC, or end up with a checkpoint, if there are too many dirty | ||
30 | * dir/node pages and not enough free segments. | ||
31 | */ | ||
32 | if (has_not_enough_free_secs(sbi)) { | ||
33 | mutex_lock(&sbi->gc_mutex); | ||
34 | f2fs_gc(sbi); | ||
35 | } | ||
36 | } | ||
37 | |||
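As a rough illustration of how this entry point is meant to be used, here is a hedged caller sketch; the function name below is hypothetical and not part of this file. Write paths are expected to call f2fs_balance_fs() before dirtying more node/dentry pages, so that GC can reclaim free segments first.

/* Hypothetical caller sketch -- not part of fs/f2fs/segment.c */
static int f2fs_write_like_op(struct f2fs_sb_info *sbi)
{
	/* may take gc_mutex and run f2fs_gc() when free sections run low */
	f2fs_balance_fs(sbi);

	/* ... dirty node/dentry pages afterwards ... */
	return 0;
}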
38 | static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, | ||
39 | enum dirty_type dirty_type) | ||
40 | { | ||
41 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
42 | |||
43 | /* need not be added */ | ||
44 | if (IS_CURSEG(sbi, segno)) | ||
45 | return; | ||
46 | |||
47 | if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) | ||
48 | dirty_i->nr_dirty[dirty_type]++; | ||
49 | |||
50 | if (dirty_type == DIRTY) { | ||
51 | struct seg_entry *sentry = get_seg_entry(sbi, segno); | ||
52 | dirty_type = sentry->type; | ||
53 | if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) | ||
54 | dirty_i->nr_dirty[dirty_type]++; | ||
55 | } | ||
56 | } | ||
57 | |||
58 | static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, | ||
59 | enum dirty_type dirty_type) | ||
60 | { | ||
61 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
62 | |||
63 | if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) | ||
64 | dirty_i->nr_dirty[dirty_type]--; | ||
65 | |||
66 | if (dirty_type == DIRTY) { | ||
67 | struct seg_entry *sentry = get_seg_entry(sbi, segno); | ||
68 | dirty_type = sentry->type; | ||
69 | if (test_and_clear_bit(segno, | ||
70 | dirty_i->dirty_segmap[dirty_type])) | ||
71 | dirty_i->nr_dirty[dirty_type]--; | ||
72 | clear_bit(segno, dirty_i->victim_segmap[FG_GC]); | ||
73 | clear_bit(segno, dirty_i->victim_segmap[BG_GC]); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * Errors such as -ENOMEM should not occur here, since | ||
79 | * adding a dirty entry into the seglist is not a critical operation. | ||
80 | * If a given segment is one of the current working segments, it won't be added. | ||
81 | */ | ||
82 | void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) | ||
83 | { | ||
84 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
85 | unsigned short valid_blocks; | ||
86 | |||
87 | if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) | ||
88 | return; | ||
89 | |||
90 | mutex_lock(&dirty_i->seglist_lock); | ||
91 | |||
92 | valid_blocks = get_valid_blocks(sbi, segno, 0); | ||
93 | |||
94 | if (valid_blocks == 0) { | ||
95 | __locate_dirty_segment(sbi, segno, PRE); | ||
96 | __remove_dirty_segment(sbi, segno, DIRTY); | ||
97 | } else if (valid_blocks < sbi->blocks_per_seg) { | ||
98 | __locate_dirty_segment(sbi, segno, DIRTY); | ||
99 | } else { | ||
100 | /* Recovery routine with SSR needs this */ | ||
101 | __remove_dirty_segment(sbi, segno, DIRTY); | ||
102 | } | ||
103 | |||
104 | mutex_unlock(&dirty_i->seglist_lock); | ||
105 | return; | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Should call clear_prefree_segments after checkpoint is done. | ||
110 | */ | ||
111 | static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) | ||
112 | { | ||
113 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
114 | unsigned int segno, offset = 0; | ||
115 | unsigned int total_segs = TOTAL_SEGS(sbi); | ||
116 | |||
117 | mutex_lock(&dirty_i->seglist_lock); | ||
118 | while (1) { | ||
119 | segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs, | ||
120 | offset); | ||
121 | if (segno >= total_segs) | ||
122 | break; | ||
123 | __set_test_and_free(sbi, segno); | ||
124 | offset = segno + 1; | ||
125 | } | ||
126 | mutex_unlock(&dirty_i->seglist_lock); | ||
127 | } | ||
128 | |||
129 | void clear_prefree_segments(struct f2fs_sb_info *sbi) | ||
130 | { | ||
131 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
132 | unsigned int segno, offset = 0; | ||
133 | unsigned int total_segs = TOTAL_SEGS(sbi); | ||
134 | |||
135 | mutex_lock(&dirty_i->seglist_lock); | ||
136 | while (1) { | ||
137 | segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs, | ||
138 | offset); | ||
139 | if (segno >= total_segs) | ||
140 | break; | ||
141 | |||
142 | offset = segno + 1; | ||
143 | if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE])) | ||
144 | dirty_i->nr_dirty[PRE]--; | ||
145 | |||
146 | /* Let's use trim */ | ||
147 | if (test_opt(sbi, DISCARD)) | ||
148 | blkdev_issue_discard(sbi->sb->s_bdev, | ||
149 | START_BLOCK(sbi, segno) << | ||
150 | sbi->log_sectors_per_block, | ||
151 | 1 << (sbi->log_sectors_per_block + | ||
152 | sbi->log_blocks_per_seg), | ||
153 | GFP_NOFS, 0); | ||
154 | } | ||
155 | mutex_unlock(&dirty_i->seglist_lock); | ||
156 | } | ||
157 | |||
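For a concrete feel of the discard arithmetic above, here is a minimal stand-alone sketch; the 4 KB block size, 512-byte sectors, and 512 blocks per segment are assumed example values, not values read from this file.

#include <stdio.h>

int main(void)
{
	/* assumed geometry: 4 KB blocks on 512-byte sectors, 512 blocks per segment */
	unsigned int log_sectors_per_block = 3;   /* 4096 / 512 = 1 << 3 */
	unsigned int log_blocks_per_seg = 9;      /* 512 = 1 << 9 */
	unsigned long long start_block = 0x8000;  /* hypothetical START_BLOCK() result */

	unsigned long long sector = start_block << log_sectors_per_block;
	unsigned long long nr_sects = 1ULL << (log_sectors_per_block + log_blocks_per_seg);

	/* 4096 sectors * 512 bytes = 2 MiB, i.e. one whole segment */
	printf("discard %llu sectors starting at sector %llu\n", nr_sects, sector);
	return 0;
}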
158 | static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) | ||
159 | { | ||
160 | struct sit_info *sit_i = SIT_I(sbi); | ||
161 | if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) | ||
162 | sit_i->dirty_sentries++; | ||
163 | } | ||
164 | |||
165 | static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, | ||
166 | unsigned int segno, int modified) | ||
167 | { | ||
168 | struct seg_entry *se = get_seg_entry(sbi, segno); | ||
169 | se->type = type; | ||
170 | if (modified) | ||
171 | __mark_sit_entry_dirty(sbi, segno); | ||
172 | } | ||
173 | |||
174 | static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) | ||
175 | { | ||
176 | struct seg_entry *se; | ||
177 | unsigned int segno, offset; | ||
178 | long int new_vblocks; | ||
179 | |||
180 | segno = GET_SEGNO(sbi, blkaddr); | ||
181 | |||
182 | se = get_seg_entry(sbi, segno); | ||
183 | new_vblocks = se->valid_blocks + del; | ||
184 | offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1); | ||
185 | |||
186 | BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) || | ||
187 | (new_vblocks > sbi->blocks_per_seg))); | ||
188 | |||
189 | se->valid_blocks = new_vblocks; | ||
190 | se->mtime = get_mtime(sbi); | ||
191 | SIT_I(sbi)->max_mtime = se->mtime; | ||
192 | |||
193 | /* Update valid block bitmap */ | ||
194 | if (del > 0) { | ||
195 | if (f2fs_set_bit(offset, se->cur_valid_map)) | ||
196 | BUG(); | ||
197 | } else { | ||
198 | if (!f2fs_clear_bit(offset, se->cur_valid_map)) | ||
199 | BUG(); | ||
200 | } | ||
201 | if (!f2fs_test_bit(offset, se->ckpt_valid_map)) | ||
202 | se->ckpt_valid_blocks += del; | ||
203 | |||
204 | __mark_sit_entry_dirty(sbi, segno); | ||
205 | |||
206 | /* update total number of valid blocks to be written in ckpt area */ | ||
207 | SIT_I(sbi)->written_valid_blocks += del; | ||
208 | |||
209 | if (sbi->segs_per_sec > 1) | ||
210 | get_sec_entry(sbi, segno)->valid_blocks += del; | ||
211 | } | ||
212 | |||
213 | static void refresh_sit_entry(struct f2fs_sb_info *sbi, | ||
214 | block_t old_blkaddr, block_t new_blkaddr) | ||
215 | { | ||
216 | update_sit_entry(sbi, new_blkaddr, 1); | ||
217 | if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) | ||
218 | update_sit_entry(sbi, old_blkaddr, -1); | ||
219 | } | ||
220 | |||
221 | void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) | ||
222 | { | ||
223 | unsigned int segno = GET_SEGNO(sbi, addr); | ||
224 | struct sit_info *sit_i = SIT_I(sbi); | ||
225 | |||
226 | BUG_ON(addr == NULL_ADDR); | ||
227 | if (addr == NEW_ADDR) | ||
228 | return; | ||
229 | |||
230 | /* add it into sit main buffer */ | ||
231 | mutex_lock(&sit_i->sentry_lock); | ||
232 | |||
233 | update_sit_entry(sbi, addr, -1); | ||
234 | |||
235 | /* add it into dirty seglist */ | ||
236 | locate_dirty_segment(sbi, segno); | ||
237 | |||
238 | mutex_unlock(&sit_i->sentry_lock); | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * This function should be called with curseg_mutex held. | ||
243 | */ | ||
244 | static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, | ||
245 | struct f2fs_summary *sum, unsigned short offset) | ||
246 | { | ||
247 | struct curseg_info *curseg = CURSEG_I(sbi, type); | ||
248 | void *addr = curseg->sum_blk; | ||
249 | addr += offset * sizeof(struct f2fs_summary); | ||
250 | memcpy(addr, sum, sizeof(struct f2fs_summary)); | ||
251 | return; | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * Calculate the number of current summary pages for writing | ||
256 | */ | ||
257 | int npages_for_summary_flush(struct f2fs_sb_info *sbi) | ||
258 | { | ||
259 | int total_size_bytes = 0; | ||
260 | int valid_sum_count = 0; | ||
261 | int i, sum_space; | ||
262 | |||
263 | for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { | ||
264 | if (sbi->ckpt->alloc_type[i] == SSR) | ||
265 | valid_sum_count += sbi->blocks_per_seg; | ||
266 | else | ||
267 | valid_sum_count += curseg_blkoff(sbi, i); | ||
268 | } | ||
269 | |||
270 | total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1) | ||
271 | + sizeof(struct nat_journal) + 2 | ||
272 | + sizeof(struct sit_journal) + 2; | ||
273 | sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE; | ||
274 | if (total_size_bytes < sum_space) | ||
275 | return 1; | ||
276 | else if (total_size_bytes < 2 * sum_space) | ||
277 | return 2; | ||
278 | return 3; | ||
279 | } | ||
280 | |||
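To make the sizing logic above concrete, a stand-alone sketch of the same 1/2/3-page decision follows; the per-entry, journal, footer, and page sizes are assumed example values rather than the actual f2fs constants.

#include <stdio.h>

/* Mirrors the page-count decision above, with assumed sizes (illustrative only). */
static int npages_for(int valid_sum_count)
{
	const int summary_size = 7;      /* assumed per-entry size */
	const int journal_bytes = 1024;  /* assumed nat + sit journal overhead */
	const int sum_space = 4096 - 5;  /* assumed page size minus summary footer */

	int total = valid_sum_count * (summary_size + 1) + journal_bytes;

	if (total < sum_space)
		return 1;
	if (total < 2 * sum_space)
		return 2;
	return 3;
}

int main(void)
{
	/* e.g. three data logs with roughly 170 pending entries each */
	printf("%d page(s)\n", npages_for(3 * 170));
	return 0;
}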
281 | /* | ||
282 | * Caller should put this summary page | ||
283 | */ | ||
284 | struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) | ||
285 | { | ||
286 | return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno)); | ||
287 | } | ||
288 | |||
289 | static void write_sum_page(struct f2fs_sb_info *sbi, | ||
290 | struct f2fs_summary_block *sum_blk, block_t blk_addr) | ||
291 | { | ||
292 | struct page *page = grab_meta_page(sbi, blk_addr); | ||
293 | void *kaddr = page_address(page); | ||
294 | memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE); | ||
295 | set_page_dirty(page); | ||
296 | f2fs_put_page(page, 1); | ||
297 | } | ||
298 | |||
299 | static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, | ||
300 | int ofs_unit, int type) | ||
301 | { | ||
302 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
303 | unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE]; | ||
304 | unsigned int segno, next_segno, i; | ||
305 | int ofs = 0; | ||
306 | |||
307 | /* | ||
308 | * If there are not enough reserved sections, | ||
309 | * we should not reuse prefree segments. | ||
310 | */ | ||
311 | if (has_not_enough_free_secs(sbi)) | ||
312 | return NULL_SEGNO; | ||
313 | |||
314 | /* | ||
315 | * A NODE page should not reuse a prefree segment, | ||
316 | * since that information is used for SPOR. | ||
317 | */ | ||
318 | if (IS_NODESEG(type)) | ||
319 | return NULL_SEGNO; | ||
320 | next: | ||
321 | segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs++); | ||
322 | ofs = ((segno / ofs_unit) * ofs_unit) + ofs_unit; | ||
323 | if (segno < TOTAL_SEGS(sbi)) { | ||
324 | /* skip intermediate segments in a section */ | ||
325 | if (segno % ofs_unit) | ||
326 | goto next; | ||
327 | |||
328 | /* skip if whole section is not prefree */ | ||
329 | next_segno = find_next_zero_bit(prefree_segmap, | ||
330 | TOTAL_SEGS(sbi), segno + 1); | ||
331 | if (next_segno - segno < ofs_unit) | ||
332 | goto next; | ||
333 | |||
334 | /* skip if whole section was not free at the last checkpoint */ | ||
335 | for (i = 0; i < ofs_unit; i++) | ||
336 | if (get_seg_entry(sbi, segno)->ckpt_valid_blocks) | ||
337 | goto next; | ||
338 | return segno; | ||
339 | } | ||
340 | return NULL_SEGNO; | ||
341 | } | ||
342 | |||
343 | /* | ||
344 | * Find a new segment from the free segments bitmap in the right order. | ||
345 | * This function should always succeed; otherwise, BUG. | ||
346 | */ | ||
347 | static void get_new_segment(struct f2fs_sb_info *sbi, | ||
348 | unsigned int *newseg, bool new_sec, int dir) | ||
349 | { | ||
350 | struct free_segmap_info *free_i = FREE_I(sbi); | ||
351 | unsigned int total_secs = sbi->total_sections; | ||
352 | unsigned int segno, secno, zoneno; | ||
353 | unsigned int total_zones = sbi->total_sections / sbi->secs_per_zone; | ||
354 | unsigned int hint = *newseg / sbi->segs_per_sec; | ||
355 | unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg); | ||
356 | unsigned int left_start = hint; | ||
357 | bool init = true; | ||
358 | int go_left = 0; | ||
359 | int i; | ||
360 | |||
361 | write_lock(&free_i->segmap_lock); | ||
362 | |||
363 | if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { | ||
364 | segno = find_next_zero_bit(free_i->free_segmap, | ||
365 | TOTAL_SEGS(sbi), *newseg + 1); | ||
366 | if (segno < TOTAL_SEGS(sbi)) | ||
367 | goto got_it; | ||
368 | } | ||
369 | find_other_zone: | ||
370 | secno = find_next_zero_bit(free_i->free_secmap, total_secs, hint); | ||
371 | if (secno >= total_secs) { | ||
372 | if (dir == ALLOC_RIGHT) { | ||
373 | secno = find_next_zero_bit(free_i->free_secmap, | ||
374 | total_secs, 0); | ||
375 | BUG_ON(secno >= total_secs); | ||
376 | } else { | ||
377 | go_left = 1; | ||
378 | left_start = hint - 1; | ||
379 | } | ||
380 | } | ||
381 | if (go_left == 0) | ||
382 | goto skip_left; | ||
383 | |||
384 | while (test_bit(left_start, free_i->free_secmap)) { | ||
385 | if (left_start > 0) { | ||
386 | left_start--; | ||
387 | continue; | ||
388 | } | ||
389 | left_start = find_next_zero_bit(free_i->free_secmap, | ||
390 | total_secs, 0); | ||
391 | BUG_ON(left_start >= total_secs); | ||
392 | break; | ||
393 | } | ||
394 | secno = left_start; | ||
395 | skip_left: | ||
396 | hint = secno; | ||
397 | segno = secno * sbi->segs_per_sec; | ||
398 | zoneno = secno / sbi->secs_per_zone; | ||
399 | |||
400 | /* give up on finding another zone */ | ||
401 | if (!init) | ||
402 | goto got_it; | ||
403 | if (sbi->secs_per_zone == 1) | ||
404 | goto got_it; | ||
405 | if (zoneno == old_zoneno) | ||
406 | goto got_it; | ||
407 | if (dir == ALLOC_LEFT) { | ||
408 | if (!go_left && zoneno + 1 >= total_zones) | ||
409 | goto got_it; | ||
410 | if (go_left && zoneno == 0) | ||
411 | goto got_it; | ||
412 | } | ||
413 | for (i = 0; i < NR_CURSEG_TYPE; i++) | ||
414 | if (CURSEG_I(sbi, i)->zone == zoneno) | ||
415 | break; | ||
416 | |||
417 | if (i < NR_CURSEG_TYPE) { | ||
418 | /* zone is in use, try another */ | ||
419 | if (go_left) | ||
420 | hint = zoneno * sbi->secs_per_zone - 1; | ||
421 | else if (zoneno + 1 >= total_zones) | ||
422 | hint = 0; | ||
423 | else | ||
424 | hint = (zoneno + 1) * sbi->secs_per_zone; | ||
425 | init = false; | ||
426 | goto find_other_zone; | ||
427 | } | ||
428 | got_it: | ||
429 | /* set it as dirty segment in free segmap */ | ||
430 | BUG_ON(test_bit(segno, free_i->free_segmap)); | ||
431 | __set_inuse(sbi, segno); | ||
432 | *newseg = segno; | ||
433 | write_unlock(&free_i->segmap_lock); | ||
434 | } | ||
435 | |||
436 | static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) | ||
437 | { | ||
438 | struct curseg_info *curseg = CURSEG_I(sbi, type); | ||
439 | struct summary_footer *sum_footer; | ||
440 | |||
441 | curseg->segno = curseg->next_segno; | ||
442 | curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno); | ||
443 | curseg->next_blkoff = 0; | ||
444 | curseg->next_segno = NULL_SEGNO; | ||
445 | |||
446 | sum_footer = &(curseg->sum_blk->footer); | ||
447 | memset(sum_footer, 0, sizeof(struct summary_footer)); | ||
448 | if (IS_DATASEG(type)) | ||
449 | SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); | ||
450 | if (IS_NODESEG(type)) | ||
451 | SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); | ||
452 | __set_sit_entry_type(sbi, type, curseg->segno, modified); | ||
453 | } | ||
454 | |||
455 | /* | ||
456 | * Allocate a current working segment. | ||
457 | * This function always allocates a free segment in LFS manner. | ||
458 | */ | ||
459 | static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) | ||
460 | { | ||
461 | struct curseg_info *curseg = CURSEG_I(sbi, type); | ||
462 | unsigned int segno = curseg->segno; | ||
463 | int dir = ALLOC_LEFT; | ||
464 | |||
465 | write_sum_page(sbi, curseg->sum_blk, | ||
466 | GET_SUM_BLOCK(sbi, curseg->segno)); | ||
467 | if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA) | ||
468 | dir = ALLOC_RIGHT; | ||
469 | |||
470 | if (test_opt(sbi, NOHEAP)) | ||
471 | dir = ALLOC_RIGHT; | ||
472 | |||
473 | get_new_segment(sbi, &segno, new_sec, dir); | ||
474 | curseg->next_segno = segno; | ||
475 | reset_curseg(sbi, type, 1); | ||
476 | curseg->alloc_type = LFS; | ||
477 | } | ||
478 | |||
479 | static void __next_free_blkoff(struct f2fs_sb_info *sbi, | ||
480 | struct curseg_info *seg, block_t start) | ||
481 | { | ||
482 | struct seg_entry *se = get_seg_entry(sbi, seg->segno); | ||
483 | block_t ofs; | ||
484 | for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) { | ||
485 | if (!f2fs_test_bit(ofs, se->ckpt_valid_map) | ||
486 | && !f2fs_test_bit(ofs, se->cur_valid_map)) | ||
487 | break; | ||
488 | } | ||
489 | seg->next_blkoff = ofs; | ||
490 | } | ||
491 | |||
492 | /* | ||
493 | * If a segment is written in LFS manner, the next block offset is just obtained | ||
494 | * by increasing the current block offset. However, if a segment is written in | ||
495 | * SSR manner, the next block offset is obtained by calling __next_free_blkoff. | ||
496 | */ | ||
497 | static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, | ||
498 | struct curseg_info *seg) | ||
499 | { | ||
500 | if (seg->alloc_type == SSR) | ||
501 | __next_free_blkoff(sbi, seg, seg->next_blkoff + 1); | ||
502 | else | ||
503 | seg->next_blkoff++; | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * This function always allocates a used segment (from the dirty seglist) in SSR | ||
508 | * manner, so it should recover the existing segment information for its valid blocks. | ||
509 | */ | ||
510 | static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse) | ||
511 | { | ||
512 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
513 | struct curseg_info *curseg = CURSEG_I(sbi, type); | ||
514 | unsigned int new_segno = curseg->next_segno; | ||
515 | struct f2fs_summary_block *sum_node; | ||
516 | struct page *sum_page; | ||
517 | |||
518 | write_sum_page(sbi, curseg->sum_blk, | ||
519 | GET_SUM_BLOCK(sbi, curseg->segno)); | ||
520 | __set_test_and_inuse(sbi, new_segno); | ||
521 | |||
522 | mutex_lock(&dirty_i->seglist_lock); | ||
523 | __remove_dirty_segment(sbi, new_segno, PRE); | ||
524 | __remove_dirty_segment(sbi, new_segno, DIRTY); | ||
525 | mutex_unlock(&dirty_i->seglist_lock); | ||
526 | |||
527 | reset_curseg(sbi, type, 1); | ||
528 | curseg->alloc_type = SSR; | ||
529 | __next_free_blkoff(sbi, curseg, 0); | ||
530 | |||
531 | if (reuse) { | ||
532 | sum_page = get_sum_page(sbi, new_segno); | ||
533 | sum_node = (struct f2fs_summary_block *)page_address(sum_page); | ||
534 | memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); | ||
535 | f2fs_put_page(sum_page, 1); | ||
536 | } | ||
537 | } | ||
538 | |||
539 | /* | ||
540 | * Flush out the current segment and replace it with a new segment. | ||
541 | * This function should always succeed; otherwise, BUG. | ||
542 | */ | ||
543 | static void allocate_segment_by_default(struct f2fs_sb_info *sbi, | ||
544 | int type, bool force) | ||
545 | { | ||
546 | struct curseg_info *curseg = CURSEG_I(sbi, type); | ||
547 | unsigned int ofs_unit; | ||
548 | |||
549 | if (force) { | ||
550 | new_curseg(sbi, type, true); | ||
551 | goto out; | ||
552 | } | ||
553 | |||
554 | ofs_unit = need_SSR(sbi) ? 1 : sbi->segs_per_sec; | ||
555 | curseg->next_segno = check_prefree_segments(sbi, ofs_unit, type); | ||
556 | |||
557 | if (curseg->next_segno != NULL_SEGNO) | ||
558 | change_curseg(sbi, type, false); | ||
559 | else if (type == CURSEG_WARM_NODE) | ||
560 | new_curseg(sbi, type, false); | ||
561 | else if (need_SSR(sbi) && get_ssr_segment(sbi, type)) | ||
562 | change_curseg(sbi, type, true); | ||
563 | else | ||
564 | new_curseg(sbi, type, false); | ||
565 | out: | ||
566 | sbi->segment_count[curseg->alloc_type]++; | ||
567 | } | ||
568 | |||
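In short, when not forced, the allocator above prefers a wholly prefree section if one is available (check_prefree_segments), always takes a fresh LFS segment for the warm node log, tries an SSR victim when free sections run low, and otherwise allocates a fresh LFS segment.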
569 | void allocate_new_segments(struct f2fs_sb_info *sbi) | ||
570 | { | ||
571 | struct curseg_info *curseg; | ||
572 | unsigned int old_curseg; | ||
573 | int i; | ||
574 | |||
575 | for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { | ||
576 | curseg = CURSEG_I(sbi, i); | ||
577 | old_curseg = curseg->segno; | ||
578 | SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); | ||
579 | locate_dirty_segment(sbi, old_curseg); | ||
580 | } | ||
581 | } | ||
582 | |||
583 | static const struct segment_allocation default_salloc_ops = { | ||
584 | .allocate_segment = allocate_segment_by_default, | ||
585 | }; | ||
586 | |||
587 | static void f2fs_end_io_write(struct bio *bio, int err) | ||
588 | { | ||
589 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | ||
590 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | ||
591 | struct bio_private *p = bio->bi_private; | ||
592 | |||
593 | do { | ||
594 | struct page *page = bvec->bv_page; | ||
595 | |||
596 | if (--bvec >= bio->bi_io_vec) | ||
597 | prefetchw(&bvec->bv_page->flags); | ||
598 | if (!uptodate) { | ||
599 | SetPageError(page); | ||
600 | if (page->mapping) | ||
601 | set_bit(AS_EIO, &page->mapping->flags); | ||
602 | set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG); | ||
603 | } | ||
604 | end_page_writeback(page); | ||
605 | dec_page_count(p->sbi, F2FS_WRITEBACK); | ||
606 | } while (bvec >= bio->bi_io_vec); | ||
607 | |||
608 | if (p->is_sync) | ||
609 | complete(p->wait); | ||
610 | kfree(p); | ||
611 | bio_put(bio); | ||
612 | } | ||
613 | |||
614 | struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages) | ||
615 | { | ||
616 | struct bio *bio; | ||
617 | struct bio_private *priv; | ||
618 | retry: | ||
619 | priv = kmalloc(sizeof(struct bio_private), GFP_NOFS); | ||
620 | if (!priv) { | ||
621 | cond_resched(); | ||
622 | goto retry; | ||
623 | } | ||
624 | |||
625 | /* No failure on bio allocation */ | ||
626 | bio = bio_alloc(GFP_NOIO, npages); | ||
627 | bio->bi_bdev = bdev; | ||
628 | bio->bi_private = priv; | ||
629 | return bio; | ||
630 | } | ||
631 | |||
632 | static void do_submit_bio(struct f2fs_sb_info *sbi, | ||
633 | enum page_type type, bool sync) | ||
634 | { | ||
635 | int rw = sync ? WRITE_SYNC : WRITE; | ||
636 | enum page_type btype = type > META ? META : type; | ||
637 | |||
638 | if (type >= META_FLUSH) | ||
639 | rw = WRITE_FLUSH_FUA; | ||
640 | |||
641 | if (sbi->bio[btype]) { | ||
642 | struct bio_private *p = sbi->bio[btype]->bi_private; | ||
643 | p->sbi = sbi; | ||
644 | sbi->bio[btype]->bi_end_io = f2fs_end_io_write; | ||
645 | if (type == META_FLUSH) { | ||
646 | DECLARE_COMPLETION_ONSTACK(wait); | ||
647 | p->is_sync = true; | ||
648 | p->wait = &wait; | ||
649 | submit_bio(rw, sbi->bio[btype]); | ||
650 | wait_for_completion(&wait); | ||
651 | } else { | ||
652 | p->is_sync = false; | ||
653 | submit_bio(rw, sbi->bio[btype]); | ||
654 | } | ||
655 | sbi->bio[btype] = NULL; | ||
656 | } | ||
657 | } | ||
658 | |||
659 | void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync) | ||
660 | { | ||
661 | down_write(&sbi->bio_sem); | ||
662 | do_submit_bio(sbi, type, sync); | ||
663 | up_write(&sbi->bio_sem); | ||
664 | } | ||
665 | |||
666 | static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page, | ||
667 | block_t blk_addr, enum page_type type) | ||
668 | { | ||
669 | struct block_device *bdev = sbi->sb->s_bdev; | ||
670 | |||
671 | verify_block_addr(sbi, blk_addr); | ||
672 | |||
673 | down_write(&sbi->bio_sem); | ||
674 | |||
675 | inc_page_count(sbi, F2FS_WRITEBACK); | ||
676 | |||
677 | if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1) | ||
678 | do_submit_bio(sbi, type, false); | ||
679 | alloc_new: | ||
680 | if (sbi->bio[type] == NULL) { | ||
681 | sbi->bio[type] = f2fs_bio_alloc(bdev, bio_get_nr_vecs(bdev)); | ||
682 | sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); | ||
683 | /* | ||
684 | * The end_io will be assigned at the submission phase. | ||
685 | * Until then, let bio_add_page() merge consecutive IOs as much | ||
686 | * as possible. | ||
687 | */ | ||
688 | } | ||
689 | |||
690 | if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) < | ||
691 | PAGE_CACHE_SIZE) { | ||
692 | do_submit_bio(sbi, type, false); | ||
693 | goto alloc_new; | ||
694 | } | ||
695 | |||
696 | sbi->last_block_in_bio[type] = blk_addr; | ||
697 | |||
698 | up_write(&sbi->bio_sem); | ||
699 | } | ||
700 | |||
701 | static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) | ||
702 | { | ||
703 | struct curseg_info *curseg = CURSEG_I(sbi, type); | ||
704 | if (curseg->next_blkoff < sbi->blocks_per_seg) | ||
705 | return true; | ||
706 | return false; | ||
707 | } | ||
708 | |||
709 | static int __get_segment_type_2(struct page *page, enum page_type p_type) | ||
710 | { | ||
711 | if (p_type == DATA) | ||
712 | return CURSEG_HOT_DATA; | ||
713 | else | ||
714 | return CURSEG_HOT_NODE; | ||
715 | } | ||
716 | |||
717 | static int __get_segment_type_4(struct page *page, enum page_type p_type) | ||
718 | { | ||
719 | if (p_type == DATA) { | ||
720 | struct inode *inode = page->mapping->host; | ||
721 | |||
722 | if (S_ISDIR(inode->i_mode)) | ||
723 | return CURSEG_HOT_DATA; | ||
724 | else | ||
725 | return CURSEG_COLD_DATA; | ||
726 | } else { | ||
727 | if (IS_DNODE(page) && !is_cold_node(page)) | ||
728 | return CURSEG_HOT_NODE; | ||
729 | else | ||
730 | return CURSEG_COLD_NODE; | ||
731 | } | ||
732 | } | ||
733 | |||
734 | static int __get_segment_type_6(struct page *page, enum page_type p_type) | ||
735 | { | ||
736 | if (p_type == DATA) { | ||
737 | struct inode *inode = page->mapping->host; | ||
738 | |||
739 | if (S_ISDIR(inode->i_mode)) | ||
740 | return CURSEG_HOT_DATA; | ||
741 | else if (is_cold_data(page) || is_cold_file(inode)) | ||
742 | return CURSEG_COLD_DATA; | ||
743 | else | ||
744 | return CURSEG_WARM_DATA; | ||
745 | } else { | ||
746 | if (IS_DNODE(page)) | ||
747 | return is_cold_node(page) ? CURSEG_WARM_NODE : | ||
748 | CURSEG_HOT_NODE; | ||
749 | else | ||
750 | return CURSEG_COLD_NODE; | ||
751 | } | ||
752 | } | ||
753 | |||
754 | static int __get_segment_type(struct page *page, enum page_type p_type) | ||
755 | { | ||
756 | struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); | ||
757 | switch (sbi->active_logs) { | ||
758 | case 2: | ||
759 | return __get_segment_type_2(page, p_type); | ||
760 | case 4: | ||
761 | return __get_segment_type_4(page, p_type); | ||
762 | } | ||
763 | /* NR_CURSEG_TYPE(6) logs by default */ | ||
764 | BUG_ON(sbi->active_logs != NR_CURSEG_TYPE); | ||
765 | return __get_segment_type_6(page, p_type); | ||
766 | } | ||
767 | |||
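Taken together, the three helpers above select the log for each page; with the default six active logs the mapping is roughly:

	directory data            -> CURSEG_HOT_DATA
	cold data / cold files    -> CURSEG_COLD_DATA
	other user data           -> CURSEG_WARM_DATA
	direct node (hot)         -> CURSEG_HOT_NODE
	direct node (cold)        -> CURSEG_WARM_NODE
	indirect node             -> CURSEG_COLD_NODE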
768 | static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, | ||
769 | block_t old_blkaddr, block_t *new_blkaddr, | ||
770 | struct f2fs_summary *sum, enum page_type p_type) | ||
771 | { | ||
772 | struct sit_info *sit_i = SIT_I(sbi); | ||
773 | struct curseg_info *curseg; | ||
774 | unsigned int old_cursegno; | ||
775 | int type; | ||
776 | |||
777 | type = __get_segment_type(page, p_type); | ||
778 | curseg = CURSEG_I(sbi, type); | ||
779 | |||
780 | mutex_lock(&curseg->curseg_mutex); | ||
781 | |||
782 | *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); | ||
783 | old_cursegno = curseg->segno; | ||
784 | |||
785 | /* | ||
786 | * __add_sum_entry should be called with curseg_mutex held, | ||
787 | * because this function updates a summary entry in the | ||
788 | * current summary block. | ||
789 | */ | ||
790 | __add_sum_entry(sbi, type, sum, curseg->next_blkoff); | ||
791 | |||
792 | mutex_lock(&sit_i->sentry_lock); | ||
793 | __refresh_next_blkoff(sbi, curseg); | ||
794 | sbi->block_count[curseg->alloc_type]++; | ||
795 | |||
796 | /* | ||
797 | * SIT information should be updated before segment allocation, | ||
798 | * since SSR needs latest valid block information. | ||
799 | */ | ||
800 | refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); | ||
801 | |||
802 | if (!__has_curseg_space(sbi, type)) | ||
803 | sit_i->s_ops->allocate_segment(sbi, type, false); | ||
804 | |||
805 | locate_dirty_segment(sbi, old_cursegno); | ||
806 | locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); | ||
807 | mutex_unlock(&sit_i->sentry_lock); | ||
808 | |||
809 | if (p_type == NODE) | ||
810 | fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); | ||
811 | |||
812 | /* writeout dirty page into bdev */ | ||
813 | submit_write_page(sbi, page, *new_blkaddr, p_type); | ||
814 | |||
815 | mutex_unlock(&curseg->curseg_mutex); | ||
816 | } | ||
817 | |||
818 | int write_meta_page(struct f2fs_sb_info *sbi, struct page *page, | ||
819 | struct writeback_control *wbc) | ||
820 | { | ||
821 | if (wbc->for_reclaim) | ||
822 | return AOP_WRITEPAGE_ACTIVATE; | ||
823 | |||
824 | set_page_writeback(page); | ||
825 | submit_write_page(sbi, page, page->index, META); | ||
826 | return 0; | ||
827 | } | ||
828 | |||
829 | void write_node_page(struct f2fs_sb_info *sbi, struct page *page, | ||
830 | unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr) | ||
831 | { | ||
832 | struct f2fs_summary sum; | ||
833 | set_summary(&sum, nid, 0, 0); | ||
834 | do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE); | ||
835 | } | ||
836 | |||
837 | void write_data_page(struct inode *inode, struct page *page, | ||
838 | struct dnode_of_data *dn, block_t old_blkaddr, | ||
839 | block_t *new_blkaddr) | ||
840 | { | ||
841 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | ||
842 | struct f2fs_summary sum; | ||
843 | struct node_info ni; | ||
844 | |||
845 | BUG_ON(old_blkaddr == NULL_ADDR); | ||
846 | get_node_info(sbi, dn->nid, &ni); | ||
847 | set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); | ||
848 | |||
849 | do_write_page(sbi, page, old_blkaddr, | ||
850 | new_blkaddr, &sum, DATA); | ||
851 | } | ||
852 | |||
853 | void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page, | ||
854 | block_t old_blk_addr) | ||
855 | { | ||
856 | submit_write_page(sbi, page, old_blk_addr, DATA); | ||
857 | } | ||
858 | |||
859 | void recover_data_page(struct f2fs_sb_info *sbi, | ||
860 | struct page *page, struct f2fs_summary *sum, | ||
861 | block_t old_blkaddr, block_t new_blkaddr) | ||
862 | { | ||
863 | struct sit_info *sit_i = SIT_I(sbi); | ||
864 | struct curseg_info *curseg; | ||
865 | unsigned int segno, old_cursegno; | ||
866 | struct seg_entry *se; | ||
867 | int type; | ||
868 | |||
869 | segno = GET_SEGNO(sbi, new_blkaddr); | ||
870 | se = get_seg_entry(sbi, segno); | ||
871 | type = se->type; | ||
872 | |||
873 | if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { | ||
874 | if (old_blkaddr == NULL_ADDR) | ||
875 | type = CURSEG_COLD_DATA; | ||
876 | else | ||
877 | type = CURSEG_WARM_DATA; | ||
878 | } | ||
879 | curseg = CURSEG_I(sbi, type); | ||
880 | |||
881 | mutex_lock(&curseg->curseg_mutex); | ||
882 | mutex_lock(&sit_i->sentry_lock); | ||
883 | |||
884 | old_cursegno = curseg->segno; | ||
885 | |||
886 | /* change the current segment */ | ||
887 | if (segno != curseg->segno) { | ||
888 | curseg->next_segno = segno; | ||
889 | change_curseg(sbi, type, true); | ||
890 | } | ||
891 | |||
892 | curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & | ||
893 | (sbi->blocks_per_seg - 1); | ||
894 | __add_sum_entry(sbi, type, sum, curseg->next_blkoff); | ||
895 | |||
896 | refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); | ||
897 | |||
898 | locate_dirty_segment(sbi, old_cursegno); | ||
899 | locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); | ||
900 | |||
901 | mutex_unlock(&sit_i->sentry_lock); | ||
902 | mutex_unlock(&curseg->curseg_mutex); | ||
903 | } | ||
904 | |||
905 | void rewrite_node_page(struct f2fs_sb_info *sbi, | ||
906 | struct page *page, struct f2fs_summary *sum, | ||
907 | block_t old_blkaddr, block_t new_blkaddr) | ||
908 | { | ||
909 | struct sit_info *sit_i = SIT_I(sbi); | ||
910 | int type = CURSEG_WARM_NODE; | ||
911 | struct curseg_info *curseg; | ||
912 | unsigned int segno, old_cursegno; | ||
913 | block_t next_blkaddr = next_blkaddr_of_node(page); | ||
914 | unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr); | ||
915 | |||
916 | curseg = CURSEG_I(sbi, type); | ||
917 | |||
918 | mutex_lock(&curseg->curseg_mutex); | ||
919 | mutex_lock(&sit_i->sentry_lock); | ||
920 | |||
921 | segno = GET_SEGNO(sbi, new_blkaddr); | ||
922 | old_cursegno = curseg->segno; | ||
923 | |||
924 | /* change the current segment */ | ||
925 | if (segno != curseg->segno) { | ||
926 | curseg->next_segno = segno; | ||
927 | change_curseg(sbi, type, true); | ||
928 | } | ||
929 | curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & | ||
930 | (sbi->blocks_per_seg - 1); | ||
931 | __add_sum_entry(sbi, type, sum, curseg->next_blkoff); | ||
932 | |||
933 | /* change the current log to the next block addr in advance */ | ||
934 | if (next_segno != segno) { | ||
935 | curseg->next_segno = next_segno; | ||
936 | change_curseg(sbi, type, true); | ||
937 | } | ||
938 | curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) & | ||
939 | (sbi->blocks_per_seg - 1); | ||
940 | |||
941 | /* rewrite node page */ | ||
942 | set_page_writeback(page); | ||
943 | submit_write_page(sbi, page, new_blkaddr, NODE); | ||
944 | f2fs_submit_bio(sbi, NODE, true); | ||
945 | refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); | ||
946 | |||
947 | locate_dirty_segment(sbi, old_cursegno); | ||
948 | locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); | ||
949 | |||
950 | mutex_unlock(&sit_i->sentry_lock); | ||
951 | mutex_unlock(&curseg->curseg_mutex); | ||
952 | } | ||
953 | |||
954 | static int read_compacted_summaries(struct f2fs_sb_info *sbi) | ||
955 | { | ||
956 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | ||
957 | struct curseg_info *seg_i; | ||
958 | unsigned char *kaddr; | ||
959 | struct page *page; | ||
960 | block_t start; | ||
961 | int i, j, offset; | ||
962 | |||
963 | start = start_sum_block(sbi); | ||
964 | |||
965 | page = get_meta_page(sbi, start++); | ||
966 | kaddr = (unsigned char *)page_address(page); | ||
967 | |||
968 | /* Step 1: restore nat cache */ | ||
969 | seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); | ||
970 | memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE); | ||
971 | |||
972 | /* Step 2: restore sit cache */ | ||
973 | seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); | ||
974 | memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE, | ||
975 | SUM_JOURNAL_SIZE); | ||
976 | offset = 2 * SUM_JOURNAL_SIZE; | ||
977 | |||
978 | /* Step 3: restore summary entries */ | ||
979 | for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { | ||
980 | unsigned short blk_off; | ||
981 | unsigned int segno; | ||
982 | |||
983 | seg_i = CURSEG_I(sbi, i); | ||
984 | segno = le32_to_cpu(ckpt->cur_data_segno[i]); | ||
985 | blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); | ||
986 | seg_i->next_segno = segno; | ||
987 | reset_curseg(sbi, i, 0); | ||
988 | seg_i->alloc_type = ckpt->alloc_type[i]; | ||
989 | seg_i->next_blkoff = blk_off; | ||
990 | |||
991 | if (seg_i->alloc_type == SSR) | ||
992 | blk_off = sbi->blocks_per_seg; | ||
993 | |||
994 | for (j = 0; j < blk_off; j++) { | ||
995 | struct f2fs_summary *s; | ||
996 | s = (struct f2fs_summary *)(kaddr + offset); | ||
997 | seg_i->sum_blk->entries[j] = *s; | ||
998 | offset += SUMMARY_SIZE; | ||
999 | if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - | ||
1000 | SUM_FOOTER_SIZE) | ||
1001 | continue; | ||
1002 | |||
1003 | f2fs_put_page(page, 1); | ||
1004 | page = NULL; | ||
1005 | |||
1006 | page = get_meta_page(sbi, start++); | ||
1007 | kaddr = (unsigned char *)page_address(page); | ||
1008 | offset = 0; | ||
1009 | } | ||
1010 | } | ||
1011 | f2fs_put_page(page, 1); | ||
1012 | return 0; | ||
1013 | } | ||
1014 | |||
1015 | static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) | ||
1016 | { | ||
1017 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | ||
1018 | struct f2fs_summary_block *sum; | ||
1019 | struct curseg_info *curseg; | ||
1020 | struct page *new; | ||
1021 | unsigned short blk_off; | ||
1022 | unsigned int segno = 0; | ||
1023 | block_t blk_addr = 0; | ||
1024 | |||
1025 | /* get segment number and block addr */ | ||
1026 | if (IS_DATASEG(type)) { | ||
1027 | segno = le32_to_cpu(ckpt->cur_data_segno[type]); | ||
1028 | blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - | ||
1029 | CURSEG_HOT_DATA]); | ||
1030 | if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) | ||
1031 | blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); | ||
1032 | else | ||
1033 | blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); | ||
1034 | } else { | ||
1035 | segno = le32_to_cpu(ckpt->cur_node_segno[type - | ||
1036 | CURSEG_HOT_NODE]); | ||
1037 | blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - | ||
1038 | CURSEG_HOT_NODE]); | ||
1039 | if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) | ||
1040 | blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, | ||
1041 | type - CURSEG_HOT_NODE); | ||
1042 | else | ||
1043 | blk_addr = GET_SUM_BLOCK(sbi, segno); | ||
1044 | } | ||
1045 | |||
1046 | new = get_meta_page(sbi, blk_addr); | ||
1047 | sum = (struct f2fs_summary_block *)page_address(new); | ||
1048 | |||
1049 | if (IS_NODESEG(type)) { | ||
1050 | if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) { | ||
1051 | struct f2fs_summary *ns = &sum->entries[0]; | ||
1052 | int i; | ||
1053 | for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { | ||
1054 | ns->version = 0; | ||
1055 | ns->ofs_in_node = 0; | ||
1056 | } | ||
1057 | } else { | ||
1058 | if (restore_node_summary(sbi, segno, sum)) { | ||
1059 | f2fs_put_page(new, 1); | ||
1060 | return -EINVAL; | ||
1061 | } | ||
1062 | } | ||
1063 | } | ||
1064 | |||
1065 | /* set uncompleted segment to curseg */ | ||
1066 | curseg = CURSEG_I(sbi, type); | ||
1067 | mutex_lock(&curseg->curseg_mutex); | ||
1068 | memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE); | ||
1069 | curseg->next_segno = segno; | ||
1070 | reset_curseg(sbi, type, 0); | ||
1071 | curseg->alloc_type = ckpt->alloc_type[type]; | ||
1072 | curseg->next_blkoff = blk_off; | ||
1073 | mutex_unlock(&curseg->curseg_mutex); | ||
1074 | f2fs_put_page(new, 1); | ||
1075 | return 0; | ||
1076 | } | ||
1077 | |||
1078 | static int restore_curseg_summaries(struct f2fs_sb_info *sbi) | ||
1079 | { | ||
1080 | int type = CURSEG_HOT_DATA; | ||
1081 | |||
1082 | if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) { | ||
1083 | /* restore for compacted data summary */ | ||
1084 | if (read_compacted_summaries(sbi)) | ||
1085 | return -EINVAL; | ||
1086 | type = CURSEG_HOT_NODE; | ||
1087 | } | ||
1088 | |||
1089 | for (; type <= CURSEG_COLD_NODE; type++) | ||
1090 | if (read_normal_summaries(sbi, type)) | ||
1091 | return -EINVAL; | ||
1092 | return 0; | ||
1093 | } | ||
1094 | |||
1095 | static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) | ||
1096 | { | ||
1097 | struct page *page; | ||
1098 | unsigned char *kaddr; | ||
1099 | struct f2fs_summary *summary; | ||
1100 | struct curseg_info *seg_i; | ||
1101 | int written_size = 0; | ||
1102 | int i, j; | ||
1103 | |||
1104 | page = grab_meta_page(sbi, blkaddr++); | ||
1105 | kaddr = (unsigned char *)page_address(page); | ||
1106 | |||
1107 | /* Step 1: write nat cache */ | ||
1108 | seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); | ||
1109 | memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE); | ||
1110 | written_size += SUM_JOURNAL_SIZE; | ||
1111 | |||
1112 | /* Step 2: write sit cache */ | ||
1113 | seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); | ||
1114 | memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits, | ||
1115 | SUM_JOURNAL_SIZE); | ||
1116 | written_size += SUM_JOURNAL_SIZE; | ||
1117 | |||
1118 | set_page_dirty(page); | ||
1119 | |||
1120 | /* Step 3: write summary entries */ | ||
1121 | for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { | ||
1122 | unsigned short blkoff; | ||
1123 | seg_i = CURSEG_I(sbi, i); | ||
1124 | if (sbi->ckpt->alloc_type[i] == SSR) | ||
1125 | blkoff = sbi->blocks_per_seg; | ||
1126 | else | ||
1127 | blkoff = curseg_blkoff(sbi, i); | ||
1128 | |||
1129 | for (j = 0; j < blkoff; j++) { | ||
1130 | if (!page) { | ||
1131 | page = grab_meta_page(sbi, blkaddr++); | ||
1132 | kaddr = (unsigned char *)page_address(page); | ||
1133 | written_size = 0; | ||
1134 | } | ||
1135 | summary = (struct f2fs_summary *)(kaddr + written_size); | ||
1136 | *summary = seg_i->sum_blk->entries[j]; | ||
1137 | written_size += SUMMARY_SIZE; | ||
1138 | set_page_dirty(page); | ||
1139 | |||
1140 | if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - | ||
1141 | SUM_FOOTER_SIZE) | ||
1142 | continue; | ||
1143 | |||
1144 | f2fs_put_page(page, 1); | ||
1145 | page = NULL; | ||
1146 | } | ||
1147 | } | ||
1148 | if (page) | ||
1149 | f2fs_put_page(page, 1); | ||
1150 | } | ||
1151 | |||
1152 | static void write_normal_summaries(struct f2fs_sb_info *sbi, | ||
1153 | block_t blkaddr, int type) | ||
1154 | { | ||
1155 | int i, end; | ||
1156 | if (IS_DATASEG(type)) | ||
1157 | end = type + NR_CURSEG_DATA_TYPE; | ||
1158 | else | ||
1159 | end = type + NR_CURSEG_NODE_TYPE; | ||
1160 | |||
1161 | for (i = type; i < end; i++) { | ||
1162 | struct curseg_info *sum = CURSEG_I(sbi, i); | ||
1163 | mutex_lock(&sum->curseg_mutex); | ||
1164 | write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type)); | ||
1165 | mutex_unlock(&sum->curseg_mutex); | ||
1166 | } | ||
1167 | } | ||
1168 | |||
1169 | void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) | ||
1170 | { | ||
1171 | if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) | ||
1172 | write_compacted_summaries(sbi, start_blk); | ||
1173 | else | ||
1174 | write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); | ||
1175 | } | ||
1176 | |||
1177 | void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) | ||
1178 | { | ||
1179 | if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) | ||
1180 | write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); | ||
1181 | return; | ||
1182 | } | ||
1183 | |||
1184 | int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type, | ||
1185 | unsigned int val, int alloc) | ||
1186 | { | ||
1187 | int i; | ||
1188 | |||
1189 | if (type == NAT_JOURNAL) { | ||
1190 | for (i = 0; i < nats_in_cursum(sum); i++) { | ||
1191 | if (le32_to_cpu(nid_in_journal(sum, i)) == val) | ||
1192 | return i; | ||
1193 | } | ||
1194 | if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) | ||
1195 | return update_nats_in_cursum(sum, 1); | ||
1196 | } else if (type == SIT_JOURNAL) { | ||
1197 | for (i = 0; i < sits_in_cursum(sum); i++) | ||
1198 | if (le32_to_cpu(segno_in_journal(sum, i)) == val) | ||
1199 | return i; | ||
1200 | if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES) | ||
1201 | return update_sits_in_cursum(sum, 1); | ||
1202 | } | ||
1203 | return -1; | ||
1204 | } | ||
1205 | |||
1206 | static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, | ||
1207 | unsigned int segno) | ||
1208 | { | ||
1209 | struct sit_info *sit_i = SIT_I(sbi); | ||
1210 | unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno); | ||
1211 | block_t blk_addr = sit_i->sit_base_addr + offset; | ||
1212 | |||
1213 | check_seg_range(sbi, segno); | ||
1214 | |||
1215 | /* calculate sit block address */ | ||
1216 | if (f2fs_test_bit(offset, sit_i->sit_bitmap)) | ||
1217 | blk_addr += sit_i->sit_blocks; | ||
1218 | |||
1219 | return get_meta_page(sbi, blk_addr); | ||
1220 | } | ||
1221 | |||
1222 | static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, | ||
1223 | unsigned int start) | ||
1224 | { | ||
1225 | struct sit_info *sit_i = SIT_I(sbi); | ||
1226 | struct page *src_page, *dst_page; | ||
1227 | pgoff_t src_off, dst_off; | ||
1228 | void *src_addr, *dst_addr; | ||
1229 | |||
1230 | src_off = current_sit_addr(sbi, start); | ||
1231 | dst_off = next_sit_addr(sbi, src_off); | ||
1232 | |||
1233 | /* get current sit block page without lock */ | ||
1234 | src_page = get_meta_page(sbi, src_off); | ||
1235 | dst_page = grab_meta_page(sbi, dst_off); | ||
1236 | BUG_ON(PageDirty(src_page)); | ||
1237 | |||
1238 | src_addr = page_address(src_page); | ||
1239 | dst_addr = page_address(dst_page); | ||
1240 | memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); | ||
1241 | |||
1242 | set_page_dirty(dst_page); | ||
1243 | f2fs_put_page(src_page, 1); | ||
1244 | |||
1245 | set_to_next_sit(sit_i, start); | ||
1246 | |||
1247 | return dst_page; | ||
1248 | } | ||
1249 | |||
1250 | static bool flush_sits_in_journal(struct f2fs_sb_info *sbi) | ||
1251 | { | ||
1252 | struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); | ||
1253 | struct f2fs_summary_block *sum = curseg->sum_blk; | ||
1254 | int i; | ||
1255 | |||
1256 | /* | ||
1257 | * If the journal area in the current summary is full of sit entries, | ||
1258 | * all the sit entries will be flushed. Otherwise the sit entries | ||
1259 | * cannot be replaced with new hot sit entries. | ||
1260 | */ | ||
1261 | if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) { | ||
1262 | for (i = sits_in_cursum(sum) - 1; i >= 0; i--) { | ||
1263 | unsigned int segno; | ||
1264 | segno = le32_to_cpu(segno_in_journal(sum, i)); | ||
1265 | __mark_sit_entry_dirty(sbi, segno); | ||
1266 | } | ||
1267 | update_sits_in_cursum(sum, -sits_in_cursum(sum)); | ||
1268 | return 1; | ||
1269 | } | ||
1270 | return 0; | ||
1271 | } | ||
1272 | |||
1273 | /* | ||
1274 | * CP calls this function, which flushes SIT entries including sit_journal, | ||
1275 | * and moves prefree segs to free segs. | ||
1276 | */ | ||
1277 | void flush_sit_entries(struct f2fs_sb_info *sbi) | ||
1278 | { | ||
1279 | struct sit_info *sit_i = SIT_I(sbi); | ||
1280 | unsigned long *bitmap = sit_i->dirty_sentries_bitmap; | ||
1281 | struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); | ||
1282 | struct f2fs_summary_block *sum = curseg->sum_blk; | ||
1283 | unsigned long nsegs = TOTAL_SEGS(sbi); | ||
1284 | struct page *page = NULL; | ||
1285 | struct f2fs_sit_block *raw_sit = NULL; | ||
1286 | unsigned int start = 0, end = 0; | ||
1287 | unsigned int segno = -1; | ||
1288 | bool flushed; | ||
1289 | |||
1290 | mutex_lock(&curseg->curseg_mutex); | ||
1291 | mutex_lock(&sit_i->sentry_lock); | ||
1292 | |||
1293 | /* | ||
1294 | * "flushed" indicates whether sit entries in journal are flushed | ||
1295 | * to the SIT area or not. | ||
1296 | */ | ||
1297 | flushed = flush_sits_in_journal(sbi); | ||
1298 | |||
1299 | while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) { | ||
1300 | struct seg_entry *se = get_seg_entry(sbi, segno); | ||
1301 | int sit_offset, offset; | ||
1302 | |||
1303 | sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); | ||
1304 | |||
1305 | if (flushed) | ||
1306 | goto to_sit_page; | ||
1307 | |||
1308 | offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1); | ||
1309 | if (offset >= 0) { | ||
1310 | segno_in_journal(sum, offset) = cpu_to_le32(segno); | ||
1311 | seg_info_to_raw_sit(se, &sit_in_journal(sum, offset)); | ||
1312 | goto flush_done; | ||
1313 | } | ||
1314 | to_sit_page: | ||
1315 | if (!page || (start > segno) || (segno > end)) { | ||
1316 | if (page) { | ||
1317 | f2fs_put_page(page, 1); | ||
1318 | page = NULL; | ||
1319 | } | ||
1320 | |||
1321 | start = START_SEGNO(sit_i, segno); | ||
1322 | end = start + SIT_ENTRY_PER_BLOCK - 1; | ||
1323 | |||
1324 | /* read sit block that will be updated */ | ||
1325 | page = get_next_sit_page(sbi, start); | ||
1326 | raw_sit = page_address(page); | ||
1327 | } | ||
1328 | |||
1329 | /* update entry in SIT block */ | ||
1330 | seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]); | ||
1331 | flush_done: | ||
1332 | __clear_bit(segno, bitmap); | ||
1333 | sit_i->dirty_sentries--; | ||
1334 | } | ||
1335 | mutex_unlock(&sit_i->sentry_lock); | ||
1336 | mutex_unlock(&curseg->curseg_mutex); | ||
1337 | |||
1338 | /* writeout last modified SIT block */ | ||
1339 | f2fs_put_page(page, 1); | ||
1340 | |||
1341 | set_prefree_as_free_segments(sbi); | ||
1342 | } | ||
1343 | |||
1344 | static int build_sit_info(struct f2fs_sb_info *sbi) | ||
1345 | { | ||
1346 | struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); | ||
1347 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | ||
1348 | struct sit_info *sit_i; | ||
1349 | unsigned int sit_segs, start; | ||
1350 | char *src_bitmap, *dst_bitmap; | ||
1351 | unsigned int bitmap_size; | ||
1352 | |||
1353 | /* allocate memory for SIT information */ | ||
1354 | sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL); | ||
1355 | if (!sit_i) | ||
1356 | return -ENOMEM; | ||
1357 | |||
1358 | SM_I(sbi)->sit_info = sit_i; | ||
1359 | |||
1360 | sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry)); | ||
1361 | if (!sit_i->sentries) | ||
1362 | return -ENOMEM; | ||
1363 | |||
1364 | bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); | ||
1365 | sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
1366 | if (!sit_i->dirty_sentries_bitmap) | ||
1367 | return -ENOMEM; | ||
1368 | |||
1369 | for (start = 0; start < TOTAL_SEGS(sbi); start++) { | ||
1370 | sit_i->sentries[start].cur_valid_map | ||
1371 | = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); | ||
1372 | sit_i->sentries[start].ckpt_valid_map | ||
1373 | = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); | ||
1374 | if (!sit_i->sentries[start].cur_valid_map | ||
1375 | || !sit_i->sentries[start].ckpt_valid_map) | ||
1376 | return -ENOMEM; | ||
1377 | } | ||
1378 | |||
1379 | if (sbi->segs_per_sec > 1) { | ||
1380 | sit_i->sec_entries = vzalloc(sbi->total_sections * | ||
1381 | sizeof(struct sec_entry)); | ||
1382 | if (!sit_i->sec_entries) | ||
1383 | return -ENOMEM; | ||
1384 | } | ||
1385 | |||
1386 | /* get information related to SIT */ | ||
1387 | sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1; | ||
1388 | |||
1389 | /* setup SIT bitmap from checkpoint pack */ | ||
1390 | bitmap_size = __bitmap_size(sbi, SIT_BITMAP); | ||
1391 | src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); | ||
1392 | |||
1393 | dst_bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
1394 | if (!dst_bitmap) | ||
1395 | return -ENOMEM; | ||
1396 | memcpy(dst_bitmap, src_bitmap, bitmap_size); | ||
1397 | |||
1398 | /* init SIT information */ | ||
1399 | sit_i->s_ops = &default_salloc_ops; | ||
1400 | |||
1401 | sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); | ||
1402 | sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; | ||
1403 | sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count); | ||
1404 | sit_i->sit_bitmap = dst_bitmap; | ||
1405 | sit_i->bitmap_size = bitmap_size; | ||
1406 | sit_i->dirty_sentries = 0; | ||
1407 | sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; | ||
1408 | sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); | ||
1409 | sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; | ||
1410 | mutex_init(&sit_i->sentry_lock); | ||
1411 | return 0; | ||
1412 | } | ||
1413 | |||
1414 | static int build_free_segmap(struct f2fs_sb_info *sbi) | ||
1415 | { | ||
1416 | struct f2fs_sm_info *sm_info = SM_I(sbi); | ||
1417 | struct free_segmap_info *free_i; | ||
1418 | unsigned int bitmap_size, sec_bitmap_size; | ||
1419 | |||
1420 | /* allocate memory for free segmap information */ | ||
1421 | free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL); | ||
1422 | if (!free_i) | ||
1423 | return -ENOMEM; | ||
1424 | |||
1425 | SM_I(sbi)->free_info = free_i; | ||
1426 | |||
1427 | bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); | ||
1428 | free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL); | ||
1429 | if (!free_i->free_segmap) | ||
1430 | return -ENOMEM; | ||
1431 | |||
1432 | sec_bitmap_size = f2fs_bitmap_size(sbi->total_sections); | ||
1433 | free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL); | ||
1434 | if (!free_i->free_secmap) | ||
1435 | return -ENOMEM; | ||
1436 | |||
1437 | /* set all segments as dirty temporarily */ | ||
1438 | memset(free_i->free_segmap, 0xff, bitmap_size); | ||
1439 | memset(free_i->free_secmap, 0xff, sec_bitmap_size); | ||
1440 | |||
1441 | /* init free segmap information */ | ||
1442 | free_i->start_segno = | ||
1443 | (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr); | ||
1444 | free_i->free_segments = 0; | ||
1445 | free_i->free_sections = 0; | ||
1446 | rwlock_init(&free_i->segmap_lock); | ||
1447 | return 0; | ||
1448 | } | ||
1449 | |||
1450 | static int build_curseg(struct f2fs_sb_info *sbi) | ||
1451 | { | ||
1452 | struct curseg_info *array; | ||
1453 | int i; | ||
1454 | |||
1455 | array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL); | ||
1456 | if (!array) | ||
1457 | return -ENOMEM; | ||
1458 | |||
1459 | SM_I(sbi)->curseg_array = array; | ||
1460 | |||
1461 | for (i = 0; i < NR_CURSEG_TYPE; i++) { | ||
1462 | mutex_init(&array[i].curseg_mutex); | ||
1463 | array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); | ||
1464 | if (!array[i].sum_blk) | ||
1465 | return -ENOMEM; | ||
1466 | array[i].segno = NULL_SEGNO; | ||
1467 | array[i].next_blkoff = 0; | ||
1468 | } | ||
1469 | return restore_curseg_summaries(sbi); | ||
1470 | } | ||
1471 | |||
1472 | static void build_sit_entries(struct f2fs_sb_info *sbi) | ||
1473 | { | ||
1474 | struct sit_info *sit_i = SIT_I(sbi); | ||
1475 | struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); | ||
1476 | struct f2fs_summary_block *sum = curseg->sum_blk; | ||
1477 | unsigned int start; | ||
1478 | |||
1479 | for (start = 0; start < TOTAL_SEGS(sbi); start++) { | ||
1480 | struct seg_entry *se = &sit_i->sentries[start]; | ||
1481 | struct f2fs_sit_block *sit_blk; | ||
1482 | struct f2fs_sit_entry sit; | ||
1483 | struct page *page; | ||
1484 | int i; | ||
1485 | |||
1486 | mutex_lock(&curseg->curseg_mutex); | ||
1487 | for (i = 0; i < sits_in_cursum(sum); i++) { | ||
1488 | if (le32_to_cpu(segno_in_journal(sum, i)) == start) { | ||
1489 | sit = sit_in_journal(sum, i); | ||
1490 | mutex_unlock(&curseg->curseg_mutex); | ||
1491 | goto got_it; | ||
1492 | } | ||
1493 | } | ||
1494 | mutex_unlock(&curseg->curseg_mutex); | ||
1495 | page = get_current_sit_page(sbi, start); | ||
1496 | sit_blk = (struct f2fs_sit_block *)page_address(page); | ||
1497 | sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; | ||
1498 | f2fs_put_page(page, 1); | ||
1499 | got_it: | ||
1500 | check_block_count(sbi, start, &sit); | ||
1501 | seg_info_from_raw_sit(se, &sit); | ||
1502 | if (sbi->segs_per_sec > 1) { | ||
1503 | struct sec_entry *e = get_sec_entry(sbi, start); | ||
1504 | e->valid_blocks += se->valid_blocks; | ||
1505 | } | ||
1506 | } | ||
1507 | } | ||
1508 | |||
1509 | static void init_free_segmap(struct f2fs_sb_info *sbi) | ||
1510 | { | ||
1511 | unsigned int start; | ||
1512 | int type; | ||
1513 | |||
1514 | for (start = 0; start < TOTAL_SEGS(sbi); start++) { | ||
1515 | struct seg_entry *sentry = get_seg_entry(sbi, start); | ||
1516 | if (!sentry->valid_blocks) | ||
1517 | __set_free(sbi, start); | ||
1518 | } | ||
1519 | |||
1520 | /* set the current segments as in use */ | ||
1521 | for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { | ||
1522 | struct curseg_info *curseg_t = CURSEG_I(sbi, type); | ||
1523 | __set_test_and_inuse(sbi, curseg_t->segno); | ||
1524 | } | ||
1525 | } | ||
1526 | |||
1527 | static void init_dirty_segmap(struct f2fs_sb_info *sbi) | ||
1528 | { | ||
1529 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
1530 | struct free_segmap_info *free_i = FREE_I(sbi); | ||
1531 | unsigned int segno = 0, offset = 0; | ||
1532 | unsigned short valid_blocks; | ||
1533 | |||
1534 | while (segno < TOTAL_SEGS(sbi)) { | ||
1535 | /* find the next in-use segment from the free segmap */ | ||
1536 | segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset); | ||
1537 | if (segno >= TOTAL_SEGS(sbi)) | ||
1538 | break; | ||
1539 | offset = segno + 1; | ||
1540 | valid_blocks = get_valid_blocks(sbi, segno, 0); | ||
1541 | if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks) | ||
1542 | continue; | ||
1543 | mutex_lock(&dirty_i->seglist_lock); | ||
1544 | __locate_dirty_segment(sbi, segno, DIRTY); | ||
1545 | mutex_unlock(&dirty_i->seglist_lock); | ||
1546 | } | ||
1547 | } | ||
1548 | |||
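| /* Allocate the victim bitmaps used by foreground and background GC. */ | ||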
1549 | static int init_victim_segmap(struct f2fs_sb_info *sbi) | ||
1550 | { | ||
1551 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
1552 | unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); | ||
1553 | |||
1554 | dirty_i->victim_segmap[FG_GC] = kzalloc(bitmap_size, GFP_KERNEL); | ||
1555 | dirty_i->victim_segmap[BG_GC] = kzalloc(bitmap_size, GFP_KERNEL); | ||
1556 | if (!dirty_i->victim_segmap[FG_GC] || !dirty_i->victim_segmap[BG_GC]) | ||
1557 | return -ENOMEM; | ||
1558 | return 0; | ||
1559 | } | ||
1560 | |||
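| /* | ||
| * Allocate the dirty segment list information and its per-type bitmaps, | ||
| * then populate the dirty and victim segment maps. | ||
| */ | ||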
1561 | static int build_dirty_segmap(struct f2fs_sb_info *sbi) | ||
1562 | { | ||
1563 | struct dirty_seglist_info *dirty_i; | ||
1564 | unsigned int bitmap_size, i; | ||
1565 | |||
1566 | /* allocate memory for the dirty segment list information */ | ||
1567 | dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); | ||
1568 | if (!dirty_i) | ||
1569 | return -ENOMEM; | ||
1570 | |||
1571 | SM_I(sbi)->dirty_info = dirty_i; | ||
1572 | mutex_init(&dirty_i->seglist_lock); | ||
1573 | |||
1574 | bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); | ||
1575 | |||
1576 | for (i = 0; i < NR_DIRTY_TYPE; i++) { | ||
1577 | dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL); | ||
1578 | if (!dirty_i->dirty_segmap[i]) | ||
1579 | return -ENOMEM; | ||
1580 | } | ||
1581 | |||
1582 | init_dirty_segmap(sbi); | ||
1583 | return init_victim_segmap(sbi); | ||
1584 | } | ||
1585 | |||
1586 | /* | ||
1587 | * Update min, max modified time for cost-benefit GC algorithm | ||
1588 | */ | ||
1589 | static void init_min_max_mtime(struct f2fs_sb_info *sbi) | ||
1590 | { | ||
1591 | struct sit_info *sit_i = SIT_I(sbi); | ||
1592 | unsigned int segno; | ||
1593 | |||
1594 | mutex_lock(&sit_i->sentry_lock); | ||
1595 | |||
1596 | sit_i->min_mtime = LLONG_MAX; | ||
1597 | |||
1598 | for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) { | ||
1599 | unsigned int i; | ||
1600 | unsigned long long mtime = 0; | ||
1601 | |||
1602 | for (i = 0; i < sbi->segs_per_sec; i++) | ||
1603 | mtime += get_seg_entry(sbi, segno + i)->mtime; | ||
1604 | |||
1605 | mtime = div_u64(mtime, sbi->segs_per_sec); | ||
1606 | |||
1607 | if (sit_i->min_mtime > mtime) | ||
1608 | sit_i->min_mtime = mtime; | ||
1609 | } | ||
1610 | sit_i->max_mtime = get_mtime(sbi); | ||
1611 | mutex_unlock(&sit_i->sentry_lock); | ||
1612 | } | ||
1613 | |||
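| /* | ||
| * Entry point for setting up the segment manager: copy the layout from the | ||
| * raw superblock and checkpoint, then build the SIT, free, curseg and dirty | ||
| * segment structures in order. Partially built structures are not freed | ||
| * here on error. | ||
| */ | ||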
1614 | int build_segment_manager(struct f2fs_sb_info *sbi) | ||
1615 | { | ||
1616 | struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); | ||
1617 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | ||
1618 | struct f2fs_sm_info *sm_info; | ||
1619 | int err; | ||
1620 | |||
1621 | sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); | ||
1622 | if (!sm_info) | ||
1623 | return -ENOMEM; | ||
1624 | |||
1625 | /* init sm info */ | ||
1626 | sbi->sm_info = sm_info; | ||
1627 | INIT_LIST_HEAD(&sm_info->wblist_head); | ||
1628 | spin_lock_init(&sm_info->wblist_lock); | ||
1629 | sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); | ||
1630 | sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); | ||
1631 | sm_info->segment_count = le32_to_cpu(raw_super->segment_count); | ||
1632 | sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); | ||
1633 | sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); | ||
1634 | sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); | ||
1635 | sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); | ||
1636 | |||
1637 | err = build_sit_info(sbi); | ||
1638 | if (err) | ||
1639 | return err; | ||
1640 | err = build_free_segmap(sbi); | ||
1641 | if (err) | ||
1642 | return err; | ||
1643 | err = build_curseg(sbi); | ||
1644 | if (err) | ||
1645 | return err; | ||
1646 | |||
1647 | /* reinit free segmap based on SIT */ | ||
1648 | build_sit_entries(sbi); | ||
1649 | |||
1650 | init_free_segmap(sbi); | ||
1651 | err = build_dirty_segmap(sbi); | ||
1652 | if (err) | ||
1653 | return err; | ||
1654 | |||
1655 | init_min_max_mtime(sbi); | ||
1656 | return 0; | ||
1657 | } | ||
1658 | |||
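| /* Free the dirty bitmap of one dirty type and reset its counter. */ | ||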
1659 | static void discard_dirty_segmap(struct f2fs_sb_info *sbi, | ||
1660 | enum dirty_type dirty_type) | ||
1661 | { | ||
1662 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
1663 | |||
1664 | mutex_lock(&dirty_i->seglist_lock); | ||
1665 | kfree(dirty_i->dirty_segmap[dirty_type]); | ||
1666 | dirty_i->nr_dirty[dirty_type] = 0; | ||
1667 | mutex_unlock(&dirty_i->seglist_lock); | ||
1668 | } | ||
1669 | |||
1670 | void reset_victim_segmap(struct f2fs_sb_info *sbi) | ||
1671 | { | ||
1672 | unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); | ||
1673 | memset(DIRTY_I(sbi)->victim_segmap[FG_GC], 0, bitmap_size); | ||
1674 | } | ||
1675 | |||
1676 | static void destroy_victim_segmap(struct f2fs_sb_info *sbi) | ||
1677 | { | ||
1678 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
1679 | |||
1680 | kfree(dirty_i->victim_segmap[FG_GC]); | ||
1681 | kfree(dirty_i->victim_segmap[BG_GC]); | ||
1682 | } | ||
1683 | |||
1684 | static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) | ||
1685 | { | ||
1686 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | ||
1687 | int i; | ||
1688 | |||
1689 | if (!dirty_i) | ||
1690 | return; | ||
1691 | |||
1692 | /* discard pre-free/dirty segments list */ | ||
1693 | for (i = 0; i < NR_DIRTY_TYPE; i++) | ||
1694 | discard_dirty_segmap(sbi, i); | ||
1695 | |||
1696 | destroy_victim_segmap(sbi); | ||
1697 | SM_I(sbi)->dirty_info = NULL; | ||
1698 | kfree(dirty_i); | ||
1699 | } | ||
1700 | |||
1701 | static void destroy_curseg(struct f2fs_sb_info *sbi) | ||
1702 | { | ||
1703 | struct curseg_info *array = SM_I(sbi)->curseg_array; | ||
1704 | int i; | ||
1705 | |||
1706 | if (!array) | ||
1707 | return; | ||
1708 | SM_I(sbi)->curseg_array = NULL; | ||
1709 | for (i = 0; i < NR_CURSEG_TYPE; i++) | ||
1710 | kfree(array[i].sum_blk); | ||
1711 | kfree(array); | ||
1712 | } | ||
1713 | |||
1714 | static void destroy_free_segmap(struct f2fs_sb_info *sbi) | ||
1715 | { | ||
1716 | struct free_segmap_info *free_i = SM_I(sbi)->free_info; | ||
1717 | if (!free_i) | ||
1718 | return; | ||
1719 | SM_I(sbi)->free_info = NULL; | ||
1720 | kfree(free_i->free_segmap); | ||
1721 | kfree(free_i->free_secmap); | ||
1722 | kfree(free_i); | ||
1723 | } | ||
1724 | |||
1725 | static void destroy_sit_info(struct f2fs_sb_info *sbi) | ||
1726 | { | ||
1727 | struct sit_info *sit_i = SIT_I(sbi); | ||
1728 | unsigned int start; | ||
1729 | |||
1730 | if (!sit_i) | ||
1731 | return; | ||
1732 | |||
1733 | if (sit_i->sentries) { | ||
1734 | for (start = 0; start < TOTAL_SEGS(sbi); start++) { | ||
1735 | kfree(sit_i->sentries[start].cur_valid_map); | ||
1736 | kfree(sit_i->sentries[start].ckpt_valid_map); | ||
1737 | } | ||
1738 | } | ||
1739 | vfree(sit_i->sentries); | ||
1740 | vfree(sit_i->sec_entries); | ||
1741 | kfree(sit_i->dirty_sentries_bitmap); | ||
1742 | |||
1743 | SM_I(sbi)->sit_info = NULL; | ||
1744 | kfree(sit_i->sit_bitmap); | ||
1745 | kfree(sit_i); | ||
1746 | } | ||
1747 | |||
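| /* Tear down the segment manager in the reverse order of construction. */ | ||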
1748 | void destroy_segment_manager(struct f2fs_sb_info *sbi) | ||
1749 | { | ||
1750 | struct f2fs_sm_info *sm_info = SM_I(sbi); | ||
1751 | destroy_dirty_segmap(sbi); | ||
1752 | destroy_curseg(sbi); | ||
1753 | destroy_free_segmap(sbi); | ||
1754 | destroy_sit_info(sbi); | ||
1755 | sbi->sm_info = NULL; | ||
1756 | kfree(sm_info); | ||
1757 | } | ||