Diffstat (limited to 'fs/f2fs/checkpoint.c')
-rw-r--r--    fs/f2fs/checkpoint.c    793
1 file changed, 793 insertions(+), 0 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
new file mode 100644
index 000000000000..ff3c8439af87
--- /dev/null
+++ b/fs/f2fs/checkpoint.c
@@ -0,0 +1,793 @@
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *orphan_entry_slab;
static struct kmem_cache *inode_entry_slab;

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        struct page *page = NULL;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }

        /* We wait for writeback only inside grab_meta_page() */
        wait_on_page_writeback(page);
        SetPageUptodate(page);
        return page;
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        struct page *page;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }
        if (f2fs_readpage(sbi, page, index, READ_SYNC)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        mark_page_accessed(page);

        /* We do not allow returning an erroneous page */
        return page;
}
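
/*
 * Minimal caller sketch for the no-failure contract above: a meta page
 * comes back locked and up to date, and must be released with
 * f2fs_put_page(page, 1) to drop both the lock and the reference. The
 * helper name and the block address here are illustrative assumptions.
 */
static void example_read_meta_block(struct f2fs_sb_info *sbi, pgoff_t blkaddr)
{
        struct page *page = get_meta_page(sbi, blkaddr);       /* never NULL */
        void *kaddr = page_address(page);

        /* ... interpret the raw block, e.g. as a checkpoint header ... */
        (void)kaddr;

        f2fs_put_page(page, 1); /* the '1' unlocks before putting the page */
}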

static int f2fs_write_meta_page(struct page *page,
                                struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        int err;

        wait_on_page_writeback(page);

        err = write_meta_page(sbi, page, wbc);
        if (err) {
                wbc->pages_skipped++;
                set_page_dirty(page);
        }

        dec_page_count(sbi, F2FS_DIRTY_META);

        /* In this case, we should not unlock this page */
        if (err != AOP_WRITEPAGE_ACTIVATE)
                unlock_page(page);
        return err;
}

static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
        struct block_device *bdev = sbi->sb->s_bdev;
        long written;

        if (wbc->for_kupdate)
                return 0;

        if (get_pages(sbi, F2FS_DIRTY_META) == 0)
                return 0;

        /* if mounting failed, skip writing meta pages */
        mutex_lock(&sbi->cp_mutex);
        written = sync_meta_pages(sbi, META, bio_get_nr_vecs(bdev));
        mutex_unlock(&sbi->cp_mutex);
        wbc->nr_to_write -= written;
        return 0;
}

long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                                long nr_to_write)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        pgoff_t index = 0, end = LONG_MAX;
        struct pagevec pvec;
        long nwritten = 0;
        struct writeback_control wbc = {
                .for_reclaim = 0,
        };

        pagevec_init(&pvec, 0);

        while (index <= end) {
                int i, nr_pages;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                PAGECACHE_TAG_DIRTY,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        lock_page(page);
                        BUG_ON(page->mapping != mapping);
                        BUG_ON(!PageDirty(page));
                        clear_page_dirty_for_io(page);
                        f2fs_write_meta_page(page, &wbc);
                        if (nwritten++ >= nr_to_write)
                                break;
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (nwritten)
                f2fs_submit_bio(sbi, type, nr_to_write == LONG_MAX);

        return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

        SetPageUptodate(page);
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                inc_page_count(sbi, F2FS_DIRTY_META);
                F2FS_SET_SB_DIRT(sbi);
                return 1;
        }
        return 0;
}

const struct address_space_operations f2fs_meta_aops = {
        .writepage      = f2fs_write_meta_page,
        .writepages     = f2fs_write_meta_pages,
        .set_page_dirty = f2fs_set_meta_page_dirty,
};
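
/*
 * This table is what routes meta-area writeback through the functions
 * above. The meta inode's mapping is wired to it when that inode is
 * instantiated, elsewhere in f2fs; conceptually the hookup looks like the
 * hypothetical sketch below (function name is an assumption).
 */
static void example_setup_meta_inode(struct f2fs_sb_info *sbi)
{
        struct inode *inode = sbi->meta_inode;

        /* route ->writepage(s) and page dirtying through f2fs_meta_aops */
        inode->i_mapping->a_ops = &f2fs_meta_aops;
}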

int check_orphan_space(struct f2fs_sb_info *sbi)
{
        unsigned int max_orphans;
        int err = 0;

        /*
         * Considering 512 blocks in a segment, 5 blocks are needed for the
         * cp pack and the log segment summaries. The remaining blocks are
         * used to keep orphan entries. With the limitation of one reserved
         * segment for the cp pack, we can have at most 1020 * 507 orphan
         * entries.
         */
        max_orphans = (sbi->blocks_per_seg - 5) * F2FS_ORPHANS_PER_BLOCK;
        mutex_lock(&sbi->orphan_inode_mutex);
        if (sbi->n_orphans >= max_orphans)
                err = -ENOSPC;
        mutex_unlock(&sbi->orphan_inode_mutex);
        return err;
}
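
/*
 * Worked example for the bound above, assuming the default geometry
 * (the EX_* values are illustrative constants, not read from any
 * superblock):
 */
enum example_orphan_bound {
        EX_BLOCKS_PER_SEG       = 512,
        EX_CP_RESERVED          = 5,    /* cp blocks + log segment summaries */
        EX_ORPHANS_PER_BLOCK    = 1020, /* F2FS_ORPHANS_PER_BLOCK, 4KB blocks */
        /* (512 - 5) * 1020 = 507 * 1020 = 517140 orphan entries at most */
        EX_MAX_ORPHANS          = (EX_BLOCKS_PER_SEG - EX_CP_RESERVED)
                                        * EX_ORPHANS_PER_BLOCK,
};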

void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct list_head *head, *this;
        struct orphan_inode_entry *new = NULL, *orphan = NULL;

        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;
        list_for_each(this, head) {
                orphan = list_entry(this, struct orphan_inode_entry, list);
                if (orphan->ino == ino)
                        goto out;
                if (orphan->ino > ino)
                        break;
                orphan = NULL;
        }
retry:
        new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
        if (!new) {
                cond_resched();
                goto retry;
        }
        new->ino = ino;

        /* add the new entry into the list, which is sorted by inode number */
        if (orphan) {
                struct orphan_inode_entry *prev;

                /* get the previous entry */
                prev = list_entry(orphan->list.prev, typeof(*prev), list);
                if (&prev->list != head)
                        /* insert the new orphan inode entry */
                        list_add(&new->list, &prev->list);
                else
                        list_add(&new->list, head);
        } else {
                list_add_tail(&new->list, head);
        }
        sbi->n_orphans++;
out:
        mutex_unlock(&sbi->orphan_inode_mutex);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct list_head *this, *next, *head;
        struct orphan_inode_entry *orphan;

        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;
        list_for_each_safe(this, next, head) {
                orphan = list_entry(this, struct orphan_inode_entry, list);
                if (orphan->ino == ino) {
                        list_del(&orphan->list);
                        kmem_cache_free(orphan_entry_slab, orphan);
                        sbi->n_orphans--;
                        break;
                }
        }
        mutex_unlock(&sbi->orphan_inode_mutex);
}
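
/*
 * Hypothetical caller sketch for the orphan registry: the unlink path
 * would reserve space and register the inode, and the final eviction path
 * would deregister it once its blocks are gone. Only check_orphan_space(),
 * add_orphan_inode() and remove_orphan_inode() are from this file; the
 * surrounding helpers are assumptions for illustration.
 */
static int example_unlink_step(struct f2fs_sb_info *sbi, struct inode *inode)
{
        int err = check_orphan_space(sbi);      /* room left in the cp pack? */

        if (err)
                return err;

        /* still-open but unlinked inode: record it so a crash cannot leak it */
        if (inode->i_nlink == 0)
                add_orphan_inode(sbi, inode->i_ino);
        return 0;
}

static void example_evict_step(struct f2fs_sb_info *sbi, struct inode *inode)
{
        /* last reference dropped and data truncated: forget the orphan */
        remove_orphan_inode(sbi, inode->i_ino);
}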

static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct inode *inode = f2fs_iget(sbi->sb, ino);
        BUG_ON(IS_ERR(inode));
        clear_nlink(inode);

        /* truncate all the data during iput */
        iput(inode);
}

int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
        block_t start_blk, orphan_blkaddr, i, j;

        if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
                return 0;

        sbi->por_doing = 1;
        start_blk = __start_cp_addr(sbi) + 1;
        orphan_blkaddr = __start_sum_addr(sbi) - 1;

        for (i = 0; i < orphan_blkaddr; i++) {
                struct page *page = get_meta_page(sbi, start_blk + i);
                struct f2fs_orphan_block *orphan_blk;

                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
                        nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
                        recover_orphan_inode(sbi, ino);
                }
                f2fs_put_page(page, 1);
        }
        /* clear the orphan flag */
        clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
        sbi->por_doing = 0;
        return 0;
}

static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
        struct list_head *head, *this, *next;
        struct f2fs_orphan_block *orphan_blk = NULL;
        struct page *page = NULL;
        unsigned int nentries = 0;
        unsigned short index = 1;
        unsigned short orphan_blocks;

        orphan_blocks = (unsigned short)((sbi->n_orphans +
                (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);

        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;

        /* loop over each orphan inode entry and write them in journal blocks */
        list_for_each_safe(this, next, head) {
                struct orphan_inode_entry *orphan;

                orphan = list_entry(this, struct orphan_inode_entry, list);

                if (nentries == F2FS_ORPHANS_PER_BLOCK) {
                        /*
                         * The orphan block is full of its 1020 entries;
                         * flush the current orphan block and bring
                         * another one into memory.
                         */
                        orphan_blk->blk_addr = cpu_to_le16(index);
                        orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                        orphan_blk->entry_count = cpu_to_le32(nentries);
                        set_page_dirty(page);
                        f2fs_put_page(page, 1);
                        index++;
                        start_blk++;
                        nentries = 0;
                        page = NULL;
                }
                if (page)
                        goto page_exist;

                page = grab_meta_page(sbi, start_blk);
                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                memset(orphan_blk, 0, sizeof(*orphan_blk));
page_exist:
                orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
        }
        if (!page)
                goto end;

        orphan_blk->blk_addr = cpu_to_le16(index);
        orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
        orphan_blk->entry_count = cpu_to_le32(nentries);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
end:
        mutex_unlock(&sbi->orphan_inode_mutex);
}
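
/*
 * For reference, the on-disk block being filled above is laid out roughly
 * as follows (a sketch modeled on include/linux/f2fs_fs.h; field order and
 * the reserved/check_sum members should be verified against the header
 * actually in the tree):
 */
struct example_orphan_block_layout {
        __le32 ino[F2FS_ORPHANS_PER_BLOCK];     /* orphan inode numbers */
        __le32 reserved;                        /* reserved */
        __le16 blk_addr;        /* 1-based index within the CP pack */
        __le16 blk_count;       /* total orphan blocks in this CP */
        __le32 entry_count;     /* valid entries in this block */
        __le32 check_sum;       /* CRC32 of the orphan block */
};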

static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
                                block_t cp_addr, unsigned long long *version)
{
        struct page *cp_page_1, *cp_page_2 = NULL;
        unsigned long blk_size = sbi->blocksize;
        struct f2fs_checkpoint *cp_block;
        unsigned long long cur_version = 0, pre_version = 0;
        unsigned int crc = 0;
        size_t crc_offset;

        /* Read the 1st cp block in this CP pack */
        cp_page_1 = get_meta_page(sbi, cp_addr);

        /* get the version number */
        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp1;

        crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp1;

        pre_version = le64_to_cpu(cp_block->checkpoint_ver);

        /* Read the 2nd cp block in this CP pack */
        cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
        cp_page_2 = get_meta_page(sbi, cp_addr);

        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp2;

        crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp2;

        cur_version = le64_to_cpu(cp_block->checkpoint_ver);

        if (cur_version == pre_version) {
                *version = cur_version;
                f2fs_put_page(cp_page_2, 1);
                return cp_page_1;
        }
invalid_cp2:
        f2fs_put_page(cp_page_2, 1);
invalid_cp1:
        f2fs_put_page(cp_page_1, 1);
        return NULL;
}

int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
        struct f2fs_checkpoint *cp_block;
        struct f2fs_super_block *fsb = sbi->raw_super;
        struct page *cp1, *cp2, *cur_page;
        unsigned long blk_size = sbi->blocksize;
        unsigned long long cp1_version = 0, cp2_version = 0;
        unsigned long long cp_start_blk_no;

        sbi->ckpt = kzalloc(blk_size, GFP_KERNEL);
        if (!sbi->ckpt)
                return -ENOMEM;
        /*
         * Finding a valid cp block involves reading both
         * sets (cp pack 1 and cp pack 2).
         */
        cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

        /* The second checkpoint pack should start at the next segment */
        cp_start_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
        cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

        if (cp1 && cp2) {
                if (ver_after(cp2_version, cp1_version))
                        cur_page = cp2;
                else
                        cur_page = cp1;
        } else if (cp1) {
                cur_page = cp1;
        } else if (cp2) {
                cur_page = cp2;
        } else {
                goto fail_no_cp;
        }

        cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
        memcpy(sbi->ckpt, cp_block, blk_size);

        f2fs_put_page(cp1, 1);
        f2fs_put_page(cp2, 1);
        return 0;

fail_no_cp:
        kfree(sbi->ckpt);
        return -EINVAL;
}
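
/*
 * A CP pack is accepted only when its first and last blocks carry the same
 * version number and both pass the CRC check, so a crash in the middle of
 * writing a pack simply leaves the older pack as the valid one. Below is a
 * minimal sketch of CRC helpers with the semantics assumed for
 * f2fs_crc32()/f2fs_crc_valid() (the real definitions live in f2fs.h; this
 * sketch additionally needs <linux/crc32.h>):
 */
static inline unsigned int example_crc32(void *buf, size_t len)
{
        /* CRC over the block contents up to checksum_offset */
        return crc32(F2FS_SUPER_MAGIC, buf, len);
}

static inline bool example_crc_valid(unsigned int blk_crc,
                                        void *buf, size_t buf_size)
{
        return example_crc32(buf, buf_size) == blk_crc;
}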

void set_dirty_dir_page(struct inode *inode, struct page *page)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct list_head *head = &sbi->dir_inode_list;
        struct dir_inode_entry *new;
        struct list_head *this;

        if (!S_ISDIR(inode->i_mode))
                return;
retry:
        new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        if (!new) {
                cond_resched();
                goto retry;
        }
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);

        spin_lock(&sbi->dir_inode_lock);
        list_for_each(this, head) {
                struct dir_inode_entry *entry;
                entry = list_entry(this, struct dir_inode_entry, list);
                if (entry->inode == inode) {
                        kmem_cache_free(inode_entry_slab, new);
                        goto out;
                }
        }
        list_add_tail(&new->list, head);
        sbi->n_dirty_dirs++;

        BUG_ON(!S_ISDIR(inode->i_mode));
out:
        inc_page_count(sbi, F2FS_DIRTY_DENTS);
        inode_inc_dirty_dents(inode);
        SetPagePrivate(page);

        spin_unlock(&sbi->dir_inode_lock);
}

void remove_dirty_dir_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct list_head *head = &sbi->dir_inode_list;
        struct list_head *this;

        if (!S_ISDIR(inode->i_mode))
                return;

        spin_lock(&sbi->dir_inode_lock);
        if (atomic_read(&F2FS_I(inode)->dirty_dents))
                goto out;

        list_for_each(this, head) {
                struct dir_inode_entry *entry;
                entry = list_entry(this, struct dir_inode_entry, list);
                if (entry->inode == inode) {
                        list_del(&entry->list);
                        kmem_cache_free(inode_entry_slab, entry);
                        sbi->n_dirty_dirs--;
                        break;
                }
        }
out:
        spin_unlock(&sbi->dir_inode_lock);
}

void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
        struct list_head *head = &sbi->dir_inode_list;
        struct dir_inode_entry *entry;
        struct inode *inode;
retry:
        spin_lock(&sbi->dir_inode_lock);
        if (list_empty(head)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }
        entry = list_entry(head->next, struct dir_inode_entry, list);
        inode = igrab(entry->inode);
        spin_unlock(&sbi->dir_inode_lock);
        if (inode) {
                filemap_flush(inode->i_mapping);
                iput(inode);
        } else {
                /*
                 * We should submit the bio, since several dentry pages
                 * under writeback may still exist in the freeing inode.
                 */
                f2fs_submit_bio(sbi, DATA, true);
        }
        goto retry;
}

/*
 * Freeze all the FS operations for checkpoint.
 */
void block_operations(struct f2fs_sb_info *sbi)
{
        int t;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };

        /* Stop renaming operations */
        mutex_lock_op(sbi, RENAME);
        mutex_lock_op(sbi, DENTRY_OPS);

retry_dents:
        /* write all the dirty dentry pages */
        sync_dirty_dir_inodes(sbi);

        mutex_lock_op(sbi, DATA_WRITE);
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                mutex_unlock_op(sbi, DATA_WRITE);
                goto retry_dents;
        }

        /* block all the operations */
        for (t = DATA_NEW; t <= NODE_TRUNC; t++)
                mutex_lock_op(sbi, t);

        mutex_lock(&sbi->write_inode);

        /*
         * POR: we should ensure that there are no dirty node pages
         * until finishing the nat/sit flush.
         */
retry:
        sync_node_pages(sbi, 0, &wbc);

        mutex_lock_op(sbi, NODE_WRITE);

        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                mutex_unlock_op(sbi, NODE_WRITE);
                goto retry;
        }
        mutex_unlock(&sbi->write_inode);
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
        int t;
        for (t = NODE_WRITE; t >= RENAME; t--)
                mutex_unlock_op(sbi, t);
}

static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        nid_t last_nid = 0;
        block_t start_blk;
        struct page *cp_page;
        unsigned int data_sum_blocks, orphan_blocks;
        unsigned int crc32 = 0;
        void *kaddr;
        int i;

        /* Flush all the NAT/SIT pages */
        while (get_pages(sbi, F2FS_DIRTY_META))
                sync_meta_pages(sbi, META, LONG_MAX);

        next_free_nid(sbi, &last_nid);

        /*
         * modify the checkpoint
         * (the version number is already updated)
         */
        ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
        ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
        ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
        for (i = 0; i < 3; i++) {
                ckpt->cur_node_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                ckpt->cur_node_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
                ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
        }
        for (i = 0; i < 3; i++) {
                ckpt->cur_data_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                ckpt->cur_data_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
                ckpt->alloc_type[i + CURSEG_HOT_DATA] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
        }

        ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
        ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
        ckpt->next_free_nid = cpu_to_le32(last_nid);

        /* 2 cp blocks + n data seg summary blocks + orphan inode blocks */
        data_sum_blocks = npages_for_summary_flush(sbi);
        if (data_sum_blocks < 3)
                set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

        orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
                                        / F2FS_ORPHANS_PER_BLOCK;
        ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks);

        if (is_umount) {
                set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
                ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
                        data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE);
        } else {
                clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
                ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
                        data_sum_blocks + orphan_blocks);
        }

        if (sbi->n_orphans)
                set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
        else
                clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

        /* update the SIT/NAT bitmaps */
        get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
        get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

        crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
        *(__le32 *)((unsigned char *)ckpt +
                                le32_to_cpu(ckpt->checksum_offset))
                                = cpu_to_le32(crc32);

        start_blk = __start_cp_addr(sbi);

        /* write out the checkpoint buffer at block 0 */
        cp_page = grab_meta_page(sbi, start_blk++);
        kaddr = page_address(cp_page);
        memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
        set_page_dirty(cp_page);
        f2fs_put_page(cp_page, 1);

        if (sbi->n_orphans) {
                write_orphan_inodes(sbi, start_blk);
                start_blk += orphan_blocks;
        }

        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;
        if (is_umount) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
        }

        /* write out the trailing checkpoint block */
        cp_page = grab_meta_page(sbi, start_blk);
        kaddr = page_address(cp_page);
        memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
        set_page_dirty(cp_page);
        f2fs_put_page(cp_page, 1);

        /* wait for writeback of previously submitted node/meta pages */
        while (get_pages(sbi, F2FS_WRITEBACK))
                congestion_wait(BLK_RW_ASYNC, HZ / 50);

        filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX);
        filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX);

        /* update user_block_counts */
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        sbi->alloc_valid_block_count = 0;

        /* Here, we have only one bio carrying the CP pack */
        if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))
                sbi->sb->s_flags |= MS_RDONLY;
        else
                sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

        clear_prefree_segments(sbi);
        F2FS_RESET_SB_DIRT(sbi);
}
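
/*
 * Example CP pack layout written by do_checkpoint() at umount, assuming
 * orphan_blocks = 1 and data_sum_blocks = 1 (compact summaries); the
 * concrete counts are illustrative:
 *
 *   start_blk + 0 : checkpoint header  (copy of *ckpt, version N)
 *   start_blk + 1 : orphan block
 *   start_blk + 2 : compact data summaries
 *   start_blk + 3 : node summaries     (NR_CURSEG_NODE_TYPE = 3 blocks)
 *   start_blk + 4 :       "
 *   start_blk + 5 :       "
 *   start_blk + 6 : checkpoint trailer (same copy, same version N)
 *
 * cp_pack_total_block_count = 2 + 1 + 1 + 3 = 7, and validate_checkpoint()
 * later uses that count to locate the trailer from the header.
 */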

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
void write_checkpoint(struct f2fs_sb_info *sbi, bool blocked, bool is_umount)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;

        if (!blocked) {
                mutex_lock(&sbi->cp_mutex);
                block_operations(sbi);
        }

        f2fs_submit_bio(sbi, DATA, true);
        f2fs_submit_bio(sbi, NODE, true);
        f2fs_submit_bio(sbi, META, true);

        /*
         * update the checkpoint pack index:
         * increase the version number so that
         * SIT entries and seg summaries are written at the correct place
         */
        ckpt_ver = le64_to_cpu(ckpt->checkpoint_ver);
        ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

        /* write cached NAT/SIT entries to the NAT/SIT area */
        flush_nat_entries(sbi);
        flush_sit_entries(sbi);

        reset_victim_segmap(sbi);

        /* all the fs_lock[] are unlocked after do_checkpoint() */
        do_checkpoint(sbi, is_umount);

        unblock_operations(sbi);
        mutex_unlock(&sbi->cp_mutex);
}
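
/*
 * Hypothetical usage from the sync path (the hook name is an assumption;
 * write_checkpoint()'s signature is the one defined above):
 */
static int example_sync_fs(struct super_block *sb, int sync)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);

        if (sync)
                /* operations not yet blocked; not a umount checkpoint */
                write_checkpoint(sbi, false, false);
        return 0;
}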

void init_orphan_info(struct f2fs_sb_info *sbi)
{
        mutex_init(&sbi->orphan_inode_mutex);
        INIT_LIST_HEAD(&sbi->orphan_inode_list);
        sbi->n_orphans = 0;
}

int __init create_checkpoint_caches(void)
{
        orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
                        sizeof(struct orphan_inode_entry), NULL);
        if (unlikely(!orphan_entry_slab))
                return -ENOMEM;
        inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
                        sizeof(struct dir_inode_entry), NULL);
        if (unlikely(!inode_entry_slab)) {
                kmem_cache_destroy(orphan_entry_slab);
                return -ENOMEM;
        }
        return 0;
}

void destroy_checkpoint_caches(void)
{
        kmem_cache_destroy(orphan_entry_slab);
        kmem_cache_destroy(inode_entry_slab);
}