Diffstat (limited to 'fs/f2fs/checkpoint.c')
-rw-r--r--  fs/f2fs/checkpoint.c  | 792 +
1 file changed, 792 insertions(+), 0 deletions(-)

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
new file mode 100644
index 000000000000..ab743f92ee06
--- /dev/null
+++ b/fs/f2fs/checkpoint.c
@@ -0,0 +1,792 @@
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *orphan_entry_slab;
static struct kmem_cache *inode_entry_slab;

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        struct page *page = NULL;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }

        /* We wait for writeback only inside grab_meta_page() */
        wait_on_page_writeback(page);
        SetPageUptodate(page);
        return page;
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        struct page *page;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page) {
                cond_resched();
                goto repeat;
        }
        if (f2fs_readpage(sbi, page, index, READ_SYNC)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        mark_page_accessed(page);

        /* We do not allow returning an erroneous page */
        return page;
}

static int f2fs_write_meta_page(struct page *page,
                                struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        int err;

        wait_on_page_writeback(page);

        err = write_meta_page(sbi, page, wbc);
        if (err) {
                wbc->pages_skipped++;
                set_page_dirty(page);
        }

        dec_page_count(sbi, F2FS_DIRTY_META);

        /* In this case, we should not unlock this page */
        if (err != AOP_WRITEPAGE_ACTIVATE)
                unlock_page(page);
        return err;
}

static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
        struct block_device *bdev = sbi->sb->s_bdev;
        long written;

        if (wbc->for_kupdate)
                return 0;

        if (get_pages(sbi, F2FS_DIRTY_META) == 0)
                return 0;

        /* if mounting has failed, skip writing meta pages */
        mutex_lock(&sbi->cp_mutex);
        written = sync_meta_pages(sbi, META, bio_get_nr_vecs(bdev));
        mutex_unlock(&sbi->cp_mutex);
        wbc->nr_to_write -= written;
        return 0;
}

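/*
 * Flush up to nr_to_write dirty meta pages; the pending bio is
 * submitted once at the end so that meta writes are batched.
 */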
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                                long nr_to_write)
{
        struct address_space *mapping = sbi->meta_inode->i_mapping;
        pgoff_t index = 0, end = LONG_MAX;
        struct pagevec pvec;
        long nwritten = 0;
        struct writeback_control wbc = {
                .for_reclaim = 0,
        };

        pagevec_init(&pvec, 0);

        while (index <= end) {
                int i, nr_pages;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                PAGECACHE_TAG_DIRTY,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        lock_page(page);
                        BUG_ON(page->mapping != mapping);
                        BUG_ON(!PageDirty(page));
                        clear_page_dirty_for_io(page);
                        f2fs_write_meta_page(page, &wbc);
                        if (nwritten++ >= nr_to_write)
                                break;
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (nwritten)
                f2fs_submit_bio(sbi, type, nr_to_write == LONG_MAX);

        return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

        SetPageUptodate(page);
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                inc_page_count(sbi, F2FS_DIRTY_META);
                F2FS_SET_SB_DIRT(sbi);
                return 1;
        }
        return 0;
}

const struct address_space_operations f2fs_meta_aops = {
        .writepage      = f2fs_write_meta_page,
        .writepages     = f2fs_write_meta_pages,
        .set_page_dirty = f2fs_set_meta_page_dirty,
};

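/*
 * Fail with -ENOSPC once the number of cached orphan entries reaches
 * what a single cp pack can hold; see the layout comment below.
 */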
int check_orphan_space(struct f2fs_sb_info *sbi)
{
        unsigned int max_orphans;
        int err = 0;

        /*
         * Considering 512 blocks in a segment, 5 blocks are needed for
         * the cp block and log segment summaries. The remaining blocks
         * are used to keep orphan entries. With the limitation of one
         * reserved segment for the cp pack, we can have at most
         * 1020 * 507 orphan entries.
         */
        max_orphans = (sbi->blocks_per_seg - 5) * F2FS_ORPHANS_PER_BLOCK;
        mutex_lock(&sbi->orphan_inode_mutex);
        if (sbi->n_orphans >= max_orphans)
                err = -ENOSPC;
        mutex_unlock(&sbi->orphan_inode_mutex);
        return err;
}

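/* Cache an orphan entry, keeping the list sorted by inode number. */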
void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct list_head *head, *this;
        struct orphan_inode_entry *new = NULL, *orphan = NULL;

        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;
        list_for_each(this, head) {
                orphan = list_entry(this, struct orphan_inode_entry, list);
                if (orphan->ino == ino)
                        goto out;
                if (orphan->ino > ino)
                        break;
                orphan = NULL;
        }
retry:
        new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
        if (!new) {
                cond_resched();
                goto retry;
        }
        new->ino = ino;
        INIT_LIST_HEAD(&new->list);

        /* add the new entry into the list, which is sorted by inode number */
        if (orphan) {
                struct orphan_inode_entry *prev;

                /* get the previous entry */
                prev = list_entry(orphan->list.prev, typeof(*prev), list);
                if (&prev->list != head)
                        /* insert the new orphan inode entry */
                        list_add(&new->list, &prev->list);
                else
                        list_add(&new->list, head);
        } else {
                list_add_tail(&new->list, head);
        }
        sbi->n_orphans++;
out:
        mutex_unlock(&sbi->orphan_inode_mutex);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct list_head *this, *next, *head;
        struct orphan_inode_entry *orphan;

        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;
        list_for_each_safe(this, next, head) {
                orphan = list_entry(this, struct orphan_inode_entry, list);
                if (orphan->ino == ino) {
                        list_del(&orphan->list);
                        kmem_cache_free(orphan_entry_slab, orphan);
                        sbi->n_orphans--;
                        break;
                }
        }
        mutex_unlock(&sbi->orphan_inode_mutex);
}

static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct inode *inode = f2fs_iget(sbi->sb, ino);
        BUG_ON(IS_ERR(inode));
        clear_nlink(inode);

        /* truncate all the data during iput */
        iput(inode);
}

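/*
 * Replay the orphan blocks recorded in the cp pack: each listed inode
 * gets its link count cleared and is truncated via the final iput().
 */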
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
        block_t start_blk, orphan_blkaddr, i, j;

        if (!(F2FS_CKPT(sbi)->ckpt_flags & CP_ORPHAN_PRESENT_FLAG))
                return 0;

        sbi->por_doing = 1;
        start_blk = __start_cp_addr(sbi) + 1;
        orphan_blkaddr = __start_sum_addr(sbi) - 1;

        for (i = 0; i < orphan_blkaddr; i++) {
                struct page *page = get_meta_page(sbi, start_blk + i);
                struct f2fs_orphan_block *orphan_blk;

                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
                        nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
                        recover_orphan_inode(sbi, ino);
                }
                f2fs_put_page(page, 1);
        }
        /* clear the orphan flag */
        F2FS_CKPT(sbi)->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG);
        sbi->por_doing = 0;
        return 0;
}

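/*
 * Flush the cached orphan entries into consecutive orphan blocks of
 * the cp pack, starting at start_blk; each block holds up to
 * F2FS_ORPHANS_PER_BLOCK entries.
 */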
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
        struct list_head *head, *this, *next;
        struct f2fs_orphan_block *orphan_blk = NULL;
        struct page *page = NULL;
        unsigned int nentries = 0;
        unsigned short index = 1;
        unsigned short orphan_blocks;

        orphan_blocks = (unsigned short)((sbi->n_orphans +
                (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);

        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;

        /* loop over each orphan inode entry and write them into orphan blocks */
        list_for_each_safe(this, next, head) {
                struct orphan_inode_entry *orphan;

                orphan = list_entry(this, struct orphan_inode_entry, list);

                if (nentries == F2FS_ORPHANS_PER_BLOCK) {
                        /*
                         * when the current orphan block is full of 1020
                         * entries, we need to flush it and bring another
                         * one into memory
                         */
                        orphan_blk->blk_addr = cpu_to_le16(index);
                        orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
                        orphan_blk->entry_count = cpu_to_le32(nentries);
                        set_page_dirty(page);
                        f2fs_put_page(page, 1);
                        index++;
                        start_blk++;
                        nentries = 0;
                        page = NULL;
                }
                if (page)
                        goto page_exist;

                page = grab_meta_page(sbi, start_blk);
                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                memset(orphan_blk, 0, sizeof(*orphan_blk));
page_exist:
                orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
        }
        if (!page)
                goto end;

        orphan_blk->blk_addr = cpu_to_le16(index);
        orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
        orphan_blk->entry_count = cpu_to_le32(nentries);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
end:
        mutex_unlock(&sbi->orphan_inode_mutex);
}

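/*
 * A cp pack is valid only if the checkpoint blocks at its head and
 * tail carry correct CRCs and the same version number; on success,
 * return the head page and report that version through *version.
 */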
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
                                block_t cp_addr, unsigned long long *version)
{
        struct page *cp_page_1, *cp_page_2 = NULL;
        unsigned long blk_size = sbi->blocksize;
        struct f2fs_checkpoint *cp_block;
        unsigned long long cur_version = 0, pre_version = 0;
        unsigned int crc = 0;
        size_t crc_offset;

        /* Read the 1st cp block in this CP pack */
        cp_page_1 = get_meta_page(sbi, cp_addr);

        /* get the version number */
        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp1;

        crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp1;

        pre_version = le64_to_cpu(cp_block->checkpoint_ver);

        /* Read the 2nd cp block in this CP pack */
        cp_addr += le64_to_cpu(cp_block->cp_pack_total_block_count) - 1;
        cp_page_2 = get_meta_page(sbi, cp_addr);

        cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
        crc_offset = le32_to_cpu(cp_block->checksum_offset);
        if (crc_offset >= blk_size)
                goto invalid_cp2;

        crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp2;

        cur_version = le64_to_cpu(cp_block->checkpoint_ver);

        if (cur_version == pre_version) {
                *version = cur_version;
                f2fs_put_page(cp_page_2, 1);
                return cp_page_1;
        }
invalid_cp2:
        f2fs_put_page(cp_page_2, 1);
invalid_cp1:
        f2fs_put_page(cp_page_1, 1);
        return NULL;
}

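/*
 * Validate both cp packs and keep a copy of the newer one (by version
 * number) in sbi->ckpt; fail only if neither pack is usable.
 */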
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
        struct f2fs_checkpoint *cp_block;
        struct f2fs_super_block *fsb = sbi->raw_super;
        struct page *cp1, *cp2, *cur_page;
        unsigned long blk_size = sbi->blocksize;
        unsigned long long cp1_version = 0, cp2_version = 0;
        unsigned long long cp_start_blk_no;

        sbi->ckpt = kzalloc(blk_size, GFP_KERNEL);
        if (!sbi->ckpt)
                return -ENOMEM;
        /*
         * Finding a valid cp block involves reading both
         * sets (cp pack 1 and cp pack 2).
         */
        cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
        cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

        /* The second checkpoint pack should start at the next segment */
        cp_start_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
        cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

        if (cp1 && cp2) {
                if (ver_after(cp2_version, cp1_version))
                        cur_page = cp2;
                else
                        cur_page = cp1;
        } else if (cp1) {
                cur_page = cp1;
        } else if (cp2) {
                cur_page = cp2;
        } else {
                goto fail_no_cp;
        }

        cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
        memcpy(sbi->ckpt, cp_block, blk_size);

        f2fs_put_page(cp1, 1);
        f2fs_put_page(cp2, 1);
        return 0;

fail_no_cp:
        kfree(sbi->ckpt);
        return -EINVAL;
}

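/*
 * Track directory inodes that own dirty dentry pages, so that a
 * checkpoint can flush them first via sync_dirty_dir_inodes().
 */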
void set_dirty_dir_page(struct inode *inode, struct page *page)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct list_head *head = &sbi->dir_inode_list;
        struct dir_inode_entry *new;
        struct list_head *this;

        if (!S_ISDIR(inode->i_mode))
                return;
retry:
        new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        if (!new) {
                cond_resched();
                goto retry;
        }
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);

        spin_lock(&sbi->dir_inode_lock);
        list_for_each(this, head) {
                struct dir_inode_entry *entry;
                entry = list_entry(this, struct dir_inode_entry, list);
                if (entry->inode == inode) {
                        kmem_cache_free(inode_entry_slab, new);
                        goto out;
                }
        }
        list_add_tail(&new->list, head);
        sbi->n_dirty_dirs++;

        BUG_ON(!S_ISDIR(inode->i_mode));
out:
        inc_page_count(sbi, F2FS_DIRTY_DENTS);
        inode_inc_dirty_dents(inode);
        SetPagePrivate(page);

        spin_unlock(&sbi->dir_inode_lock);
}

void remove_dirty_dir_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct list_head *head = &sbi->dir_inode_list;
        struct list_head *this;

        if (!S_ISDIR(inode->i_mode))
                return;

        spin_lock(&sbi->dir_inode_lock);
        if (atomic_read(&F2FS_I(inode)->dirty_dents))
                goto out;

        list_for_each(this, head) {
                struct dir_inode_entry *entry;
                entry = list_entry(this, struct dir_inode_entry, list);
                if (entry->inode == inode) {
                        list_del(&entry->list);
                        kmem_cache_free(inode_entry_slab, entry);
                        sbi->n_dirty_dirs--;
                        break;
                }
        }
out:
        spin_unlock(&sbi->dir_inode_lock);
}

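/*
 * Write back dirty dentry pages until the dirty-dir list drains;
 * entries leave the list once their last dirty dentry page is gone
 * (see remove_dirty_dir_inode).
 */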
void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
        struct list_head *head = &sbi->dir_inode_list;
        struct dir_inode_entry *entry;
        struct inode *inode;
retry:
        spin_lock(&sbi->dir_inode_lock);
        if (list_empty(head)) {
                spin_unlock(&sbi->dir_inode_lock);
                return;
        }
        entry = list_entry(head->next, struct dir_inode_entry, list);
        inode = igrab(entry->inode);
        spin_unlock(&sbi->dir_inode_lock);
        if (inode) {
                filemap_flush(inode->i_mapping);
                iput(inode);
        } else {
                /*
                 * We should submit the bio, since there exist several
                 * dentry pages under writeback in the freeing inode.
                 */
                f2fs_submit_bio(sbi, DATA, true);
        }
        goto retry;
}

/*
 * Freeze all the FS operations for checkpoint.
 */
void block_operations(struct f2fs_sb_info *sbi)
{
        int t;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };

        /* Stop renaming operations */
        mutex_lock_op(sbi, RENAME);
        mutex_lock_op(sbi, DENTRY_OPS);

retry_dents:
        /* write all the dirty dentry pages */
        sync_dirty_dir_inodes(sbi);

        mutex_lock_op(sbi, DATA_WRITE);
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                mutex_unlock_op(sbi, DATA_WRITE);
                goto retry_dents;
        }

        /* block all the operations */
        for (t = DATA_NEW; t <= NODE_TRUNC; t++)
                mutex_lock_op(sbi, t);

        mutex_lock(&sbi->write_inode);

        /*
         * POR: we should ensure that there are no dirty node pages
         * until finishing the nat/sit flush.
         */
retry:
        sync_node_pages(sbi, 0, &wbc);

        mutex_lock_op(sbi, NODE_WRITE);

        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                mutex_unlock_op(sbi, NODE_WRITE);
                goto retry;
        }
        mutex_unlock(&sbi->write_inode);
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
        int t;
        for (t = NODE_WRITE; t >= RENAME; t--)
                mutex_unlock_op(sbi, t);
}

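/*
 * Write out one cp pack: checkpoint block, orphan blocks, summary
 * blocks, then a second identical checkpoint block that seals the
 * pack; the pack only becomes valid once both copies match.
 */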
static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        nid_t last_nid = 0;
        block_t start_blk;
        struct page *cp_page;
        unsigned int data_sum_blocks, orphan_blocks;
        void *kaddr;
        __u32 crc32 = 0;
        int i;

        /* Flush all the NAT/SIT pages */
        while (get_pages(sbi, F2FS_DIRTY_META))
                sync_meta_pages(sbi, META, LONG_MAX);

        next_free_nid(sbi, &last_nid);

        /*
         * modify the checkpoint;
         * the version number is already updated
         */
        ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
        ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
        ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
        for (i = 0; i < 3; i++) {
                ckpt->cur_node_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                ckpt->cur_node_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
                ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                        curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
        }
        for (i = 0; i < 3; i++) {
                ckpt->cur_data_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                ckpt->cur_data_blkoff[i] =
                        cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
                ckpt->alloc_type[i + CURSEG_HOT_DATA] =
                        curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
        }

        ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
        ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
        ckpt->next_free_nid = cpu_to_le32(last_nid);

        /* 2 cp + n data seg summary + orphan inode blocks */
        data_sum_blocks = npages_for_summary_flush(sbi);
        if (data_sum_blocks < 3)
                ckpt->ckpt_flags |= CP_COMPACT_SUM_FLAG;
        else
                ckpt->ckpt_flags &= (~CP_COMPACT_SUM_FLAG);

        orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
                                        / F2FS_ORPHANS_PER_BLOCK;
        ckpt->cp_pack_start_sum = 1 + orphan_blocks;
        ckpt->cp_pack_total_block_count = 2 + data_sum_blocks + orphan_blocks;

        if (is_umount) {
                ckpt->ckpt_flags |= CP_UMOUNT_FLAG;
                ckpt->cp_pack_total_block_count += NR_CURSEG_NODE_TYPE;
        } else {
                ckpt->ckpt_flags &= (~CP_UMOUNT_FLAG);
        }

        if (sbi->n_orphans)
                ckpt->ckpt_flags |= CP_ORPHAN_PRESENT_FLAG;
        else
                ckpt->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG);

        /* update SIT/NAT bitmap */
        get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
        get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

        crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
        *(__u32 *)((unsigned char *)ckpt +
                                le32_to_cpu(ckpt->checksum_offset))
                                = cpu_to_le32(crc32);

        start_blk = __start_cp_addr(sbi);

        /* write out checkpoint buffer at block 0 */
        cp_page = grab_meta_page(sbi, start_blk++);
        kaddr = page_address(cp_page);
        memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
        set_page_dirty(cp_page);
        f2fs_put_page(cp_page, 1);

        if (sbi->n_orphans) {
                write_orphan_inodes(sbi, start_blk);
                start_blk += orphan_blocks;
        }

        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;
        if (is_umount) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
        }

        /* write out the trailing checkpoint block */
        cp_page = grab_meta_page(sbi, start_blk);
        kaddr = page_address(cp_page);
        memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
        set_page_dirty(cp_page);
        f2fs_put_page(cp_page, 1);

        /* wait for writeback of previously submitted node/meta pages */
        while (get_pages(sbi, F2FS_WRITEBACK))
                congestion_wait(BLK_RW_ASYNC, HZ / 50);

        filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX);
        filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX);

        /* update user_block_counts */
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        sbi->alloc_valid_block_count = 0;

        /* Here, we have only one bio carrying the CP pack */
        if (sbi->ckpt->ckpt_flags & CP_ERROR_FLAG)
                sbi->sb->s_flags |= MS_RDONLY;
        else
                sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

        clear_prefree_segments(sbi);
        F2FS_RESET_SB_DIRT(sbi);
}

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
void write_checkpoint(struct f2fs_sb_info *sbi, bool blocked, bool is_umount)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;

        if (!blocked) {
                mutex_lock(&sbi->cp_mutex);
                block_operations(sbi);
        }

        f2fs_submit_bio(sbi, DATA, true);
        f2fs_submit_bio(sbi, NODE, true);
        f2fs_submit_bio(sbi, META, true);

        /*
         * update the checkpoint pack index:
         * increase the version number so that
         * SIT entries and seg summaries are written at the correct place
         */
        ckpt_ver = le64_to_cpu(ckpt->checkpoint_ver);
        ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

        /* write cached NAT/SIT entries to the NAT/SIT area */
        flush_nat_entries(sbi);
        flush_sit_entries(sbi);

        reset_victim_segmap(sbi);

        /* unlock all the fs_lock[] in do_checkpoint() */
        do_checkpoint(sbi, is_umount);

        unblock_operations(sbi);
        mutex_unlock(&sbi->cp_mutex);
}

void init_orphan_info(struct f2fs_sb_info *sbi)
{
        mutex_init(&sbi->orphan_inode_mutex);
        INIT_LIST_HEAD(&sbi->orphan_inode_list);
        sbi->n_orphans = 0;
}

int create_checkpoint_caches(void)
{
        orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
                        sizeof(struct orphan_inode_entry), NULL);
        if (unlikely(!orphan_entry_slab))
                return -ENOMEM;
        inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
                        sizeof(struct dir_inode_entry), NULL);
        if (unlikely(!inode_entry_slab)) {
                kmem_cache_destroy(orphan_entry_slab);
                return -ENOMEM;
        }
        return 0;
}

void destroy_checkpoint_caches(void)
{
        kmem_cache_destroy(orphan_entry_slab);
        kmem_cache_destroy(inode_entry_slab);
}