Diffstat (limited to 'fs/f2fs/file.c')
-rw-r--r--	fs/f2fs/file.c	212
1 file changed, 133 insertions(+), 79 deletions(-)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 8e68bb64f835..3c27e0ecb3bc 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -41,18 +41,18 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 
 	sb_start_pagefault(inode->i_sb);
 
-	/* force to convert with normal data indices */
-	err = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, page);
-	if (err)
-		goto out;
+	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
 
 	/* block allocation */
 	f2fs_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = f2fs_reserve_block(&dn, page->index);
-	f2fs_unlock_op(sbi);
-	if (err)
-		goto out;
+	if (err) {
+		f2fs_unlock_op(sbi);
+		goto out;
+	}
+	f2fs_put_dnode(&dn);
+	f2fs_unlock_op(sbi);
 
 	file_update_time(vma->vm_file);
 	lock_page(page);
@@ -130,10 +130,45 @@ static inline bool need_do_checkpoint(struct inode *inode)
 		need_cp = true;
 	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
 		need_cp = true;
+	else if (test_opt(sbi, FASTBOOT))
+		need_cp = true;
+	else if (sbi->active_logs == 2)
+		need_cp = true;
 
 	return need_cp;
 }
 
+static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
+{
+	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
+	bool ret = false;
+	/* But we need to avoid that there are some inode updates */
+	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
+		ret = true;
+	f2fs_put_page(i, 0);
+	return ret;
+}
+
+static void try_to_fix_pino(struct inode *inode)
+{
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	nid_t pino;
+
+	down_write(&fi->i_sem);
+	fi->xattr_ver = 0;
+	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
+			get_parent_ino(inode, &pino)) {
+		fi->i_pino = pino;
+		file_got_pino(inode);
+		up_write(&fi->i_sem);
+
+		mark_inode_dirty_sync(inode);
+		f2fs_write_inode(inode, NULL);
+	} else {
+		up_write(&fi->i_sem);
+	}
+}
+
 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
 	struct inode *inode = file->f_mapping->host;
@@ -164,19 +199,21 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 		return ret;
 	}
 
+	/* if the inode is dirty, let's recover all the time */
+	if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
+		update_inode_page(inode);
+		goto go_write;
+	}
+
 	/*
 	 * if there is no written data, don't waste time to write recovery info.
 	 */
 	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
 			!exist_written_data(sbi, ino, APPEND_INO)) {
-		struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
 
-		/* But we need to avoid that there are some inode updates */
-		if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino)) {
-			f2fs_put_page(i, 0);
+		/* it may call write_inode just prior to fsync */
+		if (need_inode_page_update(sbi, ino))
 			goto go_write;
-		}
-		f2fs_put_page(i, 0);
 
 		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
 				exist_written_data(sbi, ino, UPDATE_INO))
@@ -196,49 +233,36 @@ go_write:
 	up_read(&fi->i_sem);
 
 	if (need_cp) {
-		nid_t pino;
-
 		/* all the dirty node pages should be flushed for POR */
 		ret = f2fs_sync_fs(inode->i_sb, 1);
 
-		down_write(&fi->i_sem);
-		F2FS_I(inode)->xattr_ver = 0;
-		if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
-				get_parent_ino(inode, &pino)) {
-			F2FS_I(inode)->i_pino = pino;
-			file_got_pino(inode);
-			up_write(&fi->i_sem);
-			mark_inode_dirty_sync(inode);
-			ret = f2fs_write_inode(inode, NULL);
-			if (ret)
-				goto out;
-		} else {
-			up_write(&fi->i_sem);
-		}
-	} else {
+		/*
+		 * We've secured consistency through sync_fs. Following pino
+		 * will be used only for fsynced inodes after checkpoint.
+		 */
+		try_to_fix_pino(inode);
+		goto out;
+	}
 sync_nodes:
-		sync_node_pages(sbi, ino, &wbc);
-
-		if (need_inode_block_update(sbi, ino)) {
-			mark_inode_dirty_sync(inode);
-			ret = f2fs_write_inode(inode, NULL);
-			if (ret)
-				goto out;
-			goto sync_nodes;
-		}
+	sync_node_pages(sbi, ino, &wbc);
 
-		ret = wait_on_node_pages_writeback(sbi, ino);
-		if (ret)
-			goto out;
+	if (need_inode_block_update(sbi, ino)) {
+		mark_inode_dirty_sync(inode);
+		f2fs_write_inode(inode, NULL);
+		goto sync_nodes;
+	}
+
+	ret = wait_on_node_pages_writeback(sbi, ino);
+	if (ret)
+		goto out;
 
-		/* once recovery info is written, don't need to tack this */
-		remove_dirty_inode(sbi, ino, APPEND_INO);
-		clear_inode_flag(fi, FI_APPEND_WRITE);
+	/* once recovery info is written, don't need to tack this */
+	remove_dirty_inode(sbi, ino, APPEND_INO);
+	clear_inode_flag(fi, FI_APPEND_WRITE);
 flush_out:
-		remove_dirty_inode(sbi, ino, UPDATE_INO);
-		clear_inode_flag(fi, FI_UPDATE_WRITE);
-		ret = f2fs_issue_flush(F2FS_I_SB(inode));
-	}
+	remove_dirty_inode(sbi, ino, UPDATE_INO);
+	clear_inode_flag(fi, FI_UPDATE_WRITE);
+	ret = f2fs_issue_flush(sbi);
 out:
 	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
 	return ret;
@@ -296,7 +320,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 		goto fail;
 
 	/* handle inline data case */
-	if (f2fs_has_inline_data(inode)) {
+	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
 		if (whence == SEEK_HOLE)
 			data_ofs = isize;
 		goto found;
@@ -374,6 +398,15 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
 
 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct inode *inode = file_inode(file);
+
+	/* we don't need to use inline_data strictly */
+	if (f2fs_has_inline_data(inode)) {
+		int err = f2fs_convert_inline_inode(inode);
+		if (err)
+			return err;
+	}
+
 	file_accessed(file);
 	vma->vm_ops = &f2fs_file_vm_ops;
 	return 0;
@@ -415,20 +448,17 @@ void truncate_data_blocks(struct dnode_of_data *dn)
 	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
 }
 
-static void truncate_partial_data_page(struct inode *inode, u64 from)
+static int truncate_partial_data_page(struct inode *inode, u64 from)
 {
 	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
 	struct page *page;
 
-	if (f2fs_has_inline_data(inode))
-		return truncate_inline_data(inode, from);
-
 	if (!offset)
-		return;
+		return 0;
 
 	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
 	if (IS_ERR(page))
-		return;
+		return 0;
 
 	lock_page(page);
 	if (unlikely(!PageUptodate(page) ||
@@ -438,9 +468,9 @@ static void truncate_partial_data_page(struct inode *inode, u64 from)
 	f2fs_wait_on_page_writeback(page, DATA);
 	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
 	set_page_dirty(page);
-
 out:
 	f2fs_put_page(page, 1);
+	return 0;
 }
 
 int truncate_blocks(struct inode *inode, u64 from, bool lock)
@@ -450,27 +480,33 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
 	struct dnode_of_data dn;
 	pgoff_t free_from;
 	int count = 0, err = 0;
+	struct page *ipage;
 
 	trace_f2fs_truncate_blocks_enter(inode, from);
 
-	if (f2fs_has_inline_data(inode))
-		goto done;
-
 	free_from = (pgoff_t)
 		((from + blocksize - 1) >> (sbi->log_blocksize));
 
 	if (lock)
 		f2fs_lock_op(sbi);
 
-	set_new_dnode(&dn, inode, NULL, NULL, 0);
+	ipage = get_node_page(sbi, inode->i_ino);
+	if (IS_ERR(ipage)) {
+		err = PTR_ERR(ipage);
+		goto out;
+	}
+
+	if (f2fs_has_inline_data(inode)) {
+		f2fs_put_page(ipage, 1);
+		goto out;
+	}
+
+	set_new_dnode(&dn, inode, ipage, NULL, 0);
 	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
 	if (err) {
 		if (err == -ENOENT)
 			goto free_next;
-		if (lock)
-			f2fs_unlock_op(sbi);
-		trace_f2fs_truncate_blocks_exit(inode, err);
-		return err;
+		goto out;
 	}
 
 	count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -486,11 +522,13 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
 	f2fs_put_dnode(&dn);
 free_next:
 	err = truncate_inode_blocks(inode, free_from);
+out:
 	if (lock)
 		f2fs_unlock_op(sbi);
-done:
+
 	/* lastly zero out the first data page */
-	truncate_partial_data_page(inode, from);
+	if (!err)
+		err = truncate_partial_data_page(inode, from);
 
 	trace_f2fs_truncate_blocks_exit(inode, err);
 	return err;
@@ -504,6 +542,12 @@ void f2fs_truncate(struct inode *inode)
 
 	trace_f2fs_truncate(inode);
 
+	/* we should check inline_data size */
+	if (f2fs_has_inline_data(inode) && !f2fs_may_inline(inode)) {
+		if (f2fs_convert_inline_inode(inode))
+			return;
+	}
+
 	if (!truncate_blocks(inode, i_size_read(inode), true)) {
 		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		mark_inode_dirty(inode);
@@ -561,10 +605,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 		return err;
 
 	if (attr->ia_valid & ATTR_SIZE) {
-		err = f2fs_convert_inline_data(inode, attr->ia_size, NULL);
-		if (err)
-			return err;
-
 		if (attr->ia_size != i_size_read(inode)) {
 			truncate_setsize(inode, attr->ia_size);
 			f2fs_truncate(inode);
@@ -665,9 +705,11 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	if (offset >= inode->i_size)
 		return ret;
 
-	ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL);
-	if (ret)
-		return ret;
+	if (f2fs_has_inline_data(inode)) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
 
 	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
 	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
@@ -721,9 +763,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	if (ret)
 		return ret;
 
-	ret = f2fs_convert_inline_data(inode, offset + len, NULL);
-	if (ret)
-		return ret;
+	if (f2fs_has_inline_data(inode)) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
 
 	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
 	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
@@ -874,7 +918,15 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 
 	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
 
-	return f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL);
+	return f2fs_convert_inline_inode(inode);
+}
+
+static int f2fs_release_file(struct inode *inode, struct file *filp)
+{
+	/* some remained atomic pages should discarded */
+	if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
+		commit_inmem_pages(inode, true);
+	return 0;
 }
 
 static int f2fs_ioc_commit_atomic_write(struct file *filp)
@@ -908,7 +960,8 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
 		return -EACCES;
 
 	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
-	return 0;
+
+	return f2fs_convert_inline_inode(inode);
 }
 
 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
@@ -985,6 +1038,7 @@ const struct file_operations f2fs_file_operations = {
 	.read_iter = generic_file_read_iter,
 	.write_iter = generic_file_write_iter,
 	.open = generic_file_open,
+	.release = f2fs_release_file,
 	.mmap = f2fs_file_mmap,
 	.fsync = f2fs_sync_file,
 	.fallocate = f2fs_fallocate,