diff options
author | Jaegeuk Kim <jaegeuk@kernel.org> | 2014-10-23 22:48:09 -0400 |
---|---|---|
committer | Jaegeuk Kim <jaegeuk@kernel.org> | 2014-11-04 20:34:11 -0500 |
commit | b3d208f96d6bb21247108a956dead6a028d5cdb2 (patch) | |
tree | 0adfa5781bae898208c62a94c6087322f16a94be /fs | |
parent | 1f7732fe6cc0c37befc74cef1d289cd2272b7a5c (diff) |
f2fs: revisit inline_data to avoid data races and potential bugs
This patch simplifies the inline_data usage with the following rule.
1. inline_data is set during the file creation.
2. If new data is requested to be written in ranges beyond inline_data,
f2fs converts that inode permanently.
3. There are no cases which convert a non-inline_data inode to inline_data.
4. The inline_data flag should be changed under inode page lock.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/f2fs/data.c | 89 | ||||
-rw-r--r-- | fs/f2fs/f2fs.h | 24 | ||||
-rw-r--r-- | fs/f2fs/file.c | 110 | ||||
-rw-r--r-- | fs/f2fs/inline.c | 203 | ||||
-rw-r--r-- | fs/f2fs/inode.c | 33 | ||||
-rw-r--r-- | fs/f2fs/namei.c | 3 |
6 files changed, 250 insertions, 212 deletions
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index e3788bd206d8..ceee1a69c5aa 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -737,14 +737,14 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
737 | static int f2fs_read_data_page(struct file *file, struct page *page) | 737 | static int f2fs_read_data_page(struct file *file, struct page *page) |
738 | { | 738 | { |
739 | struct inode *inode = page->mapping->host; | 739 | struct inode *inode = page->mapping->host; |
740 | int ret; | 740 | int ret = -EAGAIN; |
741 | 741 | ||
742 | trace_f2fs_readpage(page, DATA); | 742 | trace_f2fs_readpage(page, DATA); |
743 | 743 | ||
744 | /* If the file has inline data, try to read it directly */ | 744 | /* If the file has inline data, try to read it directly */ |
745 | if (f2fs_has_inline_data(inode)) | 745 | if (f2fs_has_inline_data(inode)) |
746 | ret = f2fs_read_inline_data(inode, page); | 746 | ret = f2fs_read_inline_data(inode, page); |
747 | else | 747 | if (ret == -EAGAIN) |
748 | ret = mpage_readpage(page, get_data_block); | 748 | ret = mpage_readpage(page, get_data_block); |
749 | 749 | ||
750 | return ret; | 750 | return ret; |
@@ -856,10 +856,11 @@ write: | |||
856 | else if (has_not_enough_free_secs(sbi, 0)) | 856 | else if (has_not_enough_free_secs(sbi, 0)) |
857 | goto redirty_out; | 857 | goto redirty_out; |
858 | 858 | ||
859 | err = -EAGAIN; | ||
859 | f2fs_lock_op(sbi); | 860 | f2fs_lock_op(sbi); |
860 | if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode)) | 861 | if (f2fs_has_inline_data(inode)) |
861 | err = f2fs_write_inline_data(inode, page, offset); | 862 | err = f2fs_write_inline_data(inode, page); |
862 | else | 863 | if (err == -EAGAIN) |
863 | err = do_write_data_page(page, &fio); | 864 | err = do_write_data_page(page, &fio); |
864 | f2fs_unlock_op(sbi); | 865 | f2fs_unlock_op(sbi); |
865 | done: | 866 | done: |
@@ -957,24 +958,14 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, | |||
957 | 958 | ||
958 | f2fs_balance_fs(sbi); | 959 | f2fs_balance_fs(sbi); |
959 | repeat: | 960 | repeat: |
960 | err = f2fs_convert_inline_data(inode, pos + len, NULL); | ||
961 | if (err) | ||
962 | goto fail; | ||
963 | |||
964 | page = grab_cache_page_write_begin(mapping, index, flags); | 961 | page = grab_cache_page_write_begin(mapping, index, flags); |
965 | if (!page) { | 962 | if (!page) { |
966 | err = -ENOMEM; | 963 | err = -ENOMEM; |
967 | goto fail; | 964 | goto fail; |
968 | } | 965 | } |
969 | 966 | ||
970 | /* to avoid latency during memory pressure */ | ||
971 | unlock_page(page); | ||
972 | |||
973 | *pagep = page; | 967 | *pagep = page; |
974 | 968 | ||
975 | if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA) | ||
976 | goto inline_data; | ||
977 | |||
978 | f2fs_lock_op(sbi); | 969 | f2fs_lock_op(sbi); |
979 | 970 | ||
980 | /* check inline_data */ | 971 | /* check inline_data */ |
@@ -982,32 +973,42 @@ repeat: | |||
982 | if (IS_ERR(ipage)) | 973 | if (IS_ERR(ipage)) |
983 | goto unlock_fail; | 974 | goto unlock_fail; |
984 | 975 | ||
976 | set_new_dnode(&dn, inode, ipage, ipage, 0); | ||
977 | |||
985 | if (f2fs_has_inline_data(inode)) { | 978 | if (f2fs_has_inline_data(inode)) { |
986 | f2fs_put_page(ipage, 1); | 979 | if (pos + len <= MAX_INLINE_DATA) { |
987 | f2fs_unlock_op(sbi); | 980 | read_inline_data(page, ipage); |
988 | f2fs_put_page(page, 0); | 981 | set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); |
989 | goto repeat; | 982 | sync_inode_page(&dn); |
983 | goto put_next; | ||
984 | } else if (page->index == 0) { | ||
985 | err = f2fs_convert_inline_page(&dn, page); | ||
986 | if (err) | ||
987 | goto unlock_fail; | ||
988 | } else { | ||
989 | struct page *p = grab_cache_page(inode->i_mapping, 0); | ||
990 | if (!p) { | ||
991 | err = -ENOMEM; | ||
992 | goto unlock_fail; | ||
993 | } | ||
994 | err = f2fs_convert_inline_page(&dn, p); | ||
995 | f2fs_put_page(p, 1); | ||
996 | if (err) | ||
997 | goto unlock_fail; | ||
998 | } | ||
990 | } | 999 | } |
991 | |||
992 | set_new_dnode(&dn, inode, ipage, NULL, 0); | ||
993 | err = f2fs_reserve_block(&dn, index); | 1000 | err = f2fs_reserve_block(&dn, index); |
994 | if (err) | 1001 | if (err) |
995 | goto unlock_fail; | 1002 | goto unlock_fail; |
1003 | put_next: | ||
996 | f2fs_put_dnode(&dn); | 1004 | f2fs_put_dnode(&dn); |
997 | f2fs_unlock_op(sbi); | 1005 | f2fs_unlock_op(sbi); |
998 | 1006 | ||
999 | inline_data: | ||
1000 | lock_page(page); | ||
1001 | if (unlikely(page->mapping != mapping)) { | ||
1002 | f2fs_put_page(page, 1); | ||
1003 | goto repeat; | ||
1004 | } | ||
1005 | |||
1006 | f2fs_wait_on_page_writeback(page, DATA); | ||
1007 | |||
1008 | if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) | 1007 | if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) |
1009 | return 0; | 1008 | return 0; |
1010 | 1009 | ||
1010 | f2fs_wait_on_page_writeback(page, DATA); | ||
1011 | |||
1011 | if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { | 1012 | if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { |
1012 | unsigned start = pos & (PAGE_CACHE_SIZE - 1); | 1013 | unsigned start = pos & (PAGE_CACHE_SIZE - 1); |
1013 | unsigned end = start + len; | 1014 | unsigned end = start + len; |
@@ -1017,13 +1018,7 @@ inline_data: | |||
1017 | goto out; | 1018 | goto out; |
1018 | } | 1019 | } |
1019 | 1020 | ||
1020 | if (f2fs_has_inline_data(inode)) { | 1021 | if (dn.data_blkaddr == NEW_ADDR) { |
1021 | err = f2fs_read_inline_data(inode, page); | ||
1022 | if (err) { | ||
1023 | page_cache_release(page); | ||
1024 | goto fail; | ||
1025 | } | ||
1026 | } else if (dn.data_blkaddr == NEW_ADDR) { | ||
1027 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); | 1022 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); |
1028 | } else { | 1023 | } else { |
1029 | err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, | 1024 | err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, |
@@ -1049,7 +1044,7 @@ out: | |||
1049 | 1044 | ||
1050 | unlock_fail: | 1045 | unlock_fail: |
1051 | f2fs_unlock_op(sbi); | 1046 | f2fs_unlock_op(sbi); |
1052 | f2fs_put_page(page, 0); | 1047 | f2fs_put_page(page, 1); |
1053 | fail: | 1048 | fail: |
1054 | f2fs_write_failed(mapping, pos + len); | 1049 | f2fs_write_failed(mapping, pos + len); |
1055 | return err; | 1050 | return err; |
@@ -1102,9 +1097,12 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, | |||
1102 | size_t count = iov_iter_count(iter); | 1097 | size_t count = iov_iter_count(iter); |
1103 | int err; | 1098 | int err; |
1104 | 1099 | ||
1105 | /* Let buffer I/O handle the inline data case. */ | 1100 | /* we don't need to use inline_data strictly */ |
1106 | if (f2fs_has_inline_data(inode)) | 1101 | if (f2fs_has_inline_data(inode)) { |
1107 | return 0; | 1102 | err = f2fs_convert_inline_inode(inode); |
1103 | if (err) | ||
1104 | return err; | ||
1105 | } | ||
1108 | 1106 | ||
1109 | if (check_direct_IO(inode, rw, iter, offset)) | 1107 | if (check_direct_IO(inode, rw, iter, offset)) |
1110 | return 0; | 1108 | return 0; |
@@ -1170,9 +1168,12 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) | |||
1170 | { | 1168 | { |
1171 | struct inode *inode = mapping->host; | 1169 | struct inode *inode = mapping->host; |
1172 | 1170 | ||
1173 | if (f2fs_has_inline_data(inode)) | 1171 | /* we don't need to use inline_data strictly */ |
1174 | return 0; | 1172 | if (f2fs_has_inline_data(inode)) { |
1175 | 1173 | int err = f2fs_convert_inline_inode(inode); | |
1174 | if (err) | ||
1175 | return err; | ||
1176 | } | ||
1176 | return generic_block_bmap(mapping, block, get_data_block); | 1177 | return generic_block_bmap(mapping, block, get_data_block); |
1177 | } | 1178 | } |
1178 | 1179 | ||
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 2e9d2e3051f7..afe3022ffac5 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -1101,6 +1101,7 @@ enum { | |||
1101 | FI_NEED_IPU, /* used for ipu per file */ | 1101 | FI_NEED_IPU, /* used for ipu per file */ |
1102 | FI_ATOMIC_FILE, /* indicate atomic file */ | 1102 | FI_ATOMIC_FILE, /* indicate atomic file */ |
1103 | FI_VOLATILE_FILE, /* indicate volatile file */ | 1103 | FI_VOLATILE_FILE, /* indicate volatile file */ |
1104 | FI_DATA_EXIST, /* indicate data exists */ | ||
1104 | }; | 1105 | }; |
1105 | 1106 | ||
1106 | static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag) | 1107 | static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag) |
@@ -1135,6 +1136,8 @@ static inline void get_inline_info(struct f2fs_inode_info *fi, | |||
1135 | set_inode_flag(fi, FI_INLINE_DATA); | 1136 | set_inode_flag(fi, FI_INLINE_DATA); |
1136 | if (ri->i_inline & F2FS_INLINE_DENTRY) | 1137 | if (ri->i_inline & F2FS_INLINE_DENTRY) |
1137 | set_inode_flag(fi, FI_INLINE_DENTRY); | 1138 | set_inode_flag(fi, FI_INLINE_DENTRY); |
1139 | if (ri->i_inline & F2FS_DATA_EXIST) | ||
1140 | set_inode_flag(fi, FI_DATA_EXIST); | ||
1138 | } | 1141 | } |
1139 | 1142 | ||
1140 | static inline void set_raw_inline(struct f2fs_inode_info *fi, | 1143 | static inline void set_raw_inline(struct f2fs_inode_info *fi, |
@@ -1148,6 +1151,8 @@ static inline void set_raw_inline(struct f2fs_inode_info *fi, | |||
1148 | ri->i_inline |= F2FS_INLINE_DATA; | 1151 | ri->i_inline |= F2FS_INLINE_DATA; |
1149 | if (is_inode_flag_set(fi, FI_INLINE_DENTRY)) | 1152 | if (is_inode_flag_set(fi, FI_INLINE_DENTRY)) |
1150 | ri->i_inline |= F2FS_INLINE_DENTRY; | 1153 | ri->i_inline |= F2FS_INLINE_DENTRY; |
1154 | if (is_inode_flag_set(fi, FI_DATA_EXIST)) | ||
1155 | ri->i_inline |= F2FS_DATA_EXIST; | ||
1151 | } | 1156 | } |
1152 | 1157 | ||
1153 | static inline int f2fs_has_inline_xattr(struct inode *inode) | 1158 | static inline int f2fs_has_inline_xattr(struct inode *inode) |
@@ -1182,6 +1187,17 @@ static inline int f2fs_has_inline_data(struct inode *inode) | |||
1182 | return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA); | 1187 | return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA); |
1183 | } | 1188 | } |
1184 | 1189 | ||
1190 | static inline void f2fs_clear_inline_inode(struct inode *inode) | ||
1191 | { | ||
1192 | clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | ||
1193 | clear_inode_flag(F2FS_I(inode), FI_DATA_EXIST); | ||
1194 | } | ||
1195 | |||
1196 | static inline int f2fs_exist_data(struct inode *inode) | ||
1197 | { | ||
1198 | return is_inode_flag_set(F2FS_I(inode), FI_DATA_EXIST); | ||
1199 | } | ||
1200 | |||
1185 | static inline bool f2fs_is_atomic_file(struct inode *inode) | 1201 | static inline bool f2fs_is_atomic_file(struct inode *inode) |
1186 | { | 1202 | { |
1187 | return is_inode_flag_set(F2FS_I(inode), FI_ATOMIC_FILE); | 1203 | return is_inode_flag_set(F2FS_I(inode), FI_ATOMIC_FILE); |
@@ -1590,10 +1606,12 @@ extern const struct inode_operations f2fs_special_inode_operations; | |||
1590 | * inline.c | 1606 | * inline.c |
1591 | */ | 1607 | */ |
1592 | bool f2fs_may_inline(struct inode *); | 1608 | bool f2fs_may_inline(struct inode *); |
1609 | void read_inline_data(struct page *, struct page *); | ||
1593 | int f2fs_read_inline_data(struct inode *, struct page *); | 1610 | int f2fs_read_inline_data(struct inode *, struct page *); |
1594 | int f2fs_convert_inline_data(struct inode *, pgoff_t, struct page *); | 1611 | int f2fs_convert_inline_page(struct dnode_of_data *, struct page *); |
1595 | int f2fs_write_inline_data(struct inode *, struct page *, unsigned int); | 1612 | int f2fs_convert_inline_inode(struct inode *); |
1596 | void truncate_inline_data(struct inode *, u64); | 1613 | int f2fs_write_inline_data(struct inode *, struct page *); |
1614 | void truncate_inline_data(struct page *, u64); | ||
1597 | bool recover_inline_data(struct inode *, struct page *); | 1615 | bool recover_inline_data(struct inode *, struct page *); |
1598 | struct f2fs_dir_entry *find_in_inline_dir(struct inode *, struct qstr *, | 1616 | struct f2fs_dir_entry *find_in_inline_dir(struct inode *, struct qstr *, |
1599 | struct page **); | 1617 | struct page **); |
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 402e38185b8c..832bd91922b8 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c | |||
@@ -35,35 +35,17 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, | |||
35 | struct inode *inode = file_inode(vma->vm_file); | 35 | struct inode *inode = file_inode(vma->vm_file); |
36 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | 36 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
37 | struct dnode_of_data dn; | 37 | struct dnode_of_data dn; |
38 | struct page *ipage; | ||
39 | int err; | 38 | int err; |
40 | 39 | ||
41 | f2fs_balance_fs(sbi); | 40 | f2fs_balance_fs(sbi); |
42 | 41 | ||
43 | sb_start_pagefault(inode->i_sb); | 42 | sb_start_pagefault(inode->i_sb); |
44 | retry: | 43 | |
45 | /* force to convert with normal data indices */ | 44 | f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); |
46 | err = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, page); | ||
47 | if (err) | ||
48 | goto out; | ||
49 | 45 | ||
50 | /* block allocation */ | 46 | /* block allocation */ |
51 | f2fs_lock_op(sbi); | 47 | f2fs_lock_op(sbi); |
52 | 48 | set_new_dnode(&dn, inode, NULL, NULL, 0); | |
53 | /* check inline_data */ | ||
54 | ipage = get_node_page(sbi, inode->i_ino); | ||
55 | if (IS_ERR(ipage)) { | ||
56 | f2fs_unlock_op(sbi); | ||
57 | goto out; | ||
58 | } | ||
59 | |||
60 | if (f2fs_has_inline_data(inode)) { | ||
61 | f2fs_put_page(ipage, 1); | ||
62 | f2fs_unlock_op(sbi); | ||
63 | goto retry; | ||
64 | } | ||
65 | |||
66 | set_new_dnode(&dn, inode, ipage, NULL, 0); | ||
67 | err = f2fs_reserve_block(&dn, page->index); | 49 | err = f2fs_reserve_block(&dn, page->index); |
68 | if (err) { | 50 | if (err) { |
69 | f2fs_unlock_op(sbi); | 51 | f2fs_unlock_op(sbi); |
@@ -392,6 +374,15 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence) | |||
392 | 374 | ||
393 | static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) | 375 | static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) |
394 | { | 376 | { |
377 | struct inode *inode = file_inode(file); | ||
378 | |||
379 | /* we don't need to use inline_data strictly */ | ||
380 | if (f2fs_has_inline_data(inode)) { | ||
381 | int err = f2fs_convert_inline_inode(inode); | ||
382 | if (err) | ||
383 | return err; | ||
384 | } | ||
385 | |||
395 | file_accessed(file); | 386 | file_accessed(file); |
396 | vma->vm_ops = &f2fs_file_vm_ops; | 387 | vma->vm_ops = &f2fs_file_vm_ops; |
397 | return 0; | 388 | return 0; |
@@ -433,20 +424,17 @@ void truncate_data_blocks(struct dnode_of_data *dn) | |||
433 | truncate_data_blocks_range(dn, ADDRS_PER_BLOCK); | 424 | truncate_data_blocks_range(dn, ADDRS_PER_BLOCK); |
434 | } | 425 | } |
435 | 426 | ||
436 | static void truncate_partial_data_page(struct inode *inode, u64 from) | 427 | static int truncate_partial_data_page(struct inode *inode, u64 from) |
437 | { | 428 | { |
438 | unsigned offset = from & (PAGE_CACHE_SIZE - 1); | 429 | unsigned offset = from & (PAGE_CACHE_SIZE - 1); |
439 | struct page *page; | 430 | struct page *page; |
440 | 431 | ||
441 | if (f2fs_has_inline_data(inode)) | ||
442 | return truncate_inline_data(inode, from); | ||
443 | |||
444 | if (!offset) | 432 | if (!offset) |
445 | return; | 433 | return 0; |
446 | 434 | ||
447 | page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false); | 435 | page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false); |
448 | if (IS_ERR(page)) | 436 | if (IS_ERR(page)) |
449 | return; | 437 | return 0; |
450 | 438 | ||
451 | lock_page(page); | 439 | lock_page(page); |
452 | if (unlikely(!PageUptodate(page) || | 440 | if (unlikely(!PageUptodate(page) || |
@@ -456,9 +444,9 @@ static void truncate_partial_data_page(struct inode *inode, u64 from) | |||
456 | f2fs_wait_on_page_writeback(page, DATA); | 444 | f2fs_wait_on_page_writeback(page, DATA); |
457 | zero_user(page, offset, PAGE_CACHE_SIZE - offset); | 445 | zero_user(page, offset, PAGE_CACHE_SIZE - offset); |
458 | set_page_dirty(page); | 446 | set_page_dirty(page); |
459 | |||
460 | out: | 447 | out: |
461 | f2fs_put_page(page, 1); | 448 | f2fs_put_page(page, 1); |
449 | return 0; | ||
462 | } | 450 | } |
463 | 451 | ||
464 | int truncate_blocks(struct inode *inode, u64 from, bool lock) | 452 | int truncate_blocks(struct inode *inode, u64 from, bool lock) |
@@ -468,33 +456,35 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) | |||
468 | struct dnode_of_data dn; | 456 | struct dnode_of_data dn; |
469 | pgoff_t free_from; | 457 | pgoff_t free_from; |
470 | int count = 0, err = 0; | 458 | int count = 0, err = 0; |
459 | struct page *ipage; | ||
471 | 460 | ||
472 | trace_f2fs_truncate_blocks_enter(inode, from); | 461 | trace_f2fs_truncate_blocks_enter(inode, from); |
473 | 462 | ||
474 | if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) | ||
475 | goto done; | ||
476 | |||
477 | free_from = (pgoff_t) | 463 | free_from = (pgoff_t) |
478 | ((from + blocksize - 1) >> (sbi->log_blocksize)); | 464 | ((from + blocksize - 1) >> (sbi->log_blocksize)); |
479 | 465 | ||
480 | if (lock) | 466 | if (lock) |
481 | f2fs_lock_op(sbi); | 467 | f2fs_lock_op(sbi); |
482 | 468 | ||
483 | set_new_dnode(&dn, inode, NULL, NULL, 0); | 469 | ipage = get_node_page(sbi, inode->i_ino); |
470 | if (IS_ERR(ipage)) { | ||
471 | err = PTR_ERR(ipage); | ||
472 | goto out; | ||
473 | } | ||
474 | |||
475 | if (f2fs_has_inline_data(inode)) { | ||
476 | truncate_inline_data(ipage, from); | ||
477 | update_inode(inode, ipage); | ||
478 | f2fs_put_page(ipage, 1); | ||
479 | goto out; | ||
480 | } | ||
481 | |||
482 | set_new_dnode(&dn, inode, ipage, NULL, 0); | ||
484 | err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE); | 483 | err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE); |
485 | if (err) { | 484 | if (err) { |
486 | if (err == -ENOENT) | 485 | if (err == -ENOENT) |
487 | goto free_next; | 486 | goto free_next; |
488 | if (lock) | 487 | goto out; |
489 | f2fs_unlock_op(sbi); | ||
490 | trace_f2fs_truncate_blocks_exit(inode, err); | ||
491 | return err; | ||
492 | } | ||
493 | |||
494 | /* writepage can convert inline_data under get_donde_of_data */ | ||
495 | if (f2fs_has_inline_data(inode)) { | ||
496 | f2fs_put_dnode(&dn); | ||
497 | goto unlock_done; | ||
498 | } | 488 | } |
499 | 489 | ||
500 | count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); | 490 | count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); |
@@ -510,12 +500,13 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) | |||
510 | f2fs_put_dnode(&dn); | 500 | f2fs_put_dnode(&dn); |
511 | free_next: | 501 | free_next: |
512 | err = truncate_inode_blocks(inode, free_from); | 502 | err = truncate_inode_blocks(inode, free_from); |
513 | unlock_done: | 503 | |
504 | /* lastly zero out the first data page */ | ||
505 | if (!err) | ||
506 | err = truncate_partial_data_page(inode, from); | ||
507 | out: | ||
514 | if (lock) | 508 | if (lock) |
515 | f2fs_unlock_op(sbi); | 509 | f2fs_unlock_op(sbi); |
516 | done: | ||
517 | /* lastly zero out the first data page */ | ||
518 | truncate_partial_data_page(inode, from); | ||
519 | 510 | ||
520 | trace_f2fs_truncate_blocks_exit(inode, err); | 511 | trace_f2fs_truncate_blocks_exit(inode, err); |
521 | return err; | 512 | return err; |
@@ -586,10 +577,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) | |||
586 | return err; | 577 | return err; |
587 | 578 | ||
588 | if (attr->ia_valid & ATTR_SIZE) { | 579 | if (attr->ia_valid & ATTR_SIZE) { |
589 | err = f2fs_convert_inline_data(inode, attr->ia_size, NULL); | ||
590 | if (err) | ||
591 | return err; | ||
592 | |||
593 | if (attr->ia_size != i_size_read(inode)) { | 580 | if (attr->ia_size != i_size_read(inode)) { |
594 | truncate_setsize(inode, attr->ia_size); | 581 | truncate_setsize(inode, attr->ia_size); |
595 | f2fs_truncate(inode); | 582 | f2fs_truncate(inode); |
@@ -690,9 +677,11 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
690 | if (offset >= inode->i_size) | 677 | if (offset >= inode->i_size) |
691 | return ret; | 678 | return ret; |
692 | 679 | ||
693 | ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL); | 680 | if (f2fs_has_inline_data(inode)) { |
694 | if (ret) | 681 | ret = f2fs_convert_inline_inode(inode); |
695 | return ret; | 682 | if (ret) |
683 | return ret; | ||
684 | } | ||
696 | 685 | ||
697 | pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; | 686 | pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; |
698 | pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; | 687 | pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; |
@@ -746,9 +735,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset, | |||
746 | if (ret) | 735 | if (ret) |
747 | return ret; | 736 | return ret; |
748 | 737 | ||
749 | ret = f2fs_convert_inline_data(inode, offset + len, NULL); | 738 | if (f2fs_has_inline_data(inode)) { |
750 | if (ret) | 739 | ret = f2fs_convert_inline_inode(inode); |
751 | return ret; | 740 | if (ret) |
741 | return ret; | ||
742 | } | ||
752 | 743 | ||
753 | pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; | 744 | pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; |
754 | pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; | 745 | pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; |
@@ -899,7 +890,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) | |||
899 | 890 | ||
900 | set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); | 891 | set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); |
901 | 892 | ||
902 | return f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL); | 893 | return f2fs_convert_inline_inode(inode); |
903 | } | 894 | } |
904 | 895 | ||
905 | static int f2fs_ioc_commit_atomic_write(struct file *filp) | 896 | static int f2fs_ioc_commit_atomic_write(struct file *filp) |
@@ -933,7 +924,8 @@ static int f2fs_ioc_start_volatile_write(struct file *filp) | |||
933 | return -EACCES; | 924 | return -EACCES; |
934 | 925 | ||
935 | set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); | 926 | set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); |
936 | return 0; | 927 | |
928 | return f2fs_convert_inline_inode(inode); | ||
937 | } | 929 | } |
938 | 930 | ||
939 | static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) | 931 | static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) |
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index d6677d6bbd89..8b6610906ea5 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c | |||
@@ -15,41 +15,26 @@ | |||
15 | 15 | ||
16 | bool f2fs_may_inline(struct inode *inode) | 16 | bool f2fs_may_inline(struct inode *inode) |
17 | { | 17 | { |
18 | block_t nr_blocks; | ||
19 | loff_t i_size; | ||
20 | |||
21 | if (!test_opt(F2FS_I_SB(inode), INLINE_DATA)) | 18 | if (!test_opt(F2FS_I_SB(inode), INLINE_DATA)) |
22 | return false; | 19 | return false; |
23 | 20 | ||
24 | if (f2fs_is_atomic_file(inode)) | 21 | if (f2fs_is_atomic_file(inode)) |
25 | return false; | 22 | return false; |
26 | 23 | ||
27 | nr_blocks = F2FS_I(inode)->i_xattr_nid ? 3 : 2; | 24 | if (!S_ISREG(inode->i_mode)) |
28 | if (inode->i_blocks > nr_blocks) | ||
29 | return false; | ||
30 | |||
31 | i_size = i_size_read(inode); | ||
32 | if (i_size > MAX_INLINE_DATA) | ||
33 | return false; | 25 | return false; |
34 | 26 | ||
35 | return true; | 27 | return true; |
36 | } | 28 | } |
37 | 29 | ||
38 | int f2fs_read_inline_data(struct inode *inode, struct page *page) | 30 | void read_inline_data(struct page *page, struct page *ipage) |
39 | { | 31 | { |
40 | struct page *ipage; | ||
41 | void *src_addr, *dst_addr; | 32 | void *src_addr, *dst_addr; |
42 | 33 | ||
43 | if (page->index) { | 34 | if (PageUptodate(page)) |
44 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); | 35 | return; |
45 | goto out; | ||
46 | } | ||
47 | 36 | ||
48 | ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino); | 37 | f2fs_bug_on(F2FS_P_SB(page), page->index); |
49 | if (IS_ERR(ipage)) { | ||
50 | unlock_page(page); | ||
51 | return PTR_ERR(ipage); | ||
52 | } | ||
53 | 38 | ||
54 | zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); | 39 | zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); |
55 | 40 | ||
@@ -59,104 +44,120 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page) | |||
59 | memcpy(dst_addr, src_addr, MAX_INLINE_DATA); | 44 | memcpy(dst_addr, src_addr, MAX_INLINE_DATA); |
60 | flush_dcache_page(page); | 45 | flush_dcache_page(page); |
61 | kunmap_atomic(dst_addr); | 46 | kunmap_atomic(dst_addr); |
62 | f2fs_put_page(ipage, 1); | ||
63 | out: | ||
64 | SetPageUptodate(page); | 47 | SetPageUptodate(page); |
65 | unlock_page(page); | 48 | } |
66 | 49 | ||
50 | int f2fs_read_inline_data(struct inode *inode, struct page *page) | ||
51 | { | ||
52 | struct page *ipage; | ||
53 | |||
54 | ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino); | ||
55 | if (IS_ERR(ipage)) { | ||
56 | unlock_page(page); | ||
57 | return PTR_ERR(ipage); | ||
58 | } | ||
59 | |||
60 | if (!f2fs_has_inline_data(inode)) { | ||
61 | f2fs_put_page(ipage, 1); | ||
62 | return -EAGAIN; | ||
63 | } | ||
64 | |||
65 | if (page->index) | ||
66 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); | ||
67 | else | ||
68 | read_inline_data(page, ipage); | ||
69 | |||
70 | SetPageUptodate(page); | ||
71 | f2fs_put_page(ipage, 1); | ||
72 | unlock_page(page); | ||
67 | return 0; | 73 | return 0; |
68 | } | 74 | } |
69 | 75 | ||
70 | static int __f2fs_convert_inline_data(struct inode *inode, struct page *page) | 76 | int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) |
71 | { | 77 | { |
72 | int err = 0; | ||
73 | struct page *ipage; | ||
74 | struct dnode_of_data dn; | ||
75 | void *src_addr, *dst_addr; | 78 | void *src_addr, *dst_addr; |
76 | block_t new_blk_addr; | 79 | block_t new_blk_addr; |
77 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | ||
78 | struct f2fs_io_info fio = { | 80 | struct f2fs_io_info fio = { |
79 | .type = DATA, | 81 | .type = DATA, |
80 | .rw = WRITE_SYNC | REQ_PRIO, | 82 | .rw = WRITE_SYNC | REQ_PRIO, |
81 | }; | 83 | }; |
84 | int err; | ||
82 | 85 | ||
83 | f2fs_lock_op(sbi); | 86 | f2fs_bug_on(F2FS_I_SB(dn->inode), page->index); |
84 | ipage = get_node_page(sbi, inode->i_ino); | ||
85 | if (IS_ERR(ipage)) { | ||
86 | err = PTR_ERR(ipage); | ||
87 | goto out; | ||
88 | } | ||
89 | 87 | ||
90 | /* someone else converted inline_data already */ | 88 | if (!f2fs_exist_data(dn->inode)) |
91 | if (!f2fs_has_inline_data(inode)) | 89 | goto clear_out; |
92 | goto out; | ||
93 | 90 | ||
94 | /* | 91 | err = f2fs_reserve_block(dn, 0); |
95 | * i_addr[0] is not used for inline data, | ||
96 | * so reserving new block will not destroy inline data | ||
97 | */ | ||
98 | set_new_dnode(&dn, inode, ipage, NULL, 0); | ||
99 | err = f2fs_reserve_block(&dn, 0); | ||
100 | if (err) | 92 | if (err) |
101 | goto out; | 93 | return err; |
102 | 94 | ||
103 | f2fs_wait_on_page_writeback(page, DATA); | 95 | f2fs_wait_on_page_writeback(page, DATA); |
96 | |||
97 | if (PageUptodate(page)) | ||
98 | goto no_update; | ||
99 | |||
104 | zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); | 100 | zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); |
105 | 101 | ||
106 | /* Copy the whole inline data block */ | 102 | /* Copy the whole inline data block */ |
107 | src_addr = inline_data_addr(ipage); | 103 | src_addr = inline_data_addr(dn->inode_page); |
108 | dst_addr = kmap_atomic(page); | 104 | dst_addr = kmap_atomic(page); |
109 | memcpy(dst_addr, src_addr, MAX_INLINE_DATA); | 105 | memcpy(dst_addr, src_addr, MAX_INLINE_DATA); |
110 | kunmap_atomic(dst_addr); | 106 | kunmap_atomic(dst_addr); |
111 | SetPageUptodate(page); | 107 | SetPageUptodate(page); |
112 | 108 | no_update: | |
113 | /* write data page to try to make data consistent */ | 109 | /* write data page to try to make data consistent */ |
114 | set_page_writeback(page); | 110 | set_page_writeback(page); |
115 | write_data_page(page, &dn, &new_blk_addr, &fio); | 111 | |
116 | update_extent_cache(new_blk_addr, &dn); | 112 | write_data_page(page, dn, &new_blk_addr, &fio); |
113 | update_extent_cache(new_blk_addr, dn); | ||
117 | f2fs_wait_on_page_writeback(page, DATA); | 114 | f2fs_wait_on_page_writeback(page, DATA); |
118 | 115 | ||
119 | /* clear inline data and flag after data writeback */ | 116 | /* clear inline data and flag after data writeback */ |
120 | zero_user_segment(ipage, INLINE_DATA_OFFSET, | 117 | truncate_inline_data(dn->inode_page, 0); |
121 | INLINE_DATA_OFFSET + MAX_INLINE_DATA); | 118 | clear_out: |
122 | clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | 119 | f2fs_clear_inline_inode(dn->inode); |
123 | stat_dec_inline_inode(inode); | 120 | stat_dec_inline_inode(dn->inode); |
124 | 121 | sync_inode_page(dn); | |
125 | sync_inode_page(&dn); | 122 | f2fs_put_dnode(dn); |
126 | f2fs_put_dnode(&dn); | 123 | return 0; |
127 | out: | ||
128 | f2fs_unlock_op(sbi); | ||
129 | return err; | ||
130 | } | 124 | } |
131 | 125 | ||
132 | int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size, | 126 | int f2fs_convert_inline_inode(struct inode *inode) |
133 | struct page *page) | ||
134 | { | 127 | { |
135 | struct page *new_page = page; | 128 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
136 | int err; | 129 | struct dnode_of_data dn; |
130 | struct page *ipage, *page; | ||
131 | int err = 0; | ||
137 | 132 | ||
138 | if (!f2fs_has_inline_data(inode)) | 133 | page = grab_cache_page(inode->i_mapping, 0); |
139 | return 0; | 134 | if (!page) |
140 | else if (to_size <= MAX_INLINE_DATA) | 135 | return -ENOMEM; |
141 | return 0; | ||
142 | 136 | ||
143 | if (!page || page->index != 0) { | 137 | f2fs_lock_op(sbi); |
144 | new_page = grab_cache_page(inode->i_mapping, 0); | 138 | |
145 | if (!new_page) | 139 | ipage = get_node_page(sbi, inode->i_ino); |
146 | return -ENOMEM; | 140 | if (IS_ERR(ipage)) { |
141 | f2fs_unlock_op(sbi); | ||
142 | return PTR_ERR(ipage); | ||
147 | } | 143 | } |
148 | 144 | ||
149 | err = __f2fs_convert_inline_data(inode, new_page); | 145 | set_new_dnode(&dn, inode, ipage, ipage, 0); |
150 | if (!page || page->index != 0) | 146 | |
151 | f2fs_put_page(new_page, 1); | 147 | if (f2fs_has_inline_data(inode)) |
148 | err = f2fs_convert_inline_page(&dn, page); | ||
149 | |||
150 | f2fs_put_dnode(&dn); | ||
151 | |||
152 | f2fs_unlock_op(sbi); | ||
153 | |||
154 | f2fs_put_page(page, 1); | ||
152 | return err; | 155 | return err; |
153 | } | 156 | } |
154 | 157 | ||
155 | int f2fs_write_inline_data(struct inode *inode, | 158 | int f2fs_write_inline_data(struct inode *inode, struct page *page) |
156 | struct page *page, unsigned size) | ||
157 | { | 159 | { |
158 | void *src_addr, *dst_addr; | 160 | void *src_addr, *dst_addr; |
159 | struct page *ipage; | ||
160 | struct dnode_of_data dn; | 161 | struct dnode_of_data dn; |
161 | int err; | 162 | int err; |
162 | 163 | ||
@@ -164,48 +165,39 @@ int f2fs_write_inline_data(struct inode *inode, | |||
164 | err = get_dnode_of_data(&dn, 0, LOOKUP_NODE); | 165 | err = get_dnode_of_data(&dn, 0, LOOKUP_NODE); |
165 | if (err) | 166 | if (err) |
166 | return err; | 167 | return err; |
167 | ipage = dn.inode_page; | ||
168 | 168 | ||
169 | /* Release any data block if it is allocated */ | ||
170 | if (!f2fs_has_inline_data(inode)) { | 169 | if (!f2fs_has_inline_data(inode)) { |
171 | int count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); | 170 | f2fs_put_dnode(&dn); |
172 | truncate_data_blocks_range(&dn, count); | 171 | return -EAGAIN; |
173 | set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | ||
174 | stat_inc_inline_inode(inode); | ||
175 | } | 172 | } |
176 | 173 | ||
177 | f2fs_wait_on_page_writeback(ipage, NODE); | 174 | f2fs_bug_on(F2FS_I_SB(inode), page->index); |
178 | zero_user_segment(ipage, INLINE_DATA_OFFSET, | 175 | |
179 | INLINE_DATA_OFFSET + MAX_INLINE_DATA); | 176 | f2fs_wait_on_page_writeback(dn.inode_page, NODE); |
180 | src_addr = kmap_atomic(page); | 177 | src_addr = kmap_atomic(page); |
181 | dst_addr = inline_data_addr(ipage); | 178 | dst_addr = inline_data_addr(dn.inode_page); |
182 | memcpy(dst_addr, src_addr, size); | 179 | memcpy(dst_addr, src_addr, MAX_INLINE_DATA); |
183 | kunmap_atomic(src_addr); | 180 | kunmap_atomic(src_addr); |
184 | 181 | ||
185 | set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); | 182 | set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); |
183 | set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); | ||
184 | |||
186 | sync_inode_page(&dn); | 185 | sync_inode_page(&dn); |
187 | f2fs_put_dnode(&dn); | 186 | f2fs_put_dnode(&dn); |
188 | |||
189 | return 0; | 187 | return 0; |
190 | } | 188 | } |
191 | 189 | ||
192 | void truncate_inline_data(struct inode *inode, u64 from) | 190 | void truncate_inline_data(struct page *ipage, u64 from) |
193 | { | 191 | { |
194 | struct page *ipage; | 192 | void *addr; |
195 | 193 | ||
196 | if (from >= MAX_INLINE_DATA) | 194 | if (from >= MAX_INLINE_DATA) |
197 | return; | 195 | return; |
198 | 196 | ||
199 | ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino); | ||
200 | if (IS_ERR(ipage)) | ||
201 | return; | ||
202 | |||
203 | f2fs_wait_on_page_writeback(ipage, NODE); | 197 | f2fs_wait_on_page_writeback(ipage, NODE); |
204 | 198 | ||
205 | zero_user_segment(ipage, INLINE_DATA_OFFSET + from, | 199 | addr = inline_data_addr(ipage); |
206 | INLINE_DATA_OFFSET + MAX_INLINE_DATA); | 200 | memset(addr + from, 0, MAX_INLINE_DATA - from); |
207 | set_page_dirty(ipage); | ||
208 | f2fs_put_page(ipage, 1); | ||
209 | } | 201 | } |
210 | 202 | ||
211 | bool recover_inline_data(struct inode *inode, struct page *npage) | 203 | bool recover_inline_data(struct inode *inode, struct page *npage) |
@@ -237,6 +229,10 @@ process_inline: | |||
237 | src_addr = inline_data_addr(npage); | 229 | src_addr = inline_data_addr(npage); |
238 | dst_addr = inline_data_addr(ipage); | 230 | dst_addr = inline_data_addr(ipage); |
239 | memcpy(dst_addr, src_addr, MAX_INLINE_DATA); | 231 | memcpy(dst_addr, src_addr, MAX_INLINE_DATA); |
232 | |||
233 | set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | ||
234 | set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); | ||
235 | |||
240 | update_inode(inode, ipage); | 236 | update_inode(inode, ipage); |
241 | f2fs_put_page(ipage, 1); | 237 | f2fs_put_page(ipage, 1); |
242 | return true; | 238 | return true; |
@@ -245,15 +241,12 @@ process_inline: | |||
245 | if (f2fs_has_inline_data(inode)) { | 241 | if (f2fs_has_inline_data(inode)) { |
246 | ipage = get_node_page(sbi, inode->i_ino); | 242 | ipage = get_node_page(sbi, inode->i_ino); |
247 | f2fs_bug_on(sbi, IS_ERR(ipage)); | 243 | f2fs_bug_on(sbi, IS_ERR(ipage)); |
248 | f2fs_wait_on_page_writeback(ipage, NODE); | 244 | truncate_inline_data(ipage, 0); |
249 | zero_user_segment(ipage, INLINE_DATA_OFFSET, | 245 | f2fs_clear_inline_inode(inode); |
250 | INLINE_DATA_OFFSET + MAX_INLINE_DATA); | ||
251 | clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | ||
252 | update_inode(inode, ipage); | 246 | update_inode(inode, ipage); |
253 | f2fs_put_page(ipage, 1); | 247 | f2fs_put_page(ipage, 1); |
254 | } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { | 248 | } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { |
255 | truncate_blocks(inode, 0, false); | 249 | truncate_blocks(inode, 0, false); |
256 | set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | ||
257 | goto process_inline; | 250 | goto process_inline; |
258 | } | 251 | } |
259 | return false; | 252 | return false; |
@@ -366,8 +359,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, | |||
366 | set_page_dirty(page); | 359 | set_page_dirty(page); |
367 | 360 | ||
368 | /* clear inline dir and flag after data writeback */ | 361 | /* clear inline dir and flag after data writeback */ |
369 | zero_user_segment(ipage, INLINE_DATA_OFFSET, | 362 | truncate_inline_data(ipage, 0); |
370 | INLINE_DATA_OFFSET + MAX_INLINE_DATA); | 363 | |
371 | stat_dec_inline_dir(dir); | 364 | stat_dec_inline_dir(dir); |
372 | clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); | 365 | clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); |
373 | 366 | ||
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 4131e3cfd1cf..9fe110ef8cc4 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c | |||
@@ -67,12 +67,38 @@ static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri) | |||
67 | } | 67 | } |
68 | } | 68 | } |
69 | 69 | ||
70 | static int __recover_inline_status(struct inode *inode, struct page *ipage) | ||
71 | { | ||
72 | void *inline_data = inline_data_addr(ipage); | ||
73 | struct f2fs_inode *ri; | ||
74 | void *zbuf; | ||
75 | |||
76 | zbuf = kzalloc(MAX_INLINE_DATA, GFP_NOFS); | ||
77 | if (!zbuf) | ||
78 | return -ENOMEM; | ||
79 | |||
80 | if (!memcmp(zbuf, inline_data, MAX_INLINE_DATA)) { | ||
81 | kfree(zbuf); | ||
82 | return 0; | ||
83 | } | ||
84 | kfree(zbuf); | ||
85 | |||
86 | f2fs_wait_on_page_writeback(ipage, NODE); | ||
87 | set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); | ||
88 | |||
89 | ri = F2FS_INODE(ipage); | ||
90 | set_raw_inline(F2FS_I(inode), ri); | ||
91 | set_page_dirty(ipage); | ||
92 | return 0; | ||
93 | } | ||
94 | |||
70 | static int do_read_inode(struct inode *inode) | 95 | static int do_read_inode(struct inode *inode) |
71 | { | 96 | { |
72 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | 97 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
73 | struct f2fs_inode_info *fi = F2FS_I(inode); | 98 | struct f2fs_inode_info *fi = F2FS_I(inode); |
74 | struct page *node_page; | 99 | struct page *node_page; |
75 | struct f2fs_inode *ri; | 100 | struct f2fs_inode *ri; |
101 | int err = 0; | ||
76 | 102 | ||
77 | /* Check if ino is within scope */ | 103 | /* Check if ino is within scope */ |
78 | if (check_nid_range(sbi, inode->i_ino)) { | 104 | if (check_nid_range(sbi, inode->i_ino)) { |
@@ -114,11 +140,15 @@ static int do_read_inode(struct inode *inode) | |||
114 | get_extent_info(&fi->ext, ri->i_ext); | 140 | get_extent_info(&fi->ext, ri->i_ext); |
115 | get_inline_info(fi, ri); | 141 | get_inline_info(fi, ri); |
116 | 142 | ||
143 | /* check data exist */ | ||
144 | if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode)) | ||
145 | err = __recover_inline_status(inode, node_page); | ||
146 | |||
117 | /* get rdev by using inline_info */ | 147 | /* get rdev by using inline_info */ |
118 | __get_inode_rdev(inode, ri); | 148 | __get_inode_rdev(inode, ri); |
119 | 149 | ||
120 | f2fs_put_page(node_page, 1); | 150 | f2fs_put_page(node_page, 1); |
121 | return 0; | 151 | return err; |
122 | } | 152 | } |
123 | 153 | ||
124 | struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) | 154 | struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) |
@@ -329,6 +359,7 @@ void handle_failed_inode(struct inode *inode) | |||
329 | 359 | ||
330 | remove_inode_page(inode); | 360 | remove_inode_page(inode); |
331 | 361 | ||
362 | clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | ||
332 | clear_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY); | 363 | clear_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY); |
333 | alloc_nid_failed(sbi, inode->i_ino); | 364 | alloc_nid_failed(sbi, inode->i_ino); |
334 | f2fs_unlock_op(sbi); | 365 | f2fs_unlock_op(sbi); |
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index a004a978096f..6312dd2e53f7 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c | |||
@@ -55,6 +55,8 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) | |||
55 | goto out; | 55 | goto out; |
56 | } | 56 | } |
57 | 57 | ||
58 | if (f2fs_may_inline(inode)) | ||
59 | set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | ||
58 | if (test_opt(sbi, INLINE_DENTRY) && S_ISDIR(inode->i_mode)) | 60 | if (test_opt(sbi, INLINE_DENTRY) && S_ISDIR(inode->i_mode)) |
59 | set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY); | 61 | set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY); |
60 | 62 | ||
@@ -133,6 +135,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, | |||
133 | 135 | ||
134 | alloc_nid_done(sbi, ino); | 136 | alloc_nid_done(sbi, ino); |
135 | 137 | ||
138 | stat_inc_inline_inode(inode); | ||
136 | d_instantiate(dentry, inode); | 139 | d_instantiate(dentry, inode); |
137 | unlock_new_inode(inode); | 140 | unlock_new_inode(inode); |
138 | return 0; | 141 | return 0; |