Diffstat (limited to 'fs/f2fs/file.c')

 fs/f2fs/file.c | 116
 1 file changed, 54 insertions(+), 62 deletions(-)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index db626282d424..1cae864f8dfc 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -13,6 +13,7 @@
 #include <linux/stat.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
+#include <linux/blkdev.h>
 #include <linux/falloc.h>
 #include <linux/types.h>
 #include <linux/compat.h>
@@ -24,6 +25,7 @@
 #include "segment.h"
 #include "xattr.h"
 #include "acl.h"
+#include <trace/events/f2fs.h>
 
 static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 						struct vm_fault *vmf)
@@ -33,19 +35,18 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	block_t old_blk_addr;
 	struct dnode_of_data dn;
-	int err;
+	int err, ilock;
 
 	f2fs_balance_fs(sbi);
 
 	sb_start_pagefault(inode->i_sb);
 
-	mutex_lock_op(sbi, DATA_NEW);
-
 	/* block allocation */
+	ilock = mutex_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, page->index, 0);
+	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
 	if (err) {
-		mutex_unlock_op(sbi, DATA_NEW);
+		mutex_unlock_op(sbi, ilock);
 		goto out;
 	}
 
@@ -55,13 +56,12 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 		err = reserve_new_block(&dn);
 		if (err) {
 			f2fs_put_dnode(&dn);
-			mutex_unlock_op(sbi, DATA_NEW);
+			mutex_unlock_op(sbi, ilock);
 			goto out;
 		}
 	}
 	f2fs_put_dnode(&dn);
-
-	mutex_unlock_op(sbi, DATA_NEW);
+	mutex_unlock_op(sbi, ilock);
 
 	lock_page(page);
 	if (page->mapping != inode->i_mapping ||
@@ -102,28 +102,10 @@ static const struct vm_operations_struct f2fs_file_vm_ops = {
 	.remap_pages	= generic_file_remap_pages,
 };
 
-static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
-{
-	struct dentry *dentry;
-	nid_t pino;
-
-	inode = igrab(inode);
-	dentry = d_find_any_alias(inode);
-	if (!dentry) {
-		iput(inode);
-		return 0;
-	}
-	pino = dentry->d_parent->d_inode->i_ino;
-	dput(dentry);
-	iput(inode);
-	return !is_checkpointed_node(sbi, pino);
-}
-
 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
 	struct inode *inode = file->f_mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-	unsigned long long cur_version;
 	int ret = 0;
 	bool need_cp = false;
 	struct writeback_control wbc = {
@@ -135,9 +117,12 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	if (inode->i_sb->s_flags & MS_RDONLY)
 		return 0;
 
+	trace_f2fs_sync_file_enter(inode);
 	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (ret)
+	if (ret) {
+		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
 		return ret;
+	}
 
 	/* guarantee free sections for fsync */
 	f2fs_balance_fs(sbi);
@@ -147,28 +132,18 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 		goto out;
 
-	mutex_lock(&sbi->cp_mutex);
-	cur_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
-	mutex_unlock(&sbi->cp_mutex);
-
-	if (F2FS_I(inode)->data_version != cur_version &&
-			!(inode->i_state & I_DIRTY))
-		goto out;
-	F2FS_I(inode)->data_version--;
-
 	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
 		need_cp = true;
-	else if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
+	else if (is_cp_file(inode))
 		need_cp = true;
 	else if (!space_for_roll_forward(sbi))
 		need_cp = true;
-	else if (need_to_sync_dir(sbi, inode))
+	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
 		need_cp = true;
 
 	if (need_cp) {
 		/* all the dirty node pages should be flushed for POR */
 		ret = f2fs_sync_fs(inode->i_sb, 1);
-		clear_inode_flag(F2FS_I(inode), FI_NEED_CP);
 	} else {
 		/* if there is no written node page, write its inode page */
 		while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
@@ -178,9 +153,11 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 		}
 		filemap_fdatawait_range(sbi->node_inode->i_mapping,
 				0, LONG_MAX);
+		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 	}
 out:
 	mutex_unlock(&inode->i_mutex);
+	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
 	return ret;
 }
 
@@ -216,6 +193,9 @@ static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 		sync_inode_page(dn);
 	}
 	dn->ofs_in_node = ofs;
+
+	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
+					dn->ofs_in_node, nr_free);
 	return nr_free;
 }
 
@@ -232,11 +212,15 @@ static void truncate_partial_data_page(struct inode *inode, u64 from)
 	if (!offset)
 		return;
 
-	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT);
+	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
 	if (IS_ERR(page))
 		return;
 
 	lock_page(page);
+	if (page->mapping != inode->i_mapping) {
+		f2fs_put_page(page, 1);
+		return;
+	}
 	wait_on_page_writeback(page);
 	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
 	set_page_dirty(page);
@@ -249,20 +233,22 @@ static int truncate_blocks(struct inode *inode, u64 from)
 	unsigned int blocksize = inode->i_sb->s_blocksize;
 	struct dnode_of_data dn;
 	pgoff_t free_from;
-	int count = 0;
+	int count = 0, ilock = -1;
 	int err;
 
+	trace_f2fs_truncate_blocks_enter(inode, from);
+
 	free_from = (pgoff_t)
 			((from + blocksize - 1) >> (sbi->log_blocksize));
 
-	mutex_lock_op(sbi, DATA_TRUNC);
-
+	ilock = mutex_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, free_from, RDONLY_NODE);
+	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
 	if (err) {
 		if (err == -ENOENT)
 			goto free_next;
-		mutex_unlock_op(sbi, DATA_TRUNC);
+		mutex_unlock_op(sbi, ilock);
+		trace_f2fs_truncate_blocks_exit(inode, err);
 		return err;
 	}
 
@@ -273,6 +259,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
 
 	count -= dn.ofs_in_node;
 	BUG_ON(count < 0);
+
 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
 		truncate_data_blocks_range(&dn, count);
 		free_from += count;
@@ -281,11 +268,12 @@ static int truncate_blocks(struct inode *inode, u64 from)
 	f2fs_put_dnode(&dn);
 free_next:
 	err = truncate_inode_blocks(inode, free_from);
-	mutex_unlock_op(sbi, DATA_TRUNC);
+	mutex_unlock_op(sbi, ilock);
 
 	/* lastly zero out the first data page */
 	truncate_partial_data_page(inode, from);
 
+	trace_f2fs_truncate_blocks_exit(inode, err);
 	return err;
 }
 
@@ -295,6 +283,8 @@ void f2fs_truncate(struct inode *inode)
 				S_ISLNK(inode->i_mode)))
 		return;
 
+	trace_f2fs_truncate(inode);
+
 	if (!truncate_blocks(inode, i_size_read(inode))) {
 		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		mark_inode_dirty(inode);
@@ -389,15 +379,16 @@ static void fill_zero(struct inode *inode, pgoff_t index,
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct page *page;
+	int ilock;
 
 	if (!len)
 		return;
 
 	f2fs_balance_fs(sbi);
 
-	mutex_lock_op(sbi, DATA_NEW);
+	ilock = mutex_lock_op(sbi);
 	page = get_new_data_page(inode, index, false);
-	mutex_unlock_op(sbi, DATA_NEW);
+	mutex_unlock_op(sbi, ilock);
 
 	if (!IS_ERR(page)) {
 		wait_on_page_writeback(page);
@@ -414,15 +405,10 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
 
 	for (index = pg_start; index < pg_end; index++) {
 		struct dnode_of_data dn;
-		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-
-		f2fs_balance_fs(sbi);
 
-		mutex_lock_op(sbi, DATA_TRUNC);
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
-		err = get_dnode_of_data(&dn, index, RDONLY_NODE);
+		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 		if (err) {
-			mutex_unlock_op(sbi, DATA_TRUNC);
 			if (err == -ENOENT)
 				continue;
 			return err;
@@ -431,7 +417,6 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
 		if (dn.data_blkaddr != NULL_ADDR)
 			truncate_data_blocks_range(&dn, 1);
 		f2fs_put_dnode(&dn);
-		mutex_unlock_op(sbi, DATA_TRUNC);
 	}
 	return 0;
 }
@@ -461,12 +446,19 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
 		if (pg_start < pg_end) {
 			struct address_space *mapping = inode->i_mapping;
 			loff_t blk_start, blk_end;
+			struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+			int ilock;
+
+			f2fs_balance_fs(sbi);
 
 			blk_start = pg_start << PAGE_CACHE_SHIFT;
 			blk_end = pg_end << PAGE_CACHE_SHIFT;
 			truncate_inode_pages_range(mapping, blk_start,
 					blk_end - 1);
+
+			ilock = mutex_lock_op(sbi);
 			ret = truncate_hole(inode, pg_start, pg_end);
+			mutex_unlock_op(sbi, ilock);
 		}
 	}
 
@@ -500,13 +492,13 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 
 	for (index = pg_start; index <= pg_end; index++) {
 		struct dnode_of_data dn;
+		int ilock;
 
-		mutex_lock_op(sbi, DATA_NEW);
-
+		ilock = mutex_lock_op(sbi);
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
-		ret = get_dnode_of_data(&dn, index, 0);
+		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
 		if (ret) {
-			mutex_unlock_op(sbi, DATA_NEW);
+			mutex_unlock_op(sbi, ilock);
 			break;
 		}
 
@@ -514,13 +506,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 			ret = reserve_new_block(&dn);
 			if (ret) {
 				f2fs_put_dnode(&dn);
-				mutex_unlock_op(sbi, DATA_NEW);
+				mutex_unlock_op(sbi, ilock);
 				break;
 			}
 		}
 		f2fs_put_dnode(&dn);
-
-		mutex_unlock_op(sbi, DATA_NEW);
+		mutex_unlock_op(sbi, ilock);
 
 		if (pg_start == pg_end)
 			new_size = offset + len;
@@ -559,6 +550,7 @@ static long f2fs_fallocate(struct file *file, int mode,
 		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		mark_inode_dirty(inode);
 	}
+	trace_f2fs_fallocate(inode, mode, offset, len, ret);
 	return ret;
 }
 