Diffstat (limited to 'fs/f2fs/file.c')
-rw-r--r--	fs/f2fs/file.c	46
1 file changed, 22 insertions(+), 24 deletions(-)
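
Across the hunks below, the per-category lock calls (`mutex_lock_op(sbi, DATA_NEW)` and `mutex_lock_op(sbi, DATA_TRUNC)`) are replaced by a single-argument `mutex_lock_op(sbi)` that returns a lock index, `ilock`, which each caller later hands back to `mutex_unlock_op(sbi, ilock)`; `truncate_hole()` also stops locking per page and instead runs under one lock taken by its caller `punch_hole()`. As a rough illustration of how such a pair of helpers could work, here is a minimal sketch; the names `fs_lock`, `NR_GLOBAL_LOCKS` and `next_lock_num` are assumptions made for this sketch and are not defined in this patch.

/*
 * Sketch only: one plausible shape for the ilock-style helpers assumed by
 * the callers in this diff.  fs_lock[], NR_GLOBAL_LOCKS and next_lock_num
 * are illustrative names, not taken from this patch.
 */
static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
{
	int i;

	/* prefer a currently free slot so we do not block */
	for (i = 0; i < NR_GLOBAL_LOCKS; i++)
		if (mutex_trylock(&sbi->fs_lock[i]))
			return i;

	/* otherwise pick the next slot round-robin and wait for it */
	i = atomic_add_return(1, &sbi->next_lock_num) % NR_GLOBAL_LOCKS;
	mutex_lock(&sbi->fs_lock[i]);
	return i;
}

static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
{
	mutex_unlock(&sbi->fs_lock[ilock]);
}
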
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 155b362dad63..07be88ddb9f8 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -34,19 +34,18 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	block_t old_blk_addr;
 	struct dnode_of_data dn;
-	int err;
+	int err, ilock;
 
 	f2fs_balance_fs(sbi);
 
 	sb_start_pagefault(inode->i_sb);
 
-	mutex_lock_op(sbi, DATA_NEW);
-
 	/* block allocation */
+	ilock = mutex_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
 	if (err) {
-		mutex_unlock_op(sbi, DATA_NEW);
+		mutex_unlock_op(sbi, ilock);
 		goto out;
 	}
 
@@ -56,13 +55,12 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 		err = reserve_new_block(&dn);
 		if (err) {
 			f2fs_put_dnode(&dn);
-			mutex_unlock_op(sbi, DATA_NEW);
+			mutex_unlock_op(sbi, ilock);
 			goto out;
 		}
 	}
 	f2fs_put_dnode(&dn);
-
-	mutex_unlock_op(sbi, DATA_NEW);
+	mutex_unlock_op(sbi, ilock);
 
 	lock_page(page);
 	if (page->mapping != inode->i_mapping ||
@@ -223,20 +221,19 @@ static int truncate_blocks(struct inode *inode, u64 from)
 	unsigned int blocksize = inode->i_sb->s_blocksize;
 	struct dnode_of_data dn;
 	pgoff_t free_from;
-	int count = 0;
+	int count = 0, ilock = -1;
 	int err;
 
 	free_from = (pgoff_t)
 			((from + blocksize - 1) >> (sbi->log_blocksize));
 
-	mutex_lock_op(sbi, DATA_TRUNC);
-
+	ilock = mutex_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
 	if (err) {
 		if (err == -ENOENT)
 			goto free_next;
-		mutex_unlock_op(sbi, DATA_TRUNC);
+		mutex_unlock_op(sbi, ilock);
 		return err;
 	}
 
@@ -247,6 +244,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
 
 	count -= dn.ofs_in_node;
 	BUG_ON(count < 0);
+
 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
 		truncate_data_blocks_range(&dn, count);
 		free_from += count;
@@ -255,7 +253,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
 	f2fs_put_dnode(&dn);
 free_next:
 	err = truncate_inode_blocks(inode, free_from);
-	mutex_unlock_op(sbi, DATA_TRUNC);
+	mutex_unlock_op(sbi, ilock);
 
 	/* lastly zero out the first data page */
 	truncate_partial_data_page(inode, from);
@@ -363,15 +361,16 @@ static void fill_zero(struct inode *inode, pgoff_t index,
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct page *page;
+	int ilock;
 
 	if (!len)
 		return;
 
 	f2fs_balance_fs(sbi);
 
-	mutex_lock_op(sbi, DATA_NEW);
+	ilock = mutex_lock_op(sbi);
 	page = get_new_data_page(inode, index, false);
-	mutex_unlock_op(sbi, DATA_NEW);
+	mutex_unlock_op(sbi, ilock);
 
 	if (!IS_ERR(page)) {
 		wait_on_page_writeback(page);
@@ -388,13 +387,10 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
 
 	for (index = pg_start; index < pg_end; index++) {
 		struct dnode_of_data dn;
-		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
-		mutex_lock_op(sbi, DATA_TRUNC);
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 		if (err) {
-			mutex_unlock_op(sbi, DATA_TRUNC);
 			if (err == -ENOENT)
 				continue;
 			return err;
@@ -403,7 +399,6 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
 		if (dn.data_blkaddr != NULL_ADDR)
 			truncate_data_blocks_range(&dn, 1);
 		f2fs_put_dnode(&dn);
-		mutex_unlock_op(sbi, DATA_TRUNC);
 	}
 	return 0;
 }
@@ -434,6 +429,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
 			struct address_space *mapping = inode->i_mapping;
 			loff_t blk_start, blk_end;
 			struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+			int ilock;
 
 			f2fs_balance_fs(sbi);
 
@@ -441,7 +437,10 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
 			blk_end = pg_end << PAGE_CACHE_SHIFT;
 			truncate_inode_pages_range(mapping, blk_start,
 					blk_end - 1);
+
+			ilock = mutex_lock_op(sbi);
 			ret = truncate_hole(inode, pg_start, pg_end);
+			mutex_unlock_op(sbi, ilock);
 		}
 	}
 
@@ -475,13 +474,13 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 
 	for (index = pg_start; index <= pg_end; index++) {
 		struct dnode_of_data dn;
+		int ilock;
 
-		mutex_lock_op(sbi, DATA_NEW);
-
+		ilock = mutex_lock_op(sbi);
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
 		if (ret) {
-			mutex_unlock_op(sbi, DATA_NEW);
+			mutex_unlock_op(sbi, ilock);
 			break;
 		}
 
@@ -489,13 +488,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 			ret = reserve_new_block(&dn);
 			if (ret) {
 				f2fs_put_dnode(&dn);
-				mutex_unlock_op(sbi, DATA_NEW);
+				mutex_unlock_op(sbi, ilock);
 				break;
 			}
 		}
 		f2fs_put_dnode(&dn);
-
-		mutex_unlock_op(sbi, DATA_NEW);
+		mutex_unlock_op(sbi, ilock);
 
 		if (pg_start == pg_end)
 			new_size = offset + len;
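
For reference, the caller-side pattern that results from this change, condensed from the hunks above into one hypothetical helper (the function name and its exact error path are illustrative, not part of the patch):

static int f2fs_reserve_block_ilock(struct f2fs_sb_info *sbi,
					struct inode *inode, pgoff_t index)
{
	struct dnode_of_data dn;
	int err, ilock;

	ilock = mutex_lock_op(sbi);		/* remember which slot we got */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto out;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);
	f2fs_put_dnode(&dn);
out:
	mutex_unlock_op(sbi, ilock);		/* hand the same slot back */
	return err;
}

Returning an index from the lock call makes the unlock side explicit about which shared lock is being dropped, and it is what lets punch_hole() hold a single acquisition around the whole truncate_hole() loop instead of relocking for every page, as the hunks above show.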