Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--	fs/f2fs/data.c	218
1 file changed, 147 insertions(+), 71 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7ec697b37f19..985ed023a750 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -22,6 +22,7 @@
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include "trace.h"
 #include <trace/events/f2fs.h>
 
 static void f2fs_read_end_io(struct bio *bio, int err)
@@ -95,11 +96,9 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
 		return;
 
 	if (is_read_io(fio->rw))
-		trace_f2fs_submit_read_bio(io->sbi->sb, fio->rw,
-						fio->type, io->bio);
+		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
 	else
-		trace_f2fs_submit_write_bio(io->sbi->sb, fio->rw,
-						fio->type, io->bio);
+		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
 
 	submit_bio(fio->rw, io->bio);
 	io->bio = NULL;
@@ -132,14 +131,15 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
  * Return unlocked page.
  */
 int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
-					block_t blk_addr, int rw)
+					struct f2fs_io_info *fio)
 {
 	struct bio *bio;
 
-	trace_f2fs_submit_page_bio(page, blk_addr, rw);
+	trace_f2fs_submit_page_bio(page, fio);
+	f2fs_trace_ios(page, fio, 0);
 
 	/* Allocate a new bio */
-	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));
+	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));
 
 	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
 		bio_put(bio);
@@ -147,12 +147,12 @@ int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
 		return -EFAULT;
 	}
 
-	submit_bio(rw, bio);
+	submit_bio(fio->rw, bio);
 	return 0;
 }
 
 void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
-				block_t blk_addr, struct f2fs_io_info *fio)
+				struct f2fs_io_info *fio)
 {
 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
 	struct f2fs_bio_info *io;
@@ -160,21 +160,21 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
 
 	io = is_read ? &sbi->read_io : &sbi->write_io[btype];
 
-	verify_block_addr(sbi, blk_addr);
+	verify_block_addr(sbi, fio->blk_addr);
 
 	down_write(&io->io_rwsem);
 
 	if (!is_read)
 		inc_page_count(sbi, F2FS_WRITEBACK);
 
-	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
+	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
 						io->fio.rw != fio->rw))
 		__submit_merged_bio(io);
 alloc_new:
 	if (io->bio == NULL) {
 		int bio_blocks = MAX_BIO_BLOCKS(sbi);
 
-		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
+		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
 		io->fio = *fio;
 	}
 
@@ -184,10 +184,11 @@ alloc_new:
 		goto alloc_new;
 	}
 
-	io->last_block_in_bio = blk_addr;
+	io->last_block_in_bio = fio->blk_addr;
+	f2fs_trace_ios(page, fio, 0);
 
 	up_write(&io->io_rwsem);
-	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
+	trace_f2fs_submit_page_mbio(page, fio);
 }
 
 /*
@@ -196,7 +197,7 @@ alloc_new:
  * ->node_page
  *    update block addresses in the node page
  */
-static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
+static void __set_data_blkaddr(struct dnode_of_data *dn)
 {
 	struct f2fs_node *rn;
 	__le32 *addr_array;
@@ -209,7 +210,7 @@ static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
 
 	/* Get physical address of data block */
 	addr_array = blkaddr_in_node(rn);
-	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
+	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
 	set_page_dirty(node_page);
 }
 
@@ -224,8 +225,8 @@ int reserve_new_block(struct dnode_of_data *dn)
 
 	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
 
-	__set_data_blkaddr(dn, NEW_ADDR);
 	dn->data_blkaddr = NEW_ADDR;
+	__set_data_blkaddr(dn);
 	mark_inode_dirty(dn->inode);
 	sync_inode_page(dn);
 	return 0;
@@ -273,7 +274,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
 	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
 	size_t count;
 
-	clear_buffer_new(bh_result);
+	set_buffer_new(bh_result);
 	map_bh(bh_result, inode->i_sb,
 			start_blkaddr + pgofs - start_fofs);
 	count = end_fofs - pgofs + 1;
@@ -290,23 +291,24 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
 	return 0;
 }
 
-void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
+void update_extent_cache(struct dnode_of_data *dn)
 {
 	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
 	pgoff_t fofs, start_fofs, end_fofs;
 	block_t start_blkaddr, end_blkaddr;
 	int need_update = true;
 
-	f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR);
-	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
-							dn->ofs_in_node;
+	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
 
 	/* Update the page address in the parent node */
-	__set_data_blkaddr(dn, blk_addr);
+	__set_data_blkaddr(dn);
 
 	if (is_inode_flag_set(fi, FI_NO_EXTENT))
 		return;
 
+	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+							dn->ofs_in_node;
+
 	write_lock(&fi->ext.ext_lock);
 
 	start_fofs = fi->ext.fofs;
@@ -320,16 +322,16 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
 
 	/* Initial extent */
 	if (fi->ext.len == 0) {
-		if (blk_addr != NULL_ADDR) {
+		if (dn->data_blkaddr != NULL_ADDR) {
 			fi->ext.fofs = fofs;
-			fi->ext.blk_addr = blk_addr;
+			fi->ext.blk_addr = dn->data_blkaddr;
 			fi->ext.len = 1;
 		}
 		goto end_update;
 	}
 
 	/* Front merge */
-	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
+	if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) {
 		fi->ext.fofs--;
 		fi->ext.blk_addr--;
 		fi->ext.len++;
@@ -337,7 +339,7 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
 	}
 
 	/* Back merge */
-	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
+	if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) {
 		fi->ext.len++;
 		goto end_update;
 	}
@@ -376,6 +378,10 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
 	struct dnode_of_data dn;
 	struct page *page;
 	int err;
+	struct f2fs_io_info fio = {
+		.type = DATA,
+		.rw = sync ? READ_SYNC : READA,
+	};
 
 	page = find_get_page(mapping, index);
 	if (page && PageUptodate(page))
@@ -404,8 +410,8 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
 		return page;
 	}
 
-	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, dn.data_blkaddr,
-					sync ? READ_SYNC : READA);
+	fio.blk_addr = dn.data_blkaddr;
+	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
 	if (err)
 		return ERR_PTR(err);
 
@@ -430,7 +436,10 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
 	struct dnode_of_data dn;
 	struct page *page;
 	int err;
-
+	struct f2fs_io_info fio = {
+		.type = DATA,
+		.rw = READ_SYNC,
+	};
 repeat:
 	page = grab_cache_page(mapping, index);
 	if (!page)
@@ -464,8 +473,8 @@ repeat:
 		return page;
 	}
 
-	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
-					dn.data_blkaddr, READ_SYNC);
+	fio.blk_addr = dn.data_blkaddr;
+	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
 	if (err)
 		return ERR_PTR(err);
 
@@ -515,8 +524,12 @@ repeat:
 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 		SetPageUptodate(page);
 	} else {
-		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
-						dn.data_blkaddr, READ_SYNC);
+		struct f2fs_io_info fio = {
+			.type = DATA,
+			.rw = READ_SYNC,
+			.blk_addr = dn.data_blkaddr,
+		};
+		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
 		if (err)
 			goto put_err;
 
@@ -550,30 +563,25 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
 	struct f2fs_summary sum;
-	block_t new_blkaddr;
 	struct node_info ni;
+	int seg = CURSEG_WARM_DATA;
 	pgoff_t fofs;
-	int type;
 
 	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
 		return -EPERM;
 	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
 		return -ENOSPC;
 
-	__set_data_blkaddr(dn, NEW_ADDR);
-	dn->data_blkaddr = NEW_ADDR;
-
 	get_node_info(sbi, dn->nid, &ni);
 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
 
-	type = CURSEG_WARM_DATA;
+	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
+		seg = CURSEG_DIRECT_IO;
 
-	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);
+	allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);
 
 	/* direct IO doesn't use extent cache to maximize the performance */
-	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
-	update_extent_cache(new_blkaddr, dn);
-	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
+	__set_data_blkaddr(dn);
 
 	/* update i_size */
 	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
@@ -581,10 +589,59 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
 		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
 
-	dn->data_blkaddr = new_blkaddr;
 	return 0;
 }
 
+static void __allocate_data_blocks(struct inode *inode, loff_t offset,
+							size_t count)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct dnode_of_data dn;
+	u64 start = F2FS_BYTES_TO_BLK(offset);
+	u64 len = F2FS_BYTES_TO_BLK(count);
+	bool allocated;
+	u64 end_offset;
+
+	while (len) {
+		f2fs_balance_fs(sbi);
+		f2fs_lock_op(sbi);
+
+		/* When reading holes, we need its node page */
+		set_new_dnode(&dn, inode, NULL, NULL, 0);
+		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
+			goto out;
+
+		allocated = false;
+		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+
+		while (dn.ofs_in_node < end_offset && len) {
+			if (dn.data_blkaddr == NULL_ADDR) {
+				if (__allocate_data_block(&dn))
+					goto sync_out;
+				allocated = true;
+			}
+			len--;
+			start++;
+			dn.ofs_in_node++;
+		}
+
+		if (allocated)
+			sync_inode_page(&dn);
+
+		f2fs_put_dnode(&dn);
+		f2fs_unlock_op(sbi);
+	}
+	return;
+
+sync_out:
+	if (allocated)
+		sync_inode_page(&dn);
+	f2fs_put_dnode(&dn);
+out:
+	f2fs_unlock_op(sbi);
+	return;
+}
+
 /*
  * get_data_block() now supported readahead/bmap/rw direct_IO with mapped bh.
  * If original data blocks are allocated, then give them to blockdev.
@@ -610,10 +667,8 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 	if (check_extent_cache(inode, pgofs, bh_result))
 		goto out;
 
-	if (create) {
-		f2fs_balance_fs(F2FS_I_SB(inode));
+	if (create)
 		f2fs_lock_op(F2FS_I_SB(inode));
-	}
 
 	/* When reading holes, we need its node page */
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -627,12 +682,14 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 		goto put_out;
 
 	if (dn.data_blkaddr != NULL_ADDR) {
+		set_buffer_new(bh_result);
 		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
 	} else if (create) {
 		err = __allocate_data_block(&dn);
 		if (err)
 			goto put_out;
 		allocated = true;
+		set_buffer_new(bh_result);
 		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
 	} else {
 		goto put_out;
@@ -745,7 +802,6 @@ static int f2fs_read_data_pages(struct file *file,
 int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
 {
 	struct inode *inode = page->mapping->host;
-	block_t old_blkaddr, new_blkaddr;
 	struct dnode_of_data dn;
 	int err = 0;
 
@@ -754,10 +810,10 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
 	if (err)
 		return err;
 
-	old_blkaddr = dn.data_blkaddr;
+	fio->blk_addr = dn.data_blkaddr;
 
 	/* This page is already truncated */
-	if (old_blkaddr == NULL_ADDR)
+	if (fio->blk_addr == NULL_ADDR)
 		goto out_writepage;
 
 	set_page_writeback(page);
@@ -766,14 +822,14 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
 	 * If current allocation needs SSR,
 	 * it had better in-place writes for updated data.
 	 */
-	if (unlikely(old_blkaddr != NEW_ADDR &&
+	if (unlikely(fio->blk_addr != NEW_ADDR &&
 			!is_cold_data(page) &&
 			need_inplace_update(inode))) {
-		rewrite_data_page(page, old_blkaddr, fio);
+		rewrite_data_page(page, fio);
 		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
 	} else {
-		write_data_page(page, &dn, &new_blkaddr, fio);
-		update_extent_cache(new_blkaddr, &dn);
+		write_data_page(page, &dn, fio);
+		update_extent_cache(&dn);
 		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
 	}
 out_writepage:
@@ -812,7 +868,12 @@ static int f2fs_write_data_page(struct page *page,
 
 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 write:
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+		goto redirty_out;
+	if (f2fs_is_drop_cache(inode))
+		goto out;
+	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
+			available_free_memory(sbi, BASE_CHECK))
 		goto redirty_out;
 
 	/* Dentry blocks are controlled by checkpoint */
@@ -826,7 +887,6 @@ write:
 	/* we should bypass data pages to proceed the kworkder jobs */
 	if (unlikely(f2fs_cp_error(sbi))) {
 		SetPageError(page);
-		unlock_page(page);
 		goto out;
 	}
 
@@ -1002,8 +1062,12 @@ put_next:
 	if (dn.data_blkaddr == NEW_ADDR) {
 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 	} else {
-		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
-							READ_SYNC);
+		struct f2fs_io_info fio = {
+			.type = DATA,
+			.rw = READ_SYNC,
+			.blk_addr = dn.data_blkaddr,
+		};
+		err = f2fs_submit_page_bio(sbi, page, &fio);
 		if (err)
 			goto fail;
 
@@ -1092,6 +1156,9 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
 
+	if (rw & WRITE)
+		__allocate_data_blocks(inode, offset, count);
+
 	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
 	if (err < 0 && (rw & WRITE))
 		f2fs_write_failed(mapping, offset + count);
@@ -1101,24 +1168,33 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
 	return err;
 }
 
-static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
+void f2fs_invalidate_page(struct page *page, unsigned int offset,
 		unsigned int length)
 {
 	struct inode *inode = page->mapping->host;
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
-	if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)
+	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
+		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
 		return;
 
-	if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
-		invalidate_inmem_page(inode, page);
-
-	if (PageDirty(page))
-		inode_dec_dirty_pages(inode);
+	if (PageDirty(page)) {
+		if (inode->i_ino == F2FS_META_INO(sbi))
+			dec_page_count(sbi, F2FS_DIRTY_META);
+		else if (inode->i_ino == F2FS_NODE_INO(sbi))
+			dec_page_count(sbi, F2FS_DIRTY_NODES);
+		else
+			inode_dec_dirty_pages(inode);
+	}
 	ClearPagePrivate(page);
 }
 
-static int f2fs_release_data_page(struct page *page, gfp_t wait)
+int f2fs_release_page(struct page *page, gfp_t wait)
 {
+	/* If this is dirty page, keep PagePrivate */
+	if (PageDirty(page))
+		return 0;
+
 	ClearPagePrivate(page);
 	return 1;
 }
@@ -1132,7 +1208,7 @@ static int f2fs_set_data_page_dirty(struct page *page)
 
 	SetPageUptodate(page);
 
-	if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode)) {
+	if (f2fs_is_atomic_file(inode)) {
 		register_inmem_page(inode, page);
 		return 1;
 	}
@@ -1168,8 +1244,8 @@ const struct address_space_operations f2fs_dblock_aops = {
 	.write_begin	= f2fs_write_begin,
 	.write_end	= f2fs_write_end,
 	.set_page_dirty	= f2fs_set_data_page_dirty,
-	.invalidatepage	= f2fs_invalidate_data_page,
-	.releasepage	= f2fs_release_data_page,
+	.invalidatepage	= f2fs_invalidate_page,
+	.releasepage	= f2fs_release_page,
 	.direct_IO	= f2fs_direct_IO,
 	.bmap		= f2fs_bmap,
 };
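
For readers following the API change: below is a minimal, self-contained userspace sketch (not kernel code; the struct members mirror the kernel's f2fs_io_info, but the constants, printf and main are simplified stand-ins) of the calling convention this diff converges on. Callers fill one struct f2fs_io_info, including the target blk_addr, and pass a single pointer to the submit path instead of separate blk_addr/rw/type arguments.

#include <stdio.h>

typedef unsigned int block_t;

enum page_type { DATA, NODE, META };

/* simplified stand-in for the kernel's struct f2fs_io_info */
struct f2fs_io_info {
	enum page_type type;	/* DATA, NODE or META */
	int rw;			/* READ_SYNC, READA, WRITE, ... */
	block_t blk_addr;	/* target block, filled in just before submit */
};

/* after this diff: one fio pointer replaces the (blk_addr, rw) parameters */
static int submit_page_bio(struct f2fs_io_info *fio)
{
	printf("submit: type=%d rw=%d blk_addr=%u\n",
			fio->type, fio->rw, fio->blk_addr);
	return 0;
}

int main(void)
{
	/* mirrors the pattern in find_data_page()/get_lock_data_page():
	 * type and rw are known up front, blk_addr is set once resolved */
	struct f2fs_io_info fio = { .type = DATA, .rw = 0 /* READ_SYNC */ };

	fio.blk_addr = 12345;	/* dn.data_blkaddr in the kernel code */
	return submit_page_bio(&fio);
}

One consequence visible throughout the diff: because blk_addr now travels inside the struct, helpers such as do_write_data_page() can stash dn.data_blkaddr in fio->blk_addr once and every downstream call (rewrite_data_page, write_data_page, the trace events) reads it from the same place.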