Diffstat (limited to 'fs/f2fs/data.c')
 fs/f2fs/data.c | 316 ++++++++++++++++++++++++++++++++-------------------
 1 file changed, 206 insertions(+), 110 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index c9a76f8c1028..4e2fc09f0e4f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -25,6 +25,204 @@
 #include <trace/events/f2fs.h>
 
 /*
+ * Low-level block read/write IO operations.
+ */
+static struct bio *__bio_alloc(struct block_device *bdev, int npages)
+{
+	struct bio *bio;
+
+	/* No failure on bio allocation */
+	bio = bio_alloc(GFP_NOIO, npages);
+	bio->bi_bdev = bdev;
+	bio->bi_private = NULL;
+	return bio;
+}
+
+static void f2fs_read_end_io(struct bio *bio, int err)
+{
+	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+
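+	/* walk the bio's pages from the last bio_vec back to the first */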
+	do {
+		struct page *page = bvec->bv_page;
+
+		if (--bvec >= bio->bi_io_vec)
+			prefetchw(&bvec->bv_page->flags);
+
+		if (uptodate) {
+			SetPageUptodate(page);
+		} else {
+			ClearPageUptodate(page);
+			SetPageError(page);
+		}
+		unlock_page(page);
+	} while (bvec >= bio->bi_io_vec);
+
+	bio_put(bio);
+}
+
+static void f2fs_write_end_io(struct bio *bio, int err)
+{
+	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);
+
+	do {
+		struct page *page = bvec->bv_page;
+
+		if (--bvec >= bio->bi_io_vec)
+			prefetchw(&bvec->bv_page->flags);
+
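+		/* a write failure is fatal: flag the checkpoint and go read-only */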
+		if (!uptodate) {
+			SetPageError(page);
+			set_bit(AS_EIO, &page->mapping->flags);
+			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+			sbi->sb->s_flags |= MS_RDONLY;
+		}
+		end_page_writeback(page);
+		dec_page_count(sbi, F2FS_WRITEBACK);
+	} while (bvec >= bio->bi_io_vec);
+
+	if (bio->bi_private)
+		complete(bio->bi_private);
+
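+	/* wake up the checkpoint waiter once all writeback pages have drained */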
+	if (!get_pages(sbi, F2FS_WRITEBACK) &&
+			!list_empty(&sbi->cp_wait.task_list))
+		wake_up(&sbi->cp_wait);
+
+	bio_put(bio);
+}
+
+static void __submit_merged_bio(struct f2fs_sb_info *sbi,
+				struct f2fs_bio_info *io,
+				enum page_type type, bool sync, int rw)
+{
+	enum page_type btype = PAGE_TYPE_OF_BIO(type);
+
+	if (!io->bio)
+		return;
+
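+	/* tag metadata IO so the block layer can account for it separately */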
+	if (btype == META)
+		rw |= REQ_META;
+
+	if (is_read_io(rw)) {
+		if (sync)
+			rw |= READ_SYNC;
+		submit_bio(rw, io->bio);
+		trace_f2fs_submit_read_bio(sbi->sb, rw, type, io->bio);
+		io->bio = NULL;
+		return;
+	}
+
+	if (sync)
+		rw |= WRITE_SYNC;
+	if (type >= META_FLUSH)
+		rw |= WRITE_FLUSH_FUA;
+
+	/*
+	 * META_FLUSH comes only from the checkpoint procedure, and we must
+	 * wait for this metadata bio for FS consistency.
+	 */
+	if (type == META_FLUSH) {
+		DECLARE_COMPLETION_ONSTACK(wait);
+		io->bio->bi_private = &wait;
+		submit_bio(rw, io->bio);
+		wait_for_completion(&wait);
+	} else {
+		submit_bio(rw, io->bio);
+	}
+	trace_f2fs_submit_write_bio(sbi->sb, rw, btype, io->bio);
+	io->bio = NULL;
+}
+
+void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
+				enum page_type type, bool sync, int rw)
+{
+	enum page_type btype = PAGE_TYPE_OF_BIO(type);
+	struct f2fs_bio_info *io;
+
+	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
+
+	mutex_lock(&io->io_mutex);
+	__submit_merged_bio(sbi, io, type, sync, rw);
+	mutex_unlock(&io->io_mutex);
+}
+
+/*
+ * Fill the locked page with data located in the block address.
+ * Return unlocked page.
+ */
+int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
+					block_t blk_addr, int rw)
+{
+	struct block_device *bdev = sbi->sb->s_bdev;
+	struct bio *bio;
+
+	trace_f2fs_submit_page_bio(page, blk_addr, rw);
+
+	/* Allocate a new bio */
+	bio = __bio_alloc(bdev, 1);
+
+	/* Initialize the bio */
+	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+	bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io : f2fs_write_end_io;
+
+	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+		bio_put(bio);
+		f2fs_put_page(page, 1);
+		return -EFAULT;
+	}
+
+	submit_bio(rw, bio);
+	return 0;
+}
+
+void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
+			block_t blk_addr, enum page_type type, int rw)
+{
+	enum page_type btype = PAGE_TYPE_OF_BIO(type);
+	struct block_device *bdev = sbi->sb->s_bdev;
+	struct f2fs_bio_info *io;
+	int bio_blocks;
+
+	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
+
+	verify_block_addr(sbi, blk_addr);
+
+	mutex_lock(&io->io_mutex);
+
+	if (!is_read_io(rw))
+		inc_page_count(sbi, F2FS_WRITEBACK);
+
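+	/* a non-consecutive block address breaks the merge: flush what is pending */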
+	if (io->bio && io->last_block_in_bio != blk_addr - 1)
+		__submit_merged_bio(sbi, io, type, true, rw);
+alloc_new:
+	if (io->bio == NULL) {
+		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+		io->bio = __bio_alloc(bdev, bio_blocks);
+		io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+		io->bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io :
+							f2fs_write_end_io;
+		/*
+		 * The end_io will be assigned at the submission phase.
+		 * Until then, let bio_add_page() merge consecutive IOs as much
+		 * as possible.
+		 */
+	}
+
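+	/* if the page does not fit into the bio, submit it and retry with a new one */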
+	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
+							PAGE_CACHE_SIZE) {
+		__submit_merged_bio(sbi, io, type, true, rw);
+		goto alloc_new;
+	}
+
+	io->last_block_in_bio = blk_addr;
+
+	mutex_unlock(&io->io_mutex);
+	trace_f2fs_submit_page_mbio(page, rw, type, blk_addr);
+}
+
+/*
  * Lock ordering for the change of data block address:
  *  ->data_page
  *   ->node_page
@@ -238,7 +436,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
 		return page;
 	}
 
-	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
+	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
 					sync ? READ_SYNC : READA);
 	if (err)
 		return ERR_PTR(err);
@@ -299,7 +497,7 @@ repeat:
 		return page;
 	}
 
-	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
+	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
 	if (err)
 		return ERR_PTR(err);
 
@@ -349,7 +547,8 @@ repeat:
 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 		SetPageUptodate(page);
 	} else {
-		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
+		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
+								READ_SYNC);
 		if (err)
 			return ERR_PTR(err);
 		lock_page(page);
@@ -373,110 +572,6 @@ repeat:
 	return page;
 }
 
-static void read_end_io(struct bio *bio, int err)
-{
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-
-	do {
-		struct page *page = bvec->bv_page;
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (uptodate) {
-			SetPageUptodate(page);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
-		}
-		unlock_page(page);
-	} while (bvec >= bio->bi_io_vec);
-	bio_put(bio);
-}
-
-/*
- * Fill the locked page with data located in the block address.
- * Return unlocked page.
- */
-int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
-					block_t blk_addr, int type)
-{
-	struct block_device *bdev = sbi->sb->s_bdev;
-	struct bio *bio;
-
-	trace_f2fs_readpage(page, blk_addr, type);
-
-	/* Allocate a new bio */
-	bio = f2fs_bio_alloc(bdev, 1);
-
-	/* Initialize the bio */
-	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
-	bio->bi_end_io = read_end_io;
-
-	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
-		bio_put(bio);
-		f2fs_put_page(page, 1);
-		return -EFAULT;
-	}
-
-	submit_bio(type, bio);
-	return 0;
-}
-
-void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, int rw)
-{
-	struct f2fs_bio_info *io = &sbi->read_io;
-
-	if (!io->bio)
-		return;
-
-	trace_f2fs_submit_read_bio(sbi->sb, rw, META, io->bio);
-
-	mutex_lock(&io->io_mutex);
-	if (io->bio) {
-		submit_bio(rw, io->bio);
-		io->bio = NULL;
-	}
-	mutex_unlock(&io->io_mutex);
-}
-
-void submit_read_page(struct f2fs_sb_info *sbi, struct page *page,
-					block_t blk_addr, int rw)
-{
-	struct block_device *bdev = sbi->sb->s_bdev;
-	struct f2fs_bio_info *io = &sbi->read_io;
-	int bio_blocks;
-
-	verify_block_addr(sbi, blk_addr);
-
-	mutex_lock(&io->io_mutex);
-
-	if (io->bio && io->last_block_in_bio != blk_addr - 1) {
-		submit_bio(rw, io->bio);
-		io->bio = NULL;
-	}
-alloc_new:
-	if (io->bio == NULL) {
-		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
-		io->bio = f2fs_bio_alloc(bdev, bio_blocks);
-		io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
-		io->bio->bi_end_io = read_end_io;
-	}
-
-	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
-							PAGE_CACHE_SIZE) {
-		submit_bio(rw, io->bio);
-		io->bio = NULL;
-		goto alloc_new;
-	}
-
-	io->last_block_in_bio = blk_addr;
-
-	mutex_unlock(&io->io_mutex);
-	trace_f2fs_submit_read_page(page, rw, META, blk_addr);
-}
-
 /*
  * This function should be used by the data read flow only where it
  * does not check the "create" flag that indicates block allocation.
@@ -638,7 +733,7 @@ write:
 		goto redirty_out;
 
 	if (wbc->for_reclaim)
-		f2fs_submit_bio(sbi, DATA, true);
+		f2fs_submit_merged_bio(sbi, DATA, true, WRITE);
 
 	clear_cold_data(page);
 out:
@@ -690,7 +785,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
 	if (locked)
 		mutex_unlock(&sbi->writepages);
-	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));
+	f2fs_submit_merged_bio(sbi, DATA, wbc->sync_mode == WB_SYNC_ALL, WRITE);
 
 	remove_dirty_dir_inode(inode);
 
@@ -741,7 +836,8 @@ repeat:
 	if (dn.data_blkaddr == NEW_ADDR) {
 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 	} else {
-		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
+		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
+								READ_SYNC);
 		if (err)
 			return err;
 		lock_page(page);