diff options
author | Jaegeuk Kim <jaegeuk.kim@samsung.com> | 2013-11-29 22:51:14 -0500 |
---|---|---|
committer | Jaegeuk Kim <jaegeuk.kim@samsung.com> | 2013-12-22 20:18:05 -0500 |
commit | 93dfe2ac516250755f7d5edd438b0ce67c0e3aa6 (patch) | |
tree | 66a4821769e2feb60de4265b8b9d7b03706d544d /fs | |
parent | 187b5b8b3dfcfc73126f2743c89cc47df3bf07be (diff) |
f2fs: refactor bio-related operations
This patch integrates redundant bio operations on read and write IOs.
1. Move bio-related codes to the top of data.c.
2. Replace f2fs_submit_bio with f2fs_submit_merged_bio, which handles read
bios additionally.
3. Introduce __submit_merged_bio to submit the merged bio.
4. Change f2fs_readpage to f2fs_submit_page_bio.
5. Introduce f2fs_submit_page_mbio to integrate previous submit_read_page and
submit_write_page.
Reviewed-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/f2fs/checkpoint.c | 14 | ||||
-rw-r--r-- | fs/f2fs/data.c | 316 | ||||
-rw-r--r-- | fs/f2fs/f2fs.h | 12 | ||||
-rw-r--r-- | fs/f2fs/gc.c | 2 | ||||
-rw-r--r-- | fs/f2fs/node.c | 14 | ||||
-rw-r--r-- | fs/f2fs/recovery.c | 4 | ||||
-rw-r--r-- | fs/f2fs/segment.c | 164 |
7 files changed, 247 insertions, 279 deletions
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 40eea42f85ff..38f4a2245085 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c | |||
@@ -61,7 +61,8 @@ repeat: | |||
61 | if (PageUptodate(page)) | 61 | if (PageUptodate(page)) |
62 | goto out; | 62 | goto out; |
63 | 63 | ||
64 | if (f2fs_readpage(sbi, page, index, READ_SYNC | REQ_META | REQ_PRIO)) | 64 | if (f2fs_submit_page_bio(sbi, page, index, |
65 | READ_SYNC | REQ_META | REQ_PRIO)) | ||
65 | goto repeat; | 66 | goto repeat; |
66 | 67 | ||
67 | lock_page(page); | 68 | lock_page(page); |
@@ -157,7 +158,8 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, | |||
157 | } | 158 | } |
158 | 159 | ||
159 | if (nwritten) | 160 | if (nwritten) |
160 | f2fs_submit_bio(sbi, type, nr_to_write == LONG_MAX); | 161 | f2fs_submit_merged_bio(sbi, type, nr_to_write == LONG_MAX, |
162 | WRITE); | ||
161 | 163 | ||
162 | return nwritten; | 164 | return nwritten; |
163 | } | 165 | } |
@@ -590,7 +592,7 @@ retry: | |||
590 | * We should submit bio, since it exists several | 592 | * We should submit bio, since it exists several |
591 | * wribacking dentry pages in the freeing inode. | 593 | * wribacking dentry pages in the freeing inode. |
592 | */ | 594 | */ |
593 | f2fs_submit_bio(sbi, DATA, true); | 595 | f2fs_submit_merged_bio(sbi, DATA, true, WRITE); |
594 | } | 596 | } |
595 | goto retry; | 597 | goto retry; |
596 | } | 598 | } |
@@ -796,9 +798,9 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | |||
796 | 798 | ||
797 | trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops"); | 799 | trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops"); |
798 | 800 | ||
799 | f2fs_submit_bio(sbi, DATA, true); | 801 | f2fs_submit_merged_bio(sbi, DATA, true, WRITE); |
800 | f2fs_submit_bio(sbi, NODE, true); | 802 | f2fs_submit_merged_bio(sbi, NODE, true, WRITE); |
801 | f2fs_submit_bio(sbi, META, true); | 803 | f2fs_submit_merged_bio(sbi, META, true, WRITE); |
802 | 804 | ||
803 | /* | 805 | /* |
804 | * update checkpoint pack index | 806 | * update checkpoint pack index |
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c9a76f8c1028..4e2fc09f0e4f 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -25,6 +25,204 @@ | |||
25 | #include <trace/events/f2fs.h> | 25 | #include <trace/events/f2fs.h> |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Low-level block read/write IO operations. | ||
29 | */ | ||
30 | static struct bio *__bio_alloc(struct block_device *bdev, int npages) | ||
31 | { | ||
32 | struct bio *bio; | ||
33 | |||
34 | /* No failure on bio allocation */ | ||
35 | bio = bio_alloc(GFP_NOIO, npages); | ||
36 | bio->bi_bdev = bdev; | ||
37 | bio->bi_private = NULL; | ||
38 | return bio; | ||
39 | } | ||
40 | |||
41 | static void f2fs_read_end_io(struct bio *bio, int err) | ||
42 | { | ||
43 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | ||
44 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | ||
45 | |||
46 | do { | ||
47 | struct page *page = bvec->bv_page; | ||
48 | |||
49 | if (--bvec >= bio->bi_io_vec) | ||
50 | prefetchw(&bvec->bv_page->flags); | ||
51 | |||
52 | if (uptodate) { | ||
53 | SetPageUptodate(page); | ||
54 | } else { | ||
55 | ClearPageUptodate(page); | ||
56 | SetPageError(page); | ||
57 | } | ||
58 | unlock_page(page); | ||
59 | } while (bvec >= bio->bi_io_vec); | ||
60 | |||
61 | bio_put(bio); | ||
62 | } | ||
63 | |||
64 | static void f2fs_write_end_io(struct bio *bio, int err) | ||
65 | { | ||
66 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | ||
67 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | ||
68 | struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb); | ||
69 | |||
70 | do { | ||
71 | struct page *page = bvec->bv_page; | ||
72 | |||
73 | if (--bvec >= bio->bi_io_vec) | ||
74 | prefetchw(&bvec->bv_page->flags); | ||
75 | |||
76 | if (!uptodate) { | ||
77 | SetPageError(page); | ||
78 | set_bit(AS_EIO, &page->mapping->flags); | ||
79 | set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); | ||
80 | sbi->sb->s_flags |= MS_RDONLY; | ||
81 | } | ||
82 | end_page_writeback(page); | ||
83 | dec_page_count(sbi, F2FS_WRITEBACK); | ||
84 | } while (bvec >= bio->bi_io_vec); | ||
85 | |||
86 | if (bio->bi_private) | ||
87 | complete(bio->bi_private); | ||
88 | |||
89 | if (!get_pages(sbi, F2FS_WRITEBACK) && | ||
90 | !list_empty(&sbi->cp_wait.task_list)) | ||
91 | wake_up(&sbi->cp_wait); | ||
92 | |||
93 | bio_put(bio); | ||
94 | } | ||
95 | |||
96 | static void __submit_merged_bio(struct f2fs_sb_info *sbi, | ||
97 | struct f2fs_bio_info *io, | ||
98 | enum page_type type, bool sync, int rw) | ||
99 | { | ||
100 | enum page_type btype = PAGE_TYPE_OF_BIO(type); | ||
101 | |||
102 | if (!io->bio) | ||
103 | return; | ||
104 | |||
105 | if (btype == META) | ||
106 | rw |= REQ_META; | ||
107 | |||
108 | if (is_read_io(rw)) { | ||
109 | if (sync) | ||
110 | rw |= READ_SYNC; | ||
111 | submit_bio(rw, io->bio); | ||
112 | trace_f2fs_submit_read_bio(sbi->sb, rw, type, io->bio); | ||
113 | io->bio = NULL; | ||
114 | return; | ||
115 | } | ||
116 | |||
117 | if (sync) | ||
118 | rw |= WRITE_SYNC; | ||
119 | if (type >= META_FLUSH) | ||
120 | rw |= WRITE_FLUSH_FUA; | ||
121 | |||
122 | /* | ||
123 | * META_FLUSH is only from the checkpoint procedure, and we should wait | ||
124 | * this metadata bio for FS consistency. | ||
125 | */ | ||
126 | if (type == META_FLUSH) { | ||
127 | DECLARE_COMPLETION_ONSTACK(wait); | ||
128 | io->bio->bi_private = &wait; | ||
129 | submit_bio(rw, io->bio); | ||
130 | wait_for_completion(&wait); | ||
131 | } else { | ||
132 | submit_bio(rw, io->bio); | ||
133 | } | ||
134 | trace_f2fs_submit_write_bio(sbi->sb, rw, btype, io->bio); | ||
135 | io->bio = NULL; | ||
136 | } | ||
137 | |||
138 | void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, | ||
139 | enum page_type type, bool sync, int rw) | ||
140 | { | ||
141 | enum page_type btype = PAGE_TYPE_OF_BIO(type); | ||
142 | struct f2fs_bio_info *io; | ||
143 | |||
144 | io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype]; | ||
145 | |||
146 | mutex_lock(&io->io_mutex); | ||
147 | __submit_merged_bio(sbi, io, type, sync, rw); | ||
148 | mutex_unlock(&io->io_mutex); | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * Fill the locked page with data located in the block address. | ||
153 | * Return unlocked page. | ||
154 | */ | ||
155 | int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, | ||
156 | block_t blk_addr, int rw) | ||
157 | { | ||
158 | struct block_device *bdev = sbi->sb->s_bdev; | ||
159 | struct bio *bio; | ||
160 | |||
161 | trace_f2fs_submit_page_bio(page, blk_addr, rw); | ||
162 | |||
163 | /* Allocate a new bio */ | ||
164 | bio = __bio_alloc(bdev, 1); | ||
165 | |||
166 | /* Initialize the bio */ | ||
167 | bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); | ||
168 | bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io : f2fs_write_end_io; | ||
169 | |||
170 | if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { | ||
171 | bio_put(bio); | ||
172 | f2fs_put_page(page, 1); | ||
173 | return -EFAULT; | ||
174 | } | ||
175 | |||
176 | submit_bio(rw, bio); | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page, | ||
181 | block_t blk_addr, enum page_type type, int rw) | ||
182 | { | ||
183 | enum page_type btype = PAGE_TYPE_OF_BIO(type); | ||
184 | struct block_device *bdev = sbi->sb->s_bdev; | ||
185 | struct f2fs_bio_info *io; | ||
186 | int bio_blocks; | ||
187 | |||
188 | io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype]; | ||
189 | |||
190 | verify_block_addr(sbi, blk_addr); | ||
191 | |||
192 | mutex_lock(&io->io_mutex); | ||
193 | |||
194 | if (!is_read_io(rw)) | ||
195 | inc_page_count(sbi, F2FS_WRITEBACK); | ||
196 | |||
197 | if (io->bio && io->last_block_in_bio != blk_addr - 1) | ||
198 | __submit_merged_bio(sbi, io, type, true, rw); | ||
199 | alloc_new: | ||
200 | if (io->bio == NULL) { | ||
201 | bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); | ||
202 | io->bio = __bio_alloc(bdev, bio_blocks); | ||
203 | io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); | ||
204 | io->bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io : | ||
205 | f2fs_write_end_io; | ||
206 | /* | ||
207 | * The end_io will be assigned at the sumbission phase. | ||
208 | * Until then, let bio_add_page() merge consecutive IOs as much | ||
209 | * as possible. | ||
210 | */ | ||
211 | } | ||
212 | |||
213 | if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) < | ||
214 | PAGE_CACHE_SIZE) { | ||
215 | __submit_merged_bio(sbi, io, type, true, rw); | ||
216 | goto alloc_new; | ||
217 | } | ||
218 | |||
219 | io->last_block_in_bio = blk_addr; | ||
220 | |||
221 | mutex_unlock(&io->io_mutex); | ||
222 | trace_f2fs_submit_page_mbio(page, rw, type, blk_addr); | ||
223 | } | ||
224 | |||
225 | /* | ||
28 | * Lock ordering for the change of data block address: | 226 | * Lock ordering for the change of data block address: |
29 | * ->data_page | 227 | * ->data_page |
30 | * ->node_page | 228 | * ->node_page |
@@ -238,7 +436,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync) | |||
238 | return page; | 436 | return page; |
239 | } | 437 | } |
240 | 438 | ||
241 | err = f2fs_readpage(sbi, page, dn.data_blkaddr, | 439 | err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, |
242 | sync ? READ_SYNC : READA); | 440 | sync ? READ_SYNC : READA); |
243 | if (err) | 441 | if (err) |
244 | return ERR_PTR(err); | 442 | return ERR_PTR(err); |
@@ -299,7 +497,7 @@ repeat: | |||
299 | return page; | 497 | return page; |
300 | } | 498 | } |
301 | 499 | ||
302 | err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC); | 500 | err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC); |
303 | if (err) | 501 | if (err) |
304 | return ERR_PTR(err); | 502 | return ERR_PTR(err); |
305 | 503 | ||
@@ -349,7 +547,8 @@ repeat: | |||
349 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); | 547 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); |
350 | SetPageUptodate(page); | 548 | SetPageUptodate(page); |
351 | } else { | 549 | } else { |
352 | err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC); | 550 | err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, |
551 | READ_SYNC); | ||
353 | if (err) | 552 | if (err) |
354 | return ERR_PTR(err); | 553 | return ERR_PTR(err); |
355 | lock_page(page); | 554 | lock_page(page); |
@@ -373,110 +572,6 @@ repeat: | |||
373 | return page; | 572 | return page; |
374 | } | 573 | } |
375 | 574 | ||
376 | static void read_end_io(struct bio *bio, int err) | ||
377 | { | ||
378 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | ||
379 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | ||
380 | |||
381 | do { | ||
382 | struct page *page = bvec->bv_page; | ||
383 | |||
384 | if (--bvec >= bio->bi_io_vec) | ||
385 | prefetchw(&bvec->bv_page->flags); | ||
386 | |||
387 | if (uptodate) { | ||
388 | SetPageUptodate(page); | ||
389 | } else { | ||
390 | ClearPageUptodate(page); | ||
391 | SetPageError(page); | ||
392 | } | ||
393 | unlock_page(page); | ||
394 | } while (bvec >= bio->bi_io_vec); | ||
395 | bio_put(bio); | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * Fill the locked page with data located in the block address. | ||
400 | * Return unlocked page. | ||
401 | */ | ||
402 | int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page, | ||
403 | block_t blk_addr, int type) | ||
404 | { | ||
405 | struct block_device *bdev = sbi->sb->s_bdev; | ||
406 | struct bio *bio; | ||
407 | |||
408 | trace_f2fs_readpage(page, blk_addr, type); | ||
409 | |||
410 | /* Allocate a new bio */ | ||
411 | bio = f2fs_bio_alloc(bdev, 1); | ||
412 | |||
413 | /* Initialize the bio */ | ||
414 | bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); | ||
415 | bio->bi_end_io = read_end_io; | ||
416 | |||
417 | if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { | ||
418 | bio_put(bio); | ||
419 | f2fs_put_page(page, 1); | ||
420 | return -EFAULT; | ||
421 | } | ||
422 | |||
423 | submit_bio(type, bio); | ||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, int rw) | ||
428 | { | ||
429 | struct f2fs_bio_info *io = &sbi->read_io; | ||
430 | |||
431 | if (!io->bio) | ||
432 | return; | ||
433 | |||
434 | trace_f2fs_submit_read_bio(sbi->sb, rw, META, io->bio); | ||
435 | |||
436 | mutex_lock(&io->io_mutex); | ||
437 | if (io->bio) { | ||
438 | submit_bio(rw, io->bio); | ||
439 | io->bio = NULL; | ||
440 | } | ||
441 | mutex_unlock(&io->io_mutex); | ||
442 | } | ||
443 | |||
444 | void submit_read_page(struct f2fs_sb_info *sbi, struct page *page, | ||
445 | block_t blk_addr, int rw) | ||
446 | { | ||
447 | struct block_device *bdev = sbi->sb->s_bdev; | ||
448 | struct f2fs_bio_info *io = &sbi->read_io; | ||
449 | int bio_blocks; | ||
450 | |||
451 | verify_block_addr(sbi, blk_addr); | ||
452 | |||
453 | mutex_lock(&io->io_mutex); | ||
454 | |||
455 | if (io->bio && io->last_block_in_bio != blk_addr - 1) { | ||
456 | submit_bio(rw, io->bio); | ||
457 | io->bio = NULL; | ||
458 | } | ||
459 | alloc_new: | ||
460 | if (io->bio == NULL) { | ||
461 | bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); | ||
462 | io->bio = f2fs_bio_alloc(bdev, bio_blocks); | ||
463 | io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); | ||
464 | io->bio->bi_end_io = read_end_io; | ||
465 | } | ||
466 | |||
467 | if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) < | ||
468 | PAGE_CACHE_SIZE) { | ||
469 | submit_bio(rw, io->bio); | ||
470 | io->bio = NULL; | ||
471 | goto alloc_new; | ||
472 | } | ||
473 | |||
474 | io->last_block_in_bio = blk_addr; | ||
475 | |||
476 | mutex_unlock(&io->io_mutex); | ||
477 | trace_f2fs_submit_read_page(page, rw, META, blk_addr); | ||
478 | } | ||
479 | |||
480 | /* | 575 | /* |
481 | * This function should be used by the data read flow only where it | 576 | * This function should be used by the data read flow only where it |
482 | * does not check the "create" flag that indicates block allocation. | 577 | * does not check the "create" flag that indicates block allocation. |
@@ -638,7 +733,7 @@ write: | |||
638 | goto redirty_out; | 733 | goto redirty_out; |
639 | 734 | ||
640 | if (wbc->for_reclaim) | 735 | if (wbc->for_reclaim) |
641 | f2fs_submit_bio(sbi, DATA, true); | 736 | f2fs_submit_merged_bio(sbi, DATA, true, WRITE); |
642 | 737 | ||
643 | clear_cold_data(page); | 738 | clear_cold_data(page); |
644 | out: | 739 | out: |
@@ -690,7 +785,7 @@ static int f2fs_write_data_pages(struct address_space *mapping, | |||
690 | ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); | 785 | ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); |
691 | if (locked) | 786 | if (locked) |
692 | mutex_unlock(&sbi->writepages); | 787 | mutex_unlock(&sbi->writepages); |
693 | f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL)); | 788 | f2fs_submit_merged_bio(sbi, DATA, wbc->sync_mode == WB_SYNC_ALL, WRITE); |
694 | 789 | ||
695 | remove_dirty_dir_inode(inode); | 790 | remove_dirty_dir_inode(inode); |
696 | 791 | ||
@@ -741,7 +836,8 @@ repeat: | |||
741 | if (dn.data_blkaddr == NEW_ADDR) { | 836 | if (dn.data_blkaddr == NEW_ADDR) { |
742 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); | 837 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); |
743 | } else { | 838 | } else { |
744 | err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC); | 839 | err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, |
840 | READ_SYNC); | ||
745 | if (err) | 841 | if (err) |
746 | return err; | 842 | return err; |
747 | lock_page(page); | 843 | lock_page(page); |
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index ca33cda78e02..10eca022e1e1 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -364,6 +364,7 @@ enum page_type { | |||
364 | META_FLUSH, | 364 | META_FLUSH, |
365 | }; | 365 | }; |
366 | 366 | ||
367 | #define is_read_io(rw) (((rw) & 1) == READ) | ||
367 | struct f2fs_bio_info { | 368 | struct f2fs_bio_info { |
368 | struct bio *bio; /* bios to merge */ | 369 | struct bio *bio; /* bios to merge */ |
369 | sector_t last_block_in_bio; /* last block number */ | 370 | sector_t last_block_in_bio; /* last block number */ |
@@ -1093,9 +1094,6 @@ void clear_prefree_segments(struct f2fs_sb_info *); | |||
1093 | int npages_for_summary_flush(struct f2fs_sb_info *); | 1094 | int npages_for_summary_flush(struct f2fs_sb_info *); |
1094 | void allocate_new_segments(struct f2fs_sb_info *); | 1095 | void allocate_new_segments(struct f2fs_sb_info *); |
1095 | struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); | 1096 | struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); |
1096 | struct bio *f2fs_bio_alloc(struct block_device *, int); | ||
1097 | void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool); | ||
1098 | void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool); | ||
1099 | void write_meta_page(struct f2fs_sb_info *, struct page *); | 1097 | void write_meta_page(struct f2fs_sb_info *, struct page *); |
1100 | void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int, | 1098 | void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int, |
1101 | block_t, block_t *); | 1099 | block_t, block_t *); |
@@ -1106,6 +1104,7 @@ void recover_data_page(struct f2fs_sb_info *, struct page *, | |||
1106 | struct f2fs_summary *, block_t, block_t); | 1104 | struct f2fs_summary *, block_t, block_t); |
1107 | void rewrite_node_page(struct f2fs_sb_info *, struct page *, | 1105 | void rewrite_node_page(struct f2fs_sb_info *, struct page *, |
1108 | struct f2fs_summary *, block_t, block_t); | 1106 | struct f2fs_summary *, block_t, block_t); |
1107 | void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool); | ||
1109 | void write_data_summaries(struct f2fs_sb_info *, block_t); | 1108 | void write_data_summaries(struct f2fs_sb_info *, block_t); |
1110 | void write_node_summaries(struct f2fs_sb_info *, block_t); | 1109 | void write_node_summaries(struct f2fs_sb_info *, block_t); |
1111 | int lookup_journal_in_cursum(struct f2fs_summary_block *, | 1110 | int lookup_journal_in_cursum(struct f2fs_summary_block *, |
@@ -1141,15 +1140,16 @@ void destroy_checkpoint_caches(void); | |||
1141 | /* | 1140 | /* |
1142 | * data.c | 1141 | * data.c |
1143 | */ | 1142 | */ |
1143 | void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, bool, int); | ||
1144 | int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, block_t, int); | ||
1145 | void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, block_t, | ||
1146 | enum page_type, int); | ||
1144 | int reserve_new_block(struct dnode_of_data *); | 1147 | int reserve_new_block(struct dnode_of_data *); |
1145 | int f2fs_reserve_block(struct dnode_of_data *, pgoff_t); | 1148 | int f2fs_reserve_block(struct dnode_of_data *, pgoff_t); |
1146 | void update_extent_cache(block_t, struct dnode_of_data *); | 1149 | void update_extent_cache(block_t, struct dnode_of_data *); |
1147 | struct page *find_data_page(struct inode *, pgoff_t, bool); | 1150 | struct page *find_data_page(struct inode *, pgoff_t, bool); |
1148 | struct page *get_lock_data_page(struct inode *, pgoff_t); | 1151 | struct page *get_lock_data_page(struct inode *, pgoff_t); |
1149 | struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool); | 1152 | struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool); |
1150 | int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int); | ||
1151 | void f2fs_submit_read_bio(struct f2fs_sb_info *, int); | ||
1152 | void submit_read_page(struct f2fs_sb_info *, struct page *, block_t, int); | ||
1153 | int do_write_data_page(struct page *); | 1153 | int do_write_data_page(struct page *); |
1154 | 1154 | ||
1155 | /* | 1155 | /* |
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 5fa54c1ca33b..2886aef35d59 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c | |||
@@ -631,7 +631,7 @@ next_iput: | |||
631 | goto next_step; | 631 | goto next_step; |
632 | 632 | ||
633 | if (gc_type == FG_GC) { | 633 | if (gc_type == FG_GC) { |
634 | f2fs_submit_bio(sbi, DATA, true); | 634 | f2fs_submit_merged_bio(sbi, DATA, true, WRITE); |
635 | 635 | ||
636 | /* | 636 | /* |
637 | * In the case of FG_GC, it'd be better to reclaim this victim | 637 | * In the case of FG_GC, it'd be better to reclaim this victim |
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index d0ab00334b02..0e1a3df18e58 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c | |||
@@ -106,11 +106,11 @@ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid) | |||
106 | f2fs_put_page(page, 1); | 106 | f2fs_put_page(page, 1); |
107 | continue; | 107 | continue; |
108 | } | 108 | } |
109 | submit_read_page(sbi, page, index, READ_SYNC | REQ_META); | 109 | f2fs_submit_page_mbio(sbi, page, index, META, READ); |
110 | mark_page_accessed(page); | 110 | mark_page_accessed(page); |
111 | f2fs_put_page(page, 0); | 111 | f2fs_put_page(page, 0); |
112 | } | 112 | } |
113 | f2fs_submit_read_bio(sbi, READ_SYNC | REQ_META); | 113 | f2fs_submit_merged_bio(sbi, META, true, READ); |
114 | } | 114 | } |
115 | 115 | ||
116 | static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) | 116 | static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) |
@@ -891,7 +891,7 @@ fail: | |||
891 | * LOCKED_PAGE: f2fs_put_page(page, 1) | 891 | * LOCKED_PAGE: f2fs_put_page(page, 1) |
892 | * error: nothing | 892 | * error: nothing |
893 | */ | 893 | */ |
894 | static int read_node_page(struct page *page, int type) | 894 | static int read_node_page(struct page *page, int rw) |
895 | { | 895 | { |
896 | struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); | 896 | struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); |
897 | struct node_info ni; | 897 | struct node_info ni; |
@@ -906,7 +906,7 @@ static int read_node_page(struct page *page, int type) | |||
906 | if (PageUptodate(page)) | 906 | if (PageUptodate(page)) |
907 | return LOCKED_PAGE; | 907 | return LOCKED_PAGE; |
908 | 908 | ||
909 | return f2fs_readpage(sbi, page, ni.blk_addr, type); | 909 | return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw); |
910 | } | 910 | } |
911 | 911 | ||
912 | /* | 912 | /* |
@@ -1136,8 +1136,8 @@ continue_unlock: | |||
1136 | } | 1136 | } |
1137 | 1137 | ||
1138 | if (wrote) | 1138 | if (wrote) |
1139 | f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL); | 1139 | f2fs_submit_merged_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL, |
1140 | 1140 | WRITE); | |
1141 | return nwritten; | 1141 | return nwritten; |
1142 | } | 1142 | } |
1143 | 1143 | ||
@@ -1592,7 +1592,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi, | |||
1592 | */ | 1592 | */ |
1593 | ClearPageUptodate(page); | 1593 | ClearPageUptodate(page); |
1594 | 1594 | ||
1595 | if (f2fs_readpage(sbi, page, addr, READ_SYNC)) | 1595 | if (f2fs_submit_page_bio(sbi, page, addr, READ_SYNC)) |
1596 | goto out; | 1596 | goto out; |
1597 | 1597 | ||
1598 | lock_page(page); | 1598 | lock_page(page); |
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index fdc81161f254..c209b8652927 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c | |||
@@ -143,7 +143,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) | |||
143 | while (1) { | 143 | while (1) { |
144 | struct fsync_inode_entry *entry; | 144 | struct fsync_inode_entry *entry; |
145 | 145 | ||
146 | err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC); | 146 | err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC); |
147 | if (err) | 147 | if (err) |
148 | goto out; | 148 | goto out; |
149 | 149 | ||
@@ -386,7 +386,7 @@ static int recover_data(struct f2fs_sb_info *sbi, | |||
386 | while (1) { | 386 | while (1) { |
387 | struct fsync_inode_entry *entry; | 387 | struct fsync_inode_entry *entry; |
388 | 388 | ||
389 | err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC); | 389 | err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC); |
390 | if (err) | 390 | if (err) |
391 | goto out; | 391 | goto out; |
392 | 392 | ||
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 0db40271f0d8..ca9adf5914cc 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
@@ -787,146 +787,6 @@ static const struct segment_allocation default_salloc_ops = { | |||
787 | .allocate_segment = allocate_segment_by_default, | 787 | .allocate_segment = allocate_segment_by_default, |
788 | }; | 788 | }; |
789 | 789 | ||
790 | static void f2fs_end_io_write(struct bio *bio, int err) | ||
791 | { | ||
792 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | ||
793 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | ||
794 | struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb); | ||
795 | |||
796 | do { | ||
797 | struct page *page = bvec->bv_page; | ||
798 | |||
799 | if (--bvec >= bio->bi_io_vec) | ||
800 | prefetchw(&bvec->bv_page->flags); | ||
801 | if (!uptodate) { | ||
802 | SetPageError(page); | ||
803 | if (page->mapping) | ||
804 | set_bit(AS_EIO, &page->mapping->flags); | ||
805 | |||
806 | set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); | ||
807 | sbi->sb->s_flags |= MS_RDONLY; | ||
808 | } | ||
809 | end_page_writeback(page); | ||
810 | dec_page_count(sbi, F2FS_WRITEBACK); | ||
811 | } while (bvec >= bio->bi_io_vec); | ||
812 | |||
813 | if (bio->bi_private) | ||
814 | complete(bio->bi_private); | ||
815 | |||
816 | if (!get_pages(sbi, F2FS_WRITEBACK) && | ||
817 | !list_empty(&sbi->cp_wait.task_list)) | ||
818 | wake_up(&sbi->cp_wait); | ||
819 | |||
820 | bio_put(bio); | ||
821 | } | ||
822 | |||
823 | struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages) | ||
824 | { | ||
825 | struct bio *bio; | ||
826 | |||
827 | /* No failure on bio allocation */ | ||
828 | bio = bio_alloc(GFP_NOIO, npages); | ||
829 | bio->bi_bdev = bdev; | ||
830 | bio->bi_private = NULL; | ||
831 | |||
832 | return bio; | ||
833 | } | ||
834 | |||
835 | static void do_submit_bio(struct f2fs_sb_info *sbi, | ||
836 | enum page_type type, bool sync) | ||
837 | { | ||
838 | int rw = sync ? WRITE_SYNC : WRITE; | ||
839 | enum page_type btype = PAGE_TYPE_OF_BIO(type); | ||
840 | struct f2fs_bio_info *io = &sbi->write_io[btype]; | ||
841 | |||
842 | if (!io->bio) | ||
843 | return; | ||
844 | |||
845 | if (type >= META_FLUSH) | ||
846 | rw = WRITE_FLUSH_FUA; | ||
847 | |||
848 | if (btype == META) | ||
849 | rw |= REQ_META; | ||
850 | |||
851 | trace_f2fs_submit_write_bio(sbi->sb, rw, btype, io->bio); | ||
852 | |||
853 | /* | ||
854 | * META_FLUSH is only from the checkpoint procedure, and we should wait | ||
855 | * this metadata bio for FS consistency. | ||
856 | */ | ||
857 | if (type == META_FLUSH) { | ||
858 | DECLARE_COMPLETION_ONSTACK(wait); | ||
859 | io->bio->bi_private = &wait; | ||
860 | submit_bio(rw, io->bio); | ||
861 | wait_for_completion(&wait); | ||
862 | } else { | ||
863 | submit_bio(rw, io->bio); | ||
864 | } | ||
865 | io->bio = NULL; | ||
866 | } | ||
867 | |||
868 | void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync) | ||
869 | { | ||
870 | struct f2fs_bio_info *io = &sbi->write_io[PAGE_TYPE_OF_BIO(type)]; | ||
871 | |||
872 | if (!io->bio) | ||
873 | return; | ||
874 | |||
875 | mutex_lock(&io->io_mutex); | ||
876 | do_submit_bio(sbi, type, sync); | ||
877 | mutex_unlock(&io->io_mutex); | ||
878 | } | ||
879 | |||
880 | static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page, | ||
881 | block_t blk_addr, enum page_type type) | ||
882 | { | ||
883 | struct block_device *bdev = sbi->sb->s_bdev; | ||
884 | struct f2fs_bio_info *io = &sbi->write_io[type]; | ||
885 | int bio_blocks; | ||
886 | |||
887 | verify_block_addr(sbi, blk_addr); | ||
888 | |||
889 | mutex_lock(&io->io_mutex); | ||
890 | |||
891 | inc_page_count(sbi, F2FS_WRITEBACK); | ||
892 | |||
893 | if (io->bio && io->last_block_in_bio != blk_addr - 1) | ||
894 | do_submit_bio(sbi, type, false); | ||
895 | alloc_new: | ||
896 | if (io->bio == NULL) { | ||
897 | bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); | ||
898 | io->bio = f2fs_bio_alloc(bdev, bio_blocks); | ||
899 | io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); | ||
900 | io->bio->bi_end_io = f2fs_end_io_write; | ||
901 | /* | ||
902 | * The end_io will be assigned at the sumbission phase. | ||
903 | * Until then, let bio_add_page() merge consecutive IOs as much | ||
904 | * as possible. | ||
905 | */ | ||
906 | } | ||
907 | |||
908 | if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) < | ||
909 | PAGE_CACHE_SIZE) { | ||
910 | do_submit_bio(sbi, type, false); | ||
911 | goto alloc_new; | ||
912 | } | ||
913 | |||
914 | io->last_block_in_bio = blk_addr; | ||
915 | |||
916 | mutex_unlock(&io->io_mutex); | ||
917 | trace_f2fs_submit_write_page(page, WRITE, type, blk_addr); | ||
918 | } | ||
919 | |||
920 | void f2fs_wait_on_page_writeback(struct page *page, | ||
921 | enum page_type type, bool sync) | ||
922 | { | ||
923 | struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); | ||
924 | if (PageWriteback(page)) { | ||
925 | f2fs_submit_bio(sbi, type, sync); | ||
926 | wait_on_page_writeback(page); | ||
927 | } | ||
928 | } | ||
929 | |||
930 | static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) | 790 | static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) |
931 | { | 791 | { |
932 | struct curseg_info *curseg = CURSEG_I(sbi, type); | 792 | struct curseg_info *curseg = CURSEG_I(sbi, type); |
@@ -1040,7 +900,7 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, | |||
1040 | fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); | 900 | fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); |
1041 | 901 | ||
1042 | /* writeout dirty page into bdev */ | 902 | /* writeout dirty page into bdev */ |
1043 | submit_write_page(sbi, page, *new_blkaddr, p_type); | 903 | f2fs_submit_page_mbio(sbi, page, *new_blkaddr, p_type, WRITE); |
1044 | 904 | ||
1045 | mutex_unlock(&curseg->curseg_mutex); | 905 | mutex_unlock(&curseg->curseg_mutex); |
1046 | } | 906 | } |
@@ -1048,7 +908,7 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, | |||
1048 | void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) | 908 | void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) |
1049 | { | 909 | { |
1050 | set_page_writeback(page); | 910 | set_page_writeback(page); |
1051 | submit_write_page(sbi, page, page->index, META); | 911 | f2fs_submit_page_mbio(sbi, page, page->index, META, WRITE); |
1052 | } | 912 | } |
1053 | 913 | ||
1054 | void write_node_page(struct f2fs_sb_info *sbi, struct page *page, | 914 | void write_node_page(struct f2fs_sb_info *sbi, struct page *page, |
@@ -1078,7 +938,7 @@ void write_data_page(struct inode *inode, struct page *page, | |||
1078 | void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page, | 938 | void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page, |
1079 | block_t old_blk_addr) | 939 | block_t old_blk_addr) |
1080 | { | 940 | { |
1081 | submit_write_page(sbi, page, old_blk_addr, DATA); | 941 | f2fs_submit_page_mbio(sbi, page, old_blk_addr, DATA, WRITE); |
1082 | } | 942 | } |
1083 | 943 | ||
1084 | void recover_data_page(struct f2fs_sb_info *sbi, | 944 | void recover_data_page(struct f2fs_sb_info *sbi, |
@@ -1165,8 +1025,8 @@ void rewrite_node_page(struct f2fs_sb_info *sbi, | |||
1165 | 1025 | ||
1166 | /* rewrite node page */ | 1026 | /* rewrite node page */ |
1167 | set_page_writeback(page); | 1027 | set_page_writeback(page); |
1168 | submit_write_page(sbi, page, new_blkaddr, NODE); | 1028 | f2fs_submit_page_mbio(sbi, page, new_blkaddr, NODE, WRITE); |
1169 | f2fs_submit_bio(sbi, NODE, true); | 1029 | f2fs_submit_merged_bio(sbi, NODE, true, WRITE); |
1170 | refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); | 1030 | refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); |
1171 | 1031 | ||
1172 | locate_dirty_segment(sbi, old_cursegno); | 1032 | locate_dirty_segment(sbi, old_cursegno); |
@@ -1176,6 +1036,16 @@ void rewrite_node_page(struct f2fs_sb_info *sbi, | |||
1176 | mutex_unlock(&curseg->curseg_mutex); | 1036 | mutex_unlock(&curseg->curseg_mutex); |
1177 | } | 1037 | } |
1178 | 1038 | ||
1039 | void f2fs_wait_on_page_writeback(struct page *page, | ||
1040 | enum page_type type, bool sync) | ||
1041 | { | ||
1042 | struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); | ||
1043 | if (PageWriteback(page)) { | ||
1044 | f2fs_submit_merged_bio(sbi, type, sync, WRITE); | ||
1045 | wait_on_page_writeback(page); | ||
1046 | } | ||
1047 | } | ||
1048 | |||
1179 | static int read_compacted_summaries(struct f2fs_sb_info *sbi) | 1049 | static int read_compacted_summaries(struct f2fs_sb_info *sbi) |
1180 | { | 1050 | { |
1181 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | 1051 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); |
@@ -1723,13 +1593,13 @@ repeat: | |||
1723 | continue; | 1593 | continue; |
1724 | } | 1594 | } |
1725 | 1595 | ||
1726 | submit_read_page(sbi, page, blk_addr, READ_SYNC | REQ_META); | 1596 | f2fs_submit_page_mbio(sbi, page, blk_addr, META, READ); |
1727 | 1597 | ||
1728 | mark_page_accessed(page); | 1598 | mark_page_accessed(page); |
1729 | f2fs_put_page(page, 0); | 1599 | f2fs_put_page(page, 0); |
1730 | } | 1600 | } |
1731 | 1601 | ||
1732 | f2fs_submit_read_bio(sbi, READ_SYNC | REQ_META); | 1602 | f2fs_submit_merged_bio(sbi, META, true, READ); |
1733 | return blkno - start; | 1603 | return blkno - start; |
1734 | } | 1604 | } |
1735 | 1605 | ||