about summary refs log tree commit diff stats
path: root/fs/f2fs/data.c
diff options
context:
space:
mode:
authorGu Zheng <guz.fnst@cn.fujitsu.com>2013-12-20 04:39:59 -0500
committerJaegeuk Kim <jaegeuk.kim@samsung.com>2013-12-22 20:18:07 -0500
commit940a6d34b31b96f0748a4b688a551a0890b2b229 (patch)
tree1db17fcefd8d15d3733877b4f6fab3f395ffeac4 /fs/f2fs/data.c
parentba0697ec98f0244ea180ce336f399d1a24eb7bf8 (diff)
f2fs: move all the bio initialization into __bio_alloc
Move all the bio initialization into __bio_alloc, and some minor cleanups are also added. v3: Use 'bool' rather than 'int' as Kim suggested. v2: Use 'is_read' rather than 'rw' as Yu Chao suggested. Remove the needless initialization of bio->bi_private. Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com> Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--fs/f2fs/data.c92
1 file changed, 42 insertions, 50 deletions
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index a0950bcbf568..154a4f93a548 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -24,20 +24,6 @@
24#include "segment.h" 24#include "segment.h"
25#include <trace/events/f2fs.h> 25#include <trace/events/f2fs.h>
26 26
27/*
28 * Low-level block read/write IO operations.
29 */
30static struct bio *__bio_alloc(struct block_device *bdev, int npages)
31{
32 struct bio *bio;
33
34 /* No failure on bio allocation */
35 bio = bio_alloc(GFP_NOIO, npages);
36 bio->bi_bdev = bdev;
37 bio->bi_private = NULL;
38 return bio;
39}
40
41static void f2fs_read_end_io(struct bio *bio, int err) 27static void f2fs_read_end_io(struct bio *bio, int err)
42{ 28{
43 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 29 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -93,6 +79,24 @@ static void f2fs_write_end_io(struct bio *bio, int err)
93 bio_put(bio); 79 bio_put(bio);
94} 80}
95 81
82/*
83 * Low-level block read/write IO operations.
84 */
85static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
86 int npages, bool is_read)
87{
88 struct bio *bio;
89
90 /* No failure on bio allocation */
91 bio = bio_alloc(GFP_NOIO, npages);
92
93 bio->bi_bdev = sbi->sb->s_bdev;
94 bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
95 bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
96
97 return bio;
98}
99
96static void __submit_merged_bio(struct f2fs_bio_info *io) 100static void __submit_merged_bio(struct f2fs_bio_info *io)
97{ 101{
98 struct f2fs_io_info *fio = &io->fio; 102 struct f2fs_io_info *fio = &io->fio;
@@ -104,25 +108,26 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
104 rw = fio->rw | fio->rw_flag; 108 rw = fio->rw | fio->rw_flag;
105 109
106 if (is_read_io(rw)) { 110 if (is_read_io(rw)) {
107 trace_f2fs_submit_read_bio(io->sbi->sb, rw, fio->type, io->bio); 111 trace_f2fs_submit_read_bio(io->sbi->sb, rw,
112 fio->type, io->bio);
108 submit_bio(rw, io->bio); 113 submit_bio(rw, io->bio);
109 io->bio = NULL;
110 return;
111 }
112 trace_f2fs_submit_write_bio(io->sbi->sb, rw, fio->type, io->bio);
113
114 /*
115 * META_FLUSH is only from the checkpoint procedure, and we should wait
116 * this metadata bio for FS consistency.
117 */
118 if (fio->type == META_FLUSH) {
119 DECLARE_COMPLETION_ONSTACK(wait);
120 io->bio->bi_private = &wait;
121 submit_bio(rw, io->bio);
122 wait_for_completion(&wait);
123 } else { 114 } else {
124 submit_bio(rw, io->bio); 115 trace_f2fs_submit_write_bio(io->sbi->sb, rw,
116 fio->type, io->bio);
117 /*
118 * META_FLUSH is only from the checkpoint procedure, and we
119 * should wait this metadata bio for FS consistency.
120 */
121 if (fio->type == META_FLUSH) {
122 DECLARE_COMPLETION_ONSTACK(wait);
123 io->bio->bi_private = &wait;
124 submit_bio(rw, io->bio);
125 wait_for_completion(&wait);
126 } else {
127 submit_bio(rw, io->bio);
128 }
125 } 129 }
130
126 io->bio = NULL; 131 io->bio = NULL;
127} 132}
128 133
@@ -152,17 +157,12 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
152int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, 157int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
153 block_t blk_addr, int rw) 158 block_t blk_addr, int rw)
154{ 159{
155 struct block_device *bdev = sbi->sb->s_bdev;
156 struct bio *bio; 160 struct bio *bio;
157 161
158 trace_f2fs_submit_page_bio(page, blk_addr, rw); 162 trace_f2fs_submit_page_bio(page, blk_addr, rw);
159 163
160 /* Allocate a new bio */ 164 /* Allocate a new bio */
161 bio = __bio_alloc(bdev, 1); 165 bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));
162
163 /* Initialize the bio */
164 bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
165 bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io : f2fs_write_end_io;
166 166
167 if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { 167 if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
168 bio_put(bio); 168 bio_put(bio);
@@ -178,17 +178,16 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
178 block_t blk_addr, struct f2fs_io_info *fio) 178 block_t blk_addr, struct f2fs_io_info *fio)
179{ 179{
180 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); 180 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
181 struct block_device *bdev = sbi->sb->s_bdev;
182 struct f2fs_bio_info *io; 181 struct f2fs_bio_info *io;
183 int bio_blocks; 182 bool is_read = is_read_io(fio->rw);
184 183
185 io = is_read_io(fio->rw) ? &sbi->read_io : &sbi->write_io[btype]; 184 io = is_read ? &sbi->read_io : &sbi->write_io[btype];
186 185
187 verify_block_addr(sbi, blk_addr); 186 verify_block_addr(sbi, blk_addr);
188 187
189 mutex_lock(&io->io_mutex); 188 mutex_lock(&io->io_mutex);
190 189
191 if (!is_read_io(fio->rw)) 190 if (!is_read)
192 inc_page_count(sbi, F2FS_WRITEBACK); 191 inc_page_count(sbi, F2FS_WRITEBACK);
193 192
194 if (io->bio && (io->last_block_in_bio != blk_addr - 1 || 193 if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
@@ -196,17 +195,10 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
196 __submit_merged_bio(io); 195 __submit_merged_bio(io);
197alloc_new: 196alloc_new:
198 if (io->bio == NULL) { 197 if (io->bio == NULL) {
199 bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); 198 int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
200 io->bio = __bio_alloc(bdev, bio_blocks); 199
201 io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); 200 io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
202 io->bio->bi_end_io = is_read_io(fio->rw) ? f2fs_read_end_io :
203 f2fs_write_end_io;
204 io->fio = *fio; 201 io->fio = *fio;
205 /*
206 * The end_io will be assigned at the sumbission phase.
207 * Until then, let bio_add_page() merge consecutive IOs as much
208 * as possible.
209 */
210 } 202 }
211 203
212 if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) < 204 if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <