author		Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-03-08 07:29:23 -0500
committer	Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-03-20 05:30:06 -0400
commit		393ff91f57c87d48ffed30878be6e3e486d3a00a (patch)
tree		c80fe33bcf8546ebce9ab6fc043b99889e67536f /fs/f2fs/data.c
parent		25c0a6e529b56ca010e1f46239edd07c1b484b63 (diff)
f2fs: reduce unnecessary locking of pages during read
This patch reduces redundant locking and unlocking of pages during read operations.
After f2fs_readpage(), let's use wait_on_page_locked() instead of lock_page() when the
caller only reads the data, and lock the page only when the data finally needs to be
modified, so that we can avoid lock contention.
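As a condensed sketch of that read-only path (it mirrors the find_data_page() hunk in
the diff below; the surrounding setup and dnode handling are omitted):

        /* page comes from grab_cache_page() and is locked at this point */
        if (PageUptodate(page)) {
                unlock_page(page);
                return page;
        }

        err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
        wait_on_page_locked(page);      /* read completion unlocks the page */
        if (!PageUptodate(page)) {
                f2fs_put_page(page, 0); /* already unlocked, just release */
                return ERR_PTR(-EIO);
        }
        return page;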
[readpage rule]
- f2fs_readpage() returns the page unlocked, and also releases it in error cases.
- Its caller should handle a read error (-EIO) after locking the page, since taking
  the lock indicates that the read has completed (see the caller sketch below).
- Its caller should check PageUptodate after grab_cache_page().
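For callers that go on to modify the page (e.g. get_lock_data_page() and
f2fs_write_begin() in the diff below), the pattern is roughly the following
condensed sketch:

        err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
        if (err)
                return ERR_PTR(err);    /* f2fs_readpage released the page on error */

        lock_page(page);                /* waits until the read end_io unlocks the page */
        if (!PageUptodate(page)) {
                f2fs_put_page(page, 1); /* unlock and release */
                return ERR_PTR(-EIO);
        }
        /* the page is locked and uptodate; it is now safe to modify */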
Signed-off-by: Changman Lee <cm224.lee@samsung.com>
Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--	fs/f2fs/data.c	58
1 file changed, 30 insertions, 28 deletions
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 277966a8547a..c8e20b618913 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -199,12 +199,17 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
         if (!page)
                 return ERR_PTR(-ENOMEM);
 
+        if (PageUptodate(page)) {
+                unlock_page(page);
+                return page;
+        }
+
         err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-        if (err) {
-                f2fs_put_page(page, 1);
-                return ERR_PTR(err);
+        wait_on_page_locked(page);
+        if (!PageUptodate(page)) {
+                f2fs_put_page(page, 0);
+                return ERR_PTR(-EIO);
         }
-        unlock_page(page);
         return page;
 }
 
@@ -241,9 +246,13 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
         BUG_ON(dn.data_blkaddr == NULL_ADDR);
 
         err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-        if (err) {
-                f2fs_put_page(page, 1);
+        if (err)
                 return ERR_PTR(err);
+
+        lock_page(page);
+        if (!PageUptodate(page)) {
+                f2fs_put_page(page, 1);
+                return ERR_PTR(-EIO);
         }
         return page;
 }
@@ -283,14 +292,17 @@ struct page *get_new_data_page(struct inode *inode, pgoff_t index,
 
         if (dn.data_blkaddr == NEW_ADDR) {
                 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+                SetPageUptodate(page);
         } else {
                 err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-                if (err) {
-                        f2fs_put_page(page, 1);
+                if (err)
                         return ERR_PTR(err);
+                lock_page(page);
+                if (!PageUptodate(page)) {
+                        f2fs_put_page(page, 1);
+                        return ERR_PTR(-EIO);
                 }
         }
-        SetPageUptodate(page);
 
         if (new_i_size &&
                 i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
@@ -325,22 +337,14 @@ static void read_end_io(struct bio *bio, int err)
 
 /*
  * Fill the locked page with data located in the block address.
- * Read operation is synchronous, and caller must unlock the page.
+ * Return unlocked page.
  */
 int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
                         block_t blk_addr, int type)
 {
         struct block_device *bdev = sbi->sb->s_bdev;
-        bool sync = (type == READ_SYNC);
         struct bio *bio;
 
-        /* This page can be already read by other threads */
-        if (PageUptodate(page)) {
-                if (!sync)
-                        unlock_page(page);
-                return 0;
-        }
-
         down_read(&sbi->bio_sem);
 
         /* Allocate a new bio */
@@ -354,18 +358,12 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
                 kfree(bio->bi_private);
                 bio_put(bio);
                 up_read(&sbi->bio_sem);
+                f2fs_put_page(page, 1);
                 return -EFAULT;
         }
 
         submit_bio(type, bio);
         up_read(&sbi->bio_sem);
-
-        /* wait for read completion if sync */
-        if (sync) {
-                lock_page(page);
-                if (PageError(page))
-                        return -EIO;
-        }
         return 0;
 }
 
@@ -636,18 +634,22 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 
                 /* Reading beyond i_size is simple: memset to zero */
                 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
-                return 0;
+                goto out;
         }
 
         if (dn.data_blkaddr == NEW_ADDR) {
                 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
         } else {
                 err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-                if (err) {
-                        f2fs_put_page(page, 1);
+                if (err)
                         return err;
+                lock_page(page);
+                if (!PageUptodate(page)) {
+                        f2fs_put_page(page, 1);
+                        return -EIO;
                 }
         }
+out:
         SetPageUptodate(page);
         clear_cold_data(page);
         return 0;