author		Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-03-08 07:29:23 -0500
committer	Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-03-20 05:30:06 -0400
commit		393ff91f57c87d48ffed30878be6e3e486d3a00a
tree		c80fe33bcf8546ebce9ab6fc043b99889e67536f
parent		25c0a6e529b56ca010e1f46239edd07c1b484b63
f2fs: reduce unnecessary locking of pages during read
This patch reduces redundant locking and unlocking of pages during read
operations.

In f2fs_readpage, let's use wait_on_page_locked() instead of lock_page.
And then, when we need to modify any data finally, let's lock the page
so that we can avoid lock contention.

[readpage rule]
- f2fs_readpage returns an unlocked page, or a released page in error cases.
- Its caller should handle the read error, -EIO, after locking the page,
  which indicates read completion.
- Its caller should check PageUptodate after grab_cache_page.

Signed-off-by: Changman Lee <cm224.lee@samsung.com>
Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
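For illustration only (not part of this patch): a minimal sketch of a caller
that follows the readpage rule above. It mirrors get_lock_data_page() as
modified below; the helper name read_locked_page() is hypothetical.

/*
 * Illustration only: a hypothetical caller following the readpage rule.
 * Modeled on get_lock_data_page() as changed by this patch.
 */
static struct page *read_locked_page(struct f2fs_sb_info *sbi,
			struct address_space *mapping,
			pgoff_t index, block_t blk_addr)
{
	struct page *page;
	int err;

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* Rule 3: another thread may already have read this page. */
	if (PageUptodate(page))
		return page;	/* still locked by grab_cache_page() */

	/*
	 * Rule 1: f2fs_readpage() returns with the page unlocked,
	 * and releases the page itself on error.
	 */
	err = f2fs_readpage(sbi, page, blk_addr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	/*
	 * Rule 2: locking the page waits for read completion;
	 * a page that is still not uptodate means the read failed.
	 */
	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

find_data_page() in the patch follows the same rule but returns the page
unlocked, using wait_on_page_locked() instead of lock_page().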
-rw-r--r--	fs/f2fs/checkpoint.c	12
-rw-r--r--	fs/f2fs/data.c		58
-rw-r--r--	fs/f2fs/node.c		58
-rw-r--r--	fs/f2fs/recovery.c	31
4 files changed, 91 insertions(+), 68 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 2b6fc131e2ce..d947e66ee8a8 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -57,13 +57,15 @@ repeat:
 		cond_resched();
 		goto repeat;
 	}
-	if (f2fs_readpage(sbi, page, index, READ_SYNC)) {
-		f2fs_put_page(page, 1);
+	if (PageUptodate(page))
+		goto out;
+
+	if (f2fs_readpage(sbi, page, index, READ_SYNC))
 		goto repeat;
-	}
-	mark_page_accessed(page);
 
-	/* We do not allow returning an errorneous page */
+	lock_page(page);
+out:
+	mark_page_accessed(page);
 	return page;
 }
 
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 277966a8547a..c8e20b618913 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -199,12 +199,17 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
+	if (PageUptodate(page)) {
+		unlock_page(page);
+		return page;
+	}
+
 	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-	if (err) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(err);
+	wait_on_page_locked(page);
+	if (!PageUptodate(page)) {
+		f2fs_put_page(page, 0);
+		return ERR_PTR(-EIO);
 	}
-	unlock_page(page);
 	return page;
 }
 
@@ -241,9 +246,13 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
 	BUG_ON(dn.data_blkaddr == NULL_ADDR);
 
 	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-	if (err) {
-		f2fs_put_page(page, 1);
+	if (err)
 		return ERR_PTR(err);
+
+	lock_page(page);
+	if (!PageUptodate(page)) {
+		f2fs_put_page(page, 1);
+		return ERR_PTR(-EIO);
 	}
 	return page;
 }
@@ -283,14 +292,17 @@ struct page *get_new_data_page(struct inode *inode, pgoff_t index,
 
 	if (dn.data_blkaddr == NEW_ADDR) {
 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		SetPageUptodate(page);
 	} else {
 		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-		if (err) {
-			f2fs_put_page(page, 1);
+		if (err)
 			return ERR_PTR(err);
+		lock_page(page);
+		if (!PageUptodate(page)) {
+			f2fs_put_page(page, 1);
+			return ERR_PTR(-EIO);
 		}
 	}
-	SetPageUptodate(page);
 
 	if (new_i_size &&
 		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
@@ -325,22 +337,14 @@ static void read_end_io(struct bio *bio, int err)
 
 /*
  * Fill the locked page with data located in the block address.
- * Read operation is synchronous, and caller must unlock the page.
+ * Return unlocked page.
  */
 int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
 					block_t blk_addr, int type)
 {
 	struct block_device *bdev = sbi->sb->s_bdev;
-	bool sync = (type == READ_SYNC);
 	struct bio *bio;
 
-	/* This page can be already read by other threads */
-	if (PageUptodate(page)) {
-		if (!sync)
-			unlock_page(page);
-		return 0;
-	}
-
 	down_read(&sbi->bio_sem);
 
 	/* Allocate a new bio */
@@ -354,18 +358,12 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
 		kfree(bio->bi_private);
 		bio_put(bio);
 		up_read(&sbi->bio_sem);
+		f2fs_put_page(page, 1);
 		return -EFAULT;
 	}
 
 	submit_bio(type, bio);
 	up_read(&sbi->bio_sem);
-
-	/* wait for read completion if sync */
-	if (sync) {
-		lock_page(page);
-		if (PageError(page))
-			return -EIO;
-	}
 	return 0;
 }
 
@@ -636,18 +634,22 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 
 		/* Reading beyond i_size is simple: memset to zero */
 		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
-		return 0;
+		goto out;
 	}
 
 	if (dn.data_blkaddr == NEW_ADDR) {
 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 	} else {
 		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-		if (err) {
-			f2fs_put_page(page, 1);
+		if (err)
 			return err;
+		lock_page(page);
+		if (!PageUptodate(page)) {
+			f2fs_put_page(page, 1);
+			return -EIO;
 		}
 	}
+out:
 	SetPageUptodate(page);
 	clear_cold_data(page);
 	return 0;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index a3cb1ff34f8e..9e6ed6708fa8 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -100,10 +100,13 @@ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
 		page = grab_cache_page(mapping, index);
 		if (!page)
 			continue;
-		if (f2fs_readpage(sbi, page, index, READ)) {
+		if (PageUptodate(page)) {
 			f2fs_put_page(page, 1);
 			continue;
 		}
+		if (f2fs_readpage(sbi, page, index, READ))
+			continue;
+
 		f2fs_put_page(page, 0);
 	}
 }
@@ -851,8 +854,16 @@ static int read_node_page(struct page *page, int type)
 
 	get_node_info(sbi, page->index, &ni);
 
-	if (ni.blk_addr == NULL_ADDR)
+	if (ni.blk_addr == NULL_ADDR) {
+		f2fs_put_page(page, 1);
 		return -ENOENT;
+	}
+
+	if (PageUptodate(page)) {
+		unlock_page(page);
+		return 0;
+	}
+
 	return f2fs_readpage(sbi, page, ni.blk_addr, type);
 }
 
@@ -865,19 +876,18 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 	struct page *apage;
 
 	apage = find_get_page(mapping, nid);
-	if (apage && PageUptodate(apage))
-		goto release_out;
+	if (apage && PageUptodate(apage)) {
+		f2fs_put_page(apage, 0);
+		return;
+	}
 	f2fs_put_page(apage, 0);
 
 	apage = grab_cache_page(mapping, nid);
 	if (!apage)
 		return;
 
-	if (read_node_page(apage, READA))
-		unlock_page(apage);
-
-release_out:
-	f2fs_put_page(apage, 0);
+	if (read_node_page(apage, READA) == 0)
+		f2fs_put_page(apage, 0);
 	return;
 }
 
@@ -892,11 +902,14 @@ struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
 		return ERR_PTR(-ENOMEM);
 
 	err = read_node_page(page, READ_SYNC);
-	if (err) {
-		f2fs_put_page(page, 1);
+	if (err)
 		return ERR_PTR(err);
-	}
 
+	lock_page(page);
+	if (!PageUptodate(page)) {
+		f2fs_put_page(page, 1);
+		return ERR_PTR(-EIO);
+	}
 	BUG_ON(nid != nid_of_node(page));
 	mark_page_accessed(page);
 	return page;
@@ -928,11 +941,8 @@ repeat:
 		goto page_hit;
 
 	err = read_node_page(page, READ_SYNC);
-	unlock_page(page);
-	if (err) {
-		f2fs_put_page(page, 0);
+	if (err)
 		return ERR_PTR(err);
-	}
 
 	/* Then, try readahead for siblings of the desired node */
 	end = start + MAX_RA_NODE;
@@ -957,6 +967,7 @@ page_hit:
 		f2fs_put_page(page, 1);
 		goto repeat;
 	}
+	mark_page_accessed(page);
 	return page;
 }
 
@@ -1473,23 +1484,24 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 	sum_entry = &sum->entries[0];
 
 	for (i = 0; i < last_offset; i++, sum_entry++) {
+		/*
+		 * In order to read next node page,
+		 * we must clear PageUptodate flag.
+		 */
+		ClearPageUptodate(page);
+
 		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
 			goto out;
 
+		lock_page(page);
 		rn = (struct f2fs_node *)page_address(page);
 		sum_entry->nid = rn->footer.nid;
 		sum_entry->version = 0;
 		sum_entry->ofs_in_node = 0;
 		addr++;
-
-		/*
-		 * In order to read next node page,
-		 * we must clear PageUptodate flag.
-		 */
-		ClearPageUptodate(page);
 	}
-out:
 	unlock_page(page);
+out:
 	__free_pages(page, 0);
 	return 0;
 }
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 6b82e2034cfd..2d86eb26c493 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -112,11 +112,16 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 	while (1) {
 		struct fsync_inode_entry *entry;
 
-		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
+		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
+		if (err)
 			goto out;
 
-		if (cp_ver != cpver_of_node(page))
-			goto out;
+		lock_page(page);
+
+		if (cp_ver != cpver_of_node(page)) {
+			err = -EINVAL;
+			goto unlock_out;
+		}
 
 		if (!is_fsync_dnode(page))
 			goto next;
@@ -131,7 +136,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 		if (IS_INODE(page) && is_dent_dnode(page)) {
 			if (recover_inode_page(sbi, page)) {
 				err = -ENOMEM;
-				goto out;
+				goto unlock_out;
 			}
 		}
 
@@ -139,14 +144,14 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 		entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
 		if (!entry) {
 			err = -ENOMEM;
-			goto out;
+			goto unlock_out;
 		}
 
 		entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
 		if (IS_ERR(entry->inode)) {
 			err = PTR_ERR(entry->inode);
 			kmem_cache_free(fsync_entry_slab, entry);
-			goto out;
+			goto unlock_out;
 		}
 
 		list_add_tail(&entry->list, head);
@@ -155,15 +160,15 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 		if (IS_INODE(page)) {
 			err = recover_inode(entry->inode, page);
 			if (err)
-				goto out;
+				goto unlock_out;
 		}
 next:
 		/* check next segment */
 		blkaddr = next_blkaddr_of_node(page);
-		ClearPageUptodate(page);
 	}
-out:
+unlock_out:
 	unlock_page(page);
+out:
 	__free_pages(page, 0);
 	return err;
 }
@@ -319,8 +324,10 @@ static void recover_data(struct f2fs_sb_info *sbi,
 		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
 			goto out;
 
+		lock_page(page);
+
 		if (cp_ver != cpver_of_node(page))
-			goto out;
+			goto unlock_out;
 
 		entry = get_fsync_inode(head, ino_of_node(page));
 		if (!entry)
@@ -336,10 +343,10 @@ static void recover_data(struct f2fs_sb_info *sbi,
 next:
 		/* check next segment */
 		blkaddr = next_blkaddr_of_node(page);
-		ClearPageUptodate(page);
 	}
-out:
+unlock_out:
 	unlock_page(page);
+out:
 	__free_pages(page, 0);
 
 	allocate_new_segments(sbi);