 fs/btrfs/extent_io.c       |  11
 fs/btrfs/file.c            |   5
 fs/buffer.c                |   7
 fs/ext4/mballoc.c          |  14
 fs/f2fs/checkpoint.c       |   3
 fs/f2fs/node.c             |   2
 fs/fuse/file.c             |   2
 fs/gfs2/aops.c             |   1
 fs/gfs2/meta_io.c          |   4
 fs/ntfs/attrib.c           |   1
 fs/ntfs/file.c             |   1
 include/linux/page-flags.h |   1
 include/linux/pagemap.h    | 107
 include/linux/swap.h       |   1
 mm/filemap.c               | 202
 mm/shmem.c                 |   6
 mm/swap.c                  |  11
 17 files changed, 217 insertions(+), 162 deletions(-)
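
The recurring pattern in the hunks below: call sites that did a page-cache lookup followed by a separate mark_page_accessed() are folded into a single lookup carrying FGP_ACCESSED. A rough before/after for a hypothetical caller (illustrative only, not part of the patch):

        /* before: lookup, then a second pass over a now-visible page,
         * paying for atomic bitops in mark_page_accessed() */
        page = find_get_page(mapping, index);
        if (page)
                mark_page_accessed(page);

        /* after: one call; the lookup marks the page accessed itself,
         * and freshly allocated pages are initialised non-atomically */
        page = find_get_page_flags(mapping, index, FGP_ACCESSED);
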
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f29a54e454d4..4cd0ac983f91 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4510,7 +4510,8 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
         spin_unlock(&eb->refs_lock);
 }
 
-static void mark_extent_buffer_accessed(struct extent_buffer *eb)
+static void mark_extent_buffer_accessed(struct extent_buffer *eb,
+                                        struct page *accessed)
 {
         unsigned long num_pages, i;
 
@@ -4519,7 +4520,8 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb)
         num_pages = num_extent_pages(eb->start, eb->len);
         for (i = 0; i < num_pages; i++) {
                 struct page *p = extent_buffer_page(eb, i);
-                mark_page_accessed(p);
+                if (p != accessed)
+                        mark_page_accessed(p);
         }
 }
 
@@ -4533,7 +4535,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
                                start >> PAGE_CACHE_SHIFT);
         if (eb && atomic_inc_not_zero(&eb->refs)) {
                 rcu_read_unlock();
-                mark_extent_buffer_accessed(eb);
+                mark_extent_buffer_accessed(eb, NULL);
                 return eb;
         }
         rcu_read_unlock();
@@ -4581,7 +4583,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                 spin_unlock(&mapping->private_lock);
                 unlock_page(p);
                 page_cache_release(p);
-                mark_extent_buffer_accessed(exists);
+                mark_extent_buffer_accessed(exists, p);
                 goto free_eb;
         }
 
@@ -4596,7 +4598,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                 attach_extent_buffer_page(eb, p);
                 spin_unlock(&mapping->private_lock);
                 WARN_ON(PageDirty(p));
-                mark_page_accessed(p);
                 eb->pages[i] = p;
                 if (!PageUptodate(p))
                         uptodate = 0;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index ae6af072b635..74272a3f9d9b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -470,11 +470,12 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
         for (i = 0; i < num_pages; i++) {
                 /* page checked is some magic around finding pages that
                  * have been modified without going through btrfs_set_page_dirty
-                 * clear it here
+                 * clear it here. There should be no need to mark the pages
+                 * accessed, as prepare_pages should have marked them accessed
+                 * via find_or_create_page()
                  */
                 ClearPageChecked(pages[i]);
                 unlock_page(pages[i]);
-                mark_page_accessed(pages[i]);
                 page_cache_release(pages[i]);
         }
 }
diff --git a/fs/buffer.c b/fs/buffer.c
index 0d3e8d5a2299..eba6e4f621ce 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -227,7 +227,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
         int all_mapped = 1;
 
         index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
-        page = find_get_page(bd_mapping, index);
+        page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
         if (!page)
                 goto out;
 
@@ -1366,12 +1366,13 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
 
         if (bh == NULL) {
+                /* __find_get_block_slow will mark the page accessed */
                 bh = __find_get_block_slow(bdev, block);
                 if (bh)
                         bh_lru_install(bh);
-        }
-        if (bh)
+        } else
                 touch_buffer(bh);
+
         return bh;
 }
 EXPORT_SYMBOL(__find_get_block);
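
For context on the else-branch above: touch_buffer() ends up doing the same page-level work, which is why the two paths no longer both mark the page. A simplified sketch of that helper as it exists elsewhere in fs/buffer.c (recalled, may differ in detail):

void touch_buffer(struct buffer_head *bh)
{
        trace_block_touch_buffer(bh);
        mark_page_accessed(bh->b_page);
}
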
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c8238a26818c..afe8a133e3d1 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1044,6 +1044,8 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
          * allocating. If we are looking at the buddy cache we would
          * have taken a reference using ext4_mb_load_buddy and that
          * would have pinned buddy page to page cache.
+         * The call to ext4_mb_get_buddy_page_lock will mark the
+         * page accessed.
          */
         ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
         if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
@@ -1062,7 +1064,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
                 ret = -EIO;
                 goto err;
         }
-        mark_page_accessed(page);
 
         if (e4b.bd_buddy_page == NULL) {
                 /*
@@ -1082,7 +1083,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
                 ret = -EIO;
                 goto err;
         }
-        mark_page_accessed(page);
 err:
         ext4_mb_put_buddy_page_lock(&e4b);
         return ret;
@@ -1141,7 +1141,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
 
         /* we could use find_or_create_page(), but it locks page
          * what we'd like to avoid in fast path ... */
-        page = find_get_page(inode->i_mapping, pnum);
+        page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
         if (page == NULL || !PageUptodate(page)) {
                 if (page)
                         /*
@@ -1176,15 +1176,16 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
                 ret = -EIO;
                 goto err;
         }
+
+        /* Pages marked accessed already */
         e4b->bd_bitmap_page = page;
         e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
-        mark_page_accessed(page);
 
         block++;
         pnum = block / blocks_per_page;
         poff = block % blocks_per_page;
 
-        page = find_get_page(inode->i_mapping, pnum);
+        page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
         if (page == NULL || !PageUptodate(page)) {
                 if (page)
                         page_cache_release(page);
@@ -1209,9 +1210,10 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
                 ret = -EIO;
                 goto err;
         }
+
+        /* Pages marked accessed already */
         e4b->bd_buddy_page = page;
         e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
-        mark_page_accessed(page);
 
         BUG_ON(e4b->bd_bitmap_page == NULL);
         BUG_ON(e4b->bd_buddy_page == NULL);
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 4aa521aa9bc3..c405b8f17054 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -69,7 +69,6 @@ repeat:
                 goto repeat;
         }
 out:
-        mark_page_accessed(page);
         return page;
 }
 
@@ -137,13 +136,11 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
                 if (!page)
                         continue;
                 if (PageUptodate(page)) {
-                        mark_page_accessed(page);
                         f2fs_put_page(page, 1);
                         continue;
                 }
 
                 f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
-                mark_page_accessed(page);
                 f2fs_put_page(page, 0);
         }
 out:
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index a161e955c4c8..57caa6eaf47b 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -967,7 +967,6 @@ repeat:
                 goto repeat;
         }
 got_it:
-        mark_page_accessed(page);
         return page;
 }
 
973 972
@@ -1022,7 +1021,6 @@ page_hit:
1022 f2fs_put_page(page, 1); 1021 f2fs_put_page(page, 1);
1023 return ERR_PTR(-EIO); 1022 return ERR_PTR(-EIO);
1024 } 1023 }
1025 mark_page_accessed(page);
1026 return page; 1024 return page;
1027} 1025}
1028 1026
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f680d2c44e97..903cbc9cd6bd 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1089,8 +1089,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
                 flush_dcache_page(page);
 
-                mark_page_accessed(page);
-
                 if (!tmp) {
                         unlock_page(page);
                         page_cache_release(page);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 5a49b037da81..492123cda64a 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -577,7 +577,6 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                 p = kmap_atomic(page);
                 memcpy(buf + copied, p + offset, amt);
                 kunmap_atomic(p);
-                mark_page_accessed(page);
                 page_cache_release(page);
                 copied += amt;
                 index++;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 2cf09b63a6b4..b984a6e190bc 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -136,7 +136,8 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
                         yield();
                 }
         } else {
-                page = find_lock_page(mapping, index);
+                page = find_get_page_flags(mapping, index,
+                                FGP_LOCK|FGP_ACCESSED);
                 if (!page)
                         return NULL;
         }
@@ -153,7 +154,6 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
         map_bh(bh, sdp->sd_vfs, blkno);
 
         unlock_page(page);
-        mark_page_accessed(page);
         page_cache_release(page);
 
         return bh;
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index a27e3fecefaf..250ed5b20c8f 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1748,7 +1748,6 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
         if (page) {
                 set_page_dirty(page);
                 unlock_page(page);
-                mark_page_accessed(page);
                 page_cache_release(page);
         }
         ntfs_debug("Done.");
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index db9bd8a31725..86ddab916b66 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2060,7 +2060,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
         }
         do {
                 unlock_page(pages[--do_pages]);
-                mark_page_accessed(pages[do_pages]);
                 page_cache_release(pages[do_pages]);
         } while (do_pages);
         if (unlikely(status))
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4d4b39ab2341..2093eb72785e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -198,6 +198,7 @@ struct page; /* forward declaration */
 TESTPAGEFLAG(Locked, locked)
 PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
+        __SETPAGEFLAG(Referenced, referenced)
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
 PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
 PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
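
The added __SETPAGEFLAG(Referenced, referenced) generates a non-atomic setter next to the existing atomic one. Roughly, the generated inlines look like this (a simplified sketch of the macro expansion, not the literal header text):

/* from PAGEFLAG(Referenced, referenced): atomic, safe on visible pages */
static inline void SetPageReferenced(struct page *page)
{
        set_bit(PG_referenced, &page->flags);
}

/* from the new __SETPAGEFLAG(Referenced, referenced): non-atomic,
 * only safe while no other context can see the page */
static inline void __SetPageReferenced(struct page *page)
{
        __set_bit(PG_referenced, &page->flags);
}
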
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c16fb6d06e36..0a97b583ee8d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -259,12 +259,109 @@ pgoff_t page_cache_next_hole(struct address_space *mapping,
 pgoff_t page_cache_prev_hole(struct address_space *mapping,
                              pgoff_t index, unsigned long max_scan);
 
+#define FGP_ACCESSED            0x00000001
+#define FGP_LOCK                0x00000002
+#define FGP_CREAT               0x00000004
+#define FGP_WRITE               0x00000008
+#define FGP_NOFS                0x00000010
+#define FGP_NOWAIT              0x00000020
+
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+                int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
+
+/**
+ * find_get_page - find and get a page reference
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned with an increased refcount.
+ *
+ * Otherwise, %NULL is returned.
+ */
+static inline struct page *find_get_page(struct address_space *mapping,
+                                        pgoff_t offset)
+{
+        return pagecache_get_page(mapping, offset, 0, 0, 0);
+}
+
+static inline struct page *find_get_page_flags(struct address_space *mapping,
+                                        pgoff_t offset, int fgp_flags)
+{
+        return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
+}
+
+/**
+ * find_lock_page - locate, pin and lock a pagecache page
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned locked and with an increased
+ * refcount.
+ *
+ * Otherwise, %NULL is returned.
+ *
+ * find_lock_page() may sleep.
+ */
+static inline struct page *find_lock_page(struct address_space *mapping,
+                                        pgoff_t offset)
+{
+        return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
+}
+
+/**
+ * find_or_create_page - locate or add a pagecache page
+ * @mapping: the page's address_space
+ * @index: the page's index into the mapping
+ * @gfp_mask: page allocation mode
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned locked and with an increased
+ * refcount.
+ *
+ * If the page is not present, a new page is allocated using @gfp_mask
+ * and added to the page cache and the VM's LRU list. The page is
+ * returned locked and with an increased refcount.
+ *
+ * On memory exhaustion, %NULL is returned.
+ *
+ * find_or_create_page() may sleep, even if @gfp_mask specifies an
+ * atomic allocation!
+ */
+static inline struct page *find_or_create_page(struct address_space *mapping,
+                                        pgoff_t offset, gfp_t gfp_mask)
+{
+        return pagecache_get_page(mapping, offset,
+                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
+                                        gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
+}
+
+/**
+ * grab_cache_page_nowait - returns locked page at given index in given cache
+ * @mapping: target address_space
+ * @index: the page index
+ *
+ * Same as grab_cache_page(), but do not wait if the page is unavailable.
+ * This is intended for speculative data generators, where the data can
+ * be regenerated if the page couldn't be grabbed. This routine should
+ * be safe to call while holding the lock for another page.
+ *
+ * Clear __GFP_FS when allocating the page to avoid recursion into the fs
+ * and deadlock against the caller's locked page.
+ */
+static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
+                                pgoff_t index)
+{
+        return pagecache_get_page(mapping, index,
+                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+                        mapping_gfp_mask(mapping),
+                        GFP_NOFS);
+}
+
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
-struct page *find_get_page(struct address_space *mapping, pgoff_t offset);
 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset);
-struct page *find_or_create_page(struct address_space *mapping, pgoff_t index,
-                                 gfp_t gfp_mask);
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                           unsigned int nr_entries, struct page **entries,
                           pgoff_t *indices);
@@ -287,8 +384,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
         return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 }
 
-extern struct page * grab_cache_page_nowait(struct address_space *mapping,
-                                pgoff_t index);
 extern struct page * read_cache_page(struct address_space *mapping,
                                 pgoff_t index, filler_t *filler, void *data);
 extern struct page * read_cache_page_gfp(struct address_space *mapping,
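
With the old exported helpers now thin inline wrappers, a caller needing a non-standard combination can invoke pagecache_get_page() directly. A hedged usage sketch (hypothetical caller, not from the patch):

        struct page *page;

        /* locked lookup that marks the page accessed and allocates on a
         * miss; the same flag set find_or_create_page() passes internally */
        page = pagecache_get_page(mapping, index,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                        mapping_gfp_mask(mapping),
                        mapping_gfp_mask(mapping) & GFP_RECLAIM_MASK);
        if (page) {
                /* page is locked with an elevated refcount */
                unlock_page(page);
                page_cache_release(page);
        }
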
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 97cf16164c46..4348d95e571f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -311,6 +311,7 @@ extern void lru_add_page_tail(struct page *page, struct page *page_tail,
                                 struct lruvec *lruvec, struct list_head *head);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
+extern void init_page_accessed(struct page *page);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_all(void);
diff --git a/mm/filemap.c b/mm/filemap.c
index 47d235b357a7..0fcd792103f3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -982,26 +982,6 @@ out:
 EXPORT_SYMBOL(find_get_entry);
 
 /**
- * find_get_page - find and get a page reference
- * @mapping: the address_space to search
- * @offset: the page index
- *
- * Looks up the page cache slot at @mapping & @offset. If there is a
- * page cache page, it is returned with an increased refcount.
- *
- * Otherwise, %NULL is returned.
- */
-struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
-{
-        struct page *page = find_get_entry(mapping, offset);
-
-        if (radix_tree_exceptional_entry(page))
-                page = NULL;
-        return page;
-}
-EXPORT_SYMBOL(find_get_page);
-
-/**
  * find_lock_entry - locate, pin and lock a page cache entry
  * @mapping: the address_space to search
  * @offset: the page cache index
@@ -1038,66 +1018,84 @@ repeat:
 EXPORT_SYMBOL(find_lock_entry);
 
 /**
- * find_lock_page - locate, pin and lock a pagecache page
+ * pagecache_get_page - find and get a page reference
  * @mapping: the address_space to search
  * @offset: the page index
+ * @fgp_flags: FGP flags
+ * @gfp_mask: gfp mask to use if a page is to be allocated
  *
- * Looks up the page cache slot at @mapping & @offset. If there is a
- * page cache page, it is returned locked and with an increased
- * refcount.
- *
- * Otherwise, %NULL is returned.
- *
- * find_lock_page() may sleep.
- */
-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
-{
-        struct page *page = find_lock_entry(mapping, offset);
-
-        if (radix_tree_exceptional_entry(page))
-                page = NULL;
-        return page;
-}
-EXPORT_SYMBOL(find_lock_page);
-
-/**
- * find_or_create_page - locate or add a pagecache page
- * @mapping: the page's address_space
- * @index: the page's index into the mapping
- * @gfp_mask: page allocation mode
+ * Looks up the page cache slot at @mapping & @offset.
  *
- * Looks up the page cache slot at @mapping & @offset. If there is a
- * page cache page, it is returned locked and with an increased
- * refcount.
+ * FGP flags modify how the page is returned
  *
- * If the page is not present, a new page is allocated using @gfp_mask
- * and added to the page cache and the VM's LRU list. The page is
- * returned locked and with an increased refcount.
+ * FGP_ACCESSED: the page will be marked accessed
+ * FGP_LOCK: the page is returned locked
+ * FGP_CREAT: if the page is not present then a new page is allocated using
+ *              @gfp_mask and added to the page cache and the VM's LRU
+ *              list. The page is returned locked and with an increased
+ *              refcount. Otherwise, %NULL is returned.
  *
- * On memory exhaustion, %NULL is returned.
+ * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
+ * if the GFP flags specified for FGP_CREAT are atomic.
  *
- * find_or_create_page() may sleep, even if @gfp_flags specifies an
- * atomic allocation!
+ * If there is a page cache page, it is returned with an increased refcount.
  */
-struct page *find_or_create_page(struct address_space *mapping,
-                pgoff_t index, gfp_t gfp_mask)
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+        int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
 {
         struct page *page;
-        int err;
+
 repeat:
-        page = find_lock_page(mapping, index);
-        if (!page) {
-                page = __page_cache_alloc(gfp_mask);
+        page = find_get_entry(mapping, offset);
+        if (radix_tree_exceptional_entry(page))
+                page = NULL;
+        if (!page)
+                goto no_page;
+
+        if (fgp_flags & FGP_LOCK) {
+                if (fgp_flags & FGP_NOWAIT) {
+                        if (!trylock_page(page)) {
+                                page_cache_release(page);
+                                return NULL;
+                        }
+                } else {
+                        lock_page(page);
+                }
+
+                /* Has the page been truncated? */
+                if (unlikely(page->mapping != mapping)) {
+                        unlock_page(page);
+                        page_cache_release(page);
+                        goto repeat;
+                }
+                VM_BUG_ON_PAGE(page->index != offset, page);
+        }
+
+        if (page && (fgp_flags & FGP_ACCESSED))
+                mark_page_accessed(page);
+
+no_page:
+        if (!page && (fgp_flags & FGP_CREAT)) {
+                int err;
+                if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
+                        cache_gfp_mask |= __GFP_WRITE;
+                if (fgp_flags & FGP_NOFS) {
+                        cache_gfp_mask &= ~__GFP_FS;
+                        radix_gfp_mask &= ~__GFP_FS;
+                }
+
+                page = __page_cache_alloc(cache_gfp_mask);
                 if (!page)
                         return NULL;
-                /*
-                 * We want a regular kernel memory (not highmem or DMA etc)
-                 * allocation for the radix tree nodes, but we need to honour
-                 * the context-specific requirements the caller has asked for.
-                 * GFP_RECLAIM_MASK collects those requirements.
-                 */
-                err = add_to_page_cache_lru(page, mapping, index,
-                        (gfp_mask & GFP_RECLAIM_MASK));
+
+                if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
+                        fgp_flags |= FGP_LOCK;
+
+                /* Init accessed so avoid atomic mark_page_accessed later */
+                if (fgp_flags & FGP_ACCESSED)
+                        init_page_accessed(page);
+
+                err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
                 if (unlikely(err)) {
                         page_cache_release(page);
                         page = NULL;
@@ -1105,9 +1103,10 @@ repeat:
                         goto repeat;
                 }
         }
+
         return page;
 }
-EXPORT_SYMBOL(find_or_create_page);
+EXPORT_SYMBOL(pagecache_get_page);
 
 /**
  * find_get_entries - gang pagecache lookup
@@ -1404,39 +1403,6 @@ repeat:
 }
 EXPORT_SYMBOL(find_get_pages_tag);
 
-/**
- * grab_cache_page_nowait - returns locked page at given index in given cache
- * @mapping: target address_space
- * @index: the page index
- *
- * Same as grab_cache_page(), but do not wait if the page is unavailable.
- * This is intended for speculative data generators, where the data can
- * be regenerated if the page couldn't be grabbed. This routine should
- * be safe to call while holding the lock for another page.
- *
- * Clear __GFP_FS when allocating the page to avoid recursion into the fs
- * and deadlock against the caller's locked page.
- */
-struct page *
-grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
-{
-        struct page *page = find_get_page(mapping, index);
-
-        if (page) {
-                if (trylock_page(page))
-                        return page;
-                page_cache_release(page);
-                return NULL;
-        }
-        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
-        if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
-                page_cache_release(page);
-                page = NULL;
-        }
-        return page;
-}
-EXPORT_SYMBOL(grab_cache_page_nowait);
-
 /*
  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
  * a _large_ part of the i/o request. Imagine the worst scenario:
@@ -2406,7 +2372,6 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
 {
         const struct address_space_operations *aops = mapping->a_ops;
 
-        mark_page_accessed(page);
         return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
 }
 EXPORT_SYMBOL(pagecache_write_end);
@@ -2488,34 +2453,18 @@ EXPORT_SYMBOL(generic_file_direct_write);
 struct page *grab_cache_page_write_begin(struct address_space *mapping,
                                         pgoff_t index, unsigned flags)
 {
-        int status;
-        gfp_t gfp_mask;
         struct page *page;
-        gfp_t gfp_notmask = 0;
+        int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;
 
-        gfp_mask = mapping_gfp_mask(mapping);
-        if (mapping_cap_account_dirty(mapping))
-                gfp_mask |= __GFP_WRITE;
         if (flags & AOP_FLAG_NOFS)
-                gfp_notmask = __GFP_FS;
-repeat:
-        page = find_lock_page(mapping, index);
+                fgp_flags |= FGP_NOFS;
+
+        page = pagecache_get_page(mapping, index, fgp_flags,
+                        mapping_gfp_mask(mapping),
+                        GFP_KERNEL);
         if (page)
-                goto found;
+                wait_for_stable_page(page);
 
-        page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
-        if (!page)
-                return NULL;
-        status = add_to_page_cache_lru(page, mapping, index,
-                                                GFP_KERNEL & ~gfp_notmask);
-        if (unlikely(status)) {
-                page_cache_release(page);
-                if (status == -EEXIST)
-                        goto repeat;
-                return NULL;
-        }
-found:
-        wait_for_stable_page(page);
         return page;
 }
 EXPORT_SYMBOL(grab_cache_page_write_begin);
@@ -2564,7 +2513,7 @@ again:
 
                 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
                                                 &page, &fsdata);
-                if (unlikely(status))
+                if (unlikely(status < 0))
                         break;
 
                 if (mapping_writably_mapped(mapping))
@@ -2573,7 +2522,6 @@ again:
                 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
                 flush_dcache_page(page);
 
-                mark_page_accessed(page);
                 status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                                 page, fsdata);
                 if (unlikely(status < 0))
diff --git a/mm/shmem.c b/mm/shmem.c
index f47fb38c4889..5402481c28d1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1372,9 +1372,13 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
                         loff_t pos, unsigned len, unsigned flags,
                         struct page **pagep, void **fsdata)
 {
+        int ret;
         struct inode *inode = mapping->host;
         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-        return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
+        ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
+        if (ret == 0 && *pagep)
+                init_page_accessed(*pagep);
+        return ret;
 }
 
 static int
diff --git a/mm/swap.c b/mm/swap.c
index 1fb25f8bb155..9e8e3472248b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -614,6 +614,17 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
+/*
+ * Used to mark_page_accessed() a page that is not visible yet, while it
+ * is still safe to use non-atomic ops
+ */
+void init_page_accessed(struct page *page)
+{
+        if (!PageReferenced(page))
+                __SetPageReferenced(page);
+}
+EXPORT_SYMBOL(init_page_accessed);
+
 static void __lru_cache_add(struct page *page)
 {
         struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
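
The point of init_page_accessed(): __SetPageReferenced() is a plain __set_bit() on a page no other CPU can reach yet, whereas mark_page_accessed() on a visible page needs atomic bitops and may touch the LRU pagevecs. Condensed from the pagecache_get_page() hunk above, the allocation-side fast path becomes roughly:

        page = __page_cache_alloc(cache_gfp_mask);
        if (!page)
                return NULL;

        /* not yet in the radix tree or on the LRU: the non-atomic
         * referenced bit is safe here */
        if (fgp_flags & FGP_ACCESSED)
                init_page_accessed(page);

        err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);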