author     Johannes Weiner <hannes@cmpxchg.org>	2010-03-05 16:42:21 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2010-03-06 14:26:27 -0500
commit     31c0569c3b0b6cc8a867ac6665ca081553f7984c
tree       c3d3e02f941fed0f91981d55d93540d2acaaecbd
parent     dfc8d636cdb95f7b792d5ba8c9f3b295809c125d
vmscan: drop page_mapping_inuse()
page_mapping_inuse() is a historic predicate function for pages that are about to be reclaimed or deactivated.

According to it, a page is in use when it is mapped into page tables OR part of swap cache OR backing an mmapped file.

This function is used in combination with page_referenced(), which checks for young bits in ptes and the page descriptor itself for the PG_referenced bit.

Thus, checking for unmapped swap cache pages is meaningless as PG_referenced is not set for anonymous pages and unmapped pages do not have young ptes.  The test makes no difference.

Protecting file pages that are not by themselves mapped but are part of a mapped file is also a historic leftover for short-lived things like the exec() code in libc.  However, the VM now does reference accounting and activation of pages at unmap time and thus the special treatment on reclaim is obsolete.

This patch drops page_mapping_inuse() and switches the two callsites to use page_mapped() directly.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/vmscan.c	25
1 file changed, 2 insertions(+), 23 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ba4e87df3fc6..d9a0e0d3aac7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -262,27 +262,6 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 	return ret;
 }
 
-/* Called without lock on whether page is mapped, so answer is unstable */
-static inline int page_mapping_inuse(struct page *page)
-{
-	struct address_space *mapping;
-
-	/* Page is in somebody's page tables. */
-	if (page_mapped(page))
-		return 1;
-
-	/* Be more reluctant to reclaim swapcache than pagecache */
-	if (PageSwapCache(page))
-		return 1;
-
-	mapping = page_mapping(page);
-	if (!mapping)
-		return 0;
-
-	/* File is mmap'd by somebody? */
-	return mapping_mapped(mapping);
-}
-
 static inline int is_page_cache_freeable(struct page *page)
 {
 	/*
@@ -606,7 +585,7 @@ static enum page_references page_check_references(struct page *page,
 	if (vm_flags & VM_LOCKED)
 		return PAGEREF_RECLAIM;
 
-	if (page_mapping_inuse(page))
+	if (page_mapped(page))
 		return PAGEREF_ACTIVATE;
 
 	/* Reclaim if clean, defer dirty pages to writeback */
@@ -1381,7 +1360,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		}
 
 		/* page_referenced clears PageReferenced */
-		if (page_mapping_inuse(page) &&
+		if (page_mapped(page) &&
 		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
 			nr_rotated++;
 			/*
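
For readers not following the mm code, below is a small standalone model in plain C of the predicate being removed, with booleans standing in for page_mapped(), PageSwapCache() and mapping_mapped(). This is an illustrative sketch, not kernel code: the helper names old_page_mapping_inuse(), new_check() and the main() walk-through are invented for this example.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy restatement of the removed page_mapping_inuse().  The three flags
 * stand in for page_mapped(), PageSwapCache() and mapping_mapped() on a
 * real struct page.
 */
static bool old_page_mapping_inuse(bool mapped, bool swapcache,
				   bool file_is_mmapped)
{
	if (mapped)
		return true;		/* in somebody's page tables */
	if (swapcache)
		return true;		/* unmapped swap cache page */
	return file_is_mmapped;		/* unmapped page of an mmapped file */
}

/* What both call sites check after the patch. */
static bool new_check(bool mapped)
{
	return mapped;
}

int main(void)
{
	/*
	 * Example from the commit message: an unmapped swap cache page.
	 * page_referenced() cannot report it as referenced (no young ptes,
	 * and anon pages do not get PG_referenced), so the stricter old
	 * predicate never changes the combined "in use && referenced"
	 * decision at either call site.
	 */
	bool referenced = false;

	printf("old: activate=%d, new: activate=%d\n",
	       old_page_mapping_inuse(false, true, false) && referenced,
	       new_check(false) && referenced);
	return 0;
}

Note that this equivalence argument covers only the swap-cache clause; for unmapped pages of an mmapped file, the commit message frames the removal as a deliberate policy change (references are now accounted at unmap time), not a logical no-op.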