path: root/mm
author		Johannes Weiner <hannes@cmpxchg.org>	2009-01-06 17:39:17 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-06 18:59:00 -0500
commit		4917e5d0499b5ae7b26b56fccaefddf9aec9369c (patch)
tree		78f55c2d3019d1811b5447cae130c0f647916a37 /mm
parent		64cdd548ffe26849d4cd113ac640f60606063b14 (diff)
mm: more likely reclaim MADV_SEQUENTIAL mappings
File pages mapped only in sequentially read mappings are perfect reclaim candidates.

This patch makes these mappings behave like weak references; their pages will be reclaimed unless they have a strong reference from a normal mapping as well.

It changes the reclaim and the unmap paths where they check whether the page has been referenced. In both cases, accesses through sequentially read mappings will be ignored.

Benchmark results from KOSAKI Motohiro: http://marc.info/?l=linux-mm&m=122485301925098&w=2

Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
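For reference, a mapping opts into this treatment from userspace with madvise(MADV_SEQUENTIAL). A minimal sketch of such a one-pass reader follows; the file path and the trivial error handling are illustrative only and not part of this patch:

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		struct stat st;
		char *p;
		off_t i;
		volatile char sum = 0;
		int fd = open("/tmp/bigfile", O_RDONLY);	/* placeholder path */

		if (fd < 0 || fstat(fd, &st) < 0)
			return 1;

		p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* Hint that the mapping is read front to back exactly once;
		   with this patch, pages referenced only through it become
		   easy reclaim targets. */
		madvise(p, st.st_size, MADV_SEQUENTIAL);

		for (i = 0; i < st.st_size; i++)
			sum += p[i];
		(void)sum;

		munmap(p, st.st_size);
		close(fd);
		return 0;
	}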
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	3
-rw-r--r--	mm/rmap.c	13
2 files changed, 13 insertions, 3 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 5e0e91cc6b67..99e8d5c7b312 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -767,7 +767,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			else {
 				if (pte_dirty(ptent))
 					set_page_dirty(page);
-				if (pte_young(ptent))
+				if (pte_young(ptent) &&
+				    likely(!VM_SequentialReadHint(vma)))
 					mark_page_accessed(page);
 				file_rss--;
 			}
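For context, VM_SequentialReadHint() is not introduced by this patch; at this point in the tree it is simply a vm_flags test defined in include/linux/mm.h, roughly:

	#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)

so a young PTE that is only reachable through a MADV_SEQUENTIAL mapping no longer promotes the page when it is unmapped here.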
diff --git a/mm/rmap.c b/mm/rmap.c
index 53c56dacd725..f01e92244c53 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -360,8 +360,17 @@ static int page_referenced_one(struct page *page,
 		goto out_unmap;
 	}
 
-	if (ptep_clear_flush_young_notify(vma, address, pte))
-		referenced++;
+	if (ptep_clear_flush_young_notify(vma, address, pte)) {
+		/*
+		 * Don't treat a reference through a sequentially read
+		 * mapping as such.  If the page has been used in
+		 * another mapping, we will catch it; if this other
+		 * mapping is already gone, the unmap path will have
+		 * set PG_referenced or activated the page.
+		 */
+		if (likely(!VM_SequentialReadHint(vma)))
+			referenced++;
+	}
 
 	/* Pretend the page is referenced if the task has the
 	   swap token and is in the middle of a page fault. */
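The VM_SEQ_READ bit that both hunks test is set when userspace calls madvise(MADV_SEQUENTIAL) on the VMA; mm/madvise.c adjusts the flags along these lines (paraphrased excerpt of madvise_behavior(), not part of this change):

	/* madvise_behavior(): adjust vma->vm_flags for the given advice */
	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags | VM_RAND_READ) & ~VM_SEQ_READ;
		break;
	}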