| author | Wu Fengguang <fengguang.wu@intel.com> | 2009-06-16 18:33:05 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 22:47:44 -0400 |
| commit | 6fe6b7e35785e3232ffe7f81d3893f1316710a02 | |
| tree | 6f47c03735504d8aab8f7b048465b87cc5b15861 /mm/vmscan.c | |
| parent | 608e8e66a154cbc3d591a59dcebfd9cbc9e3431a | |
vmscan: report vm_flags in page_referenced()
Collect vma->vm_flags of the VMAs that actually referenced the page.
This prepares for more informed reclaim heuristics, e.g. protecting
executable file pages more aggressively. For now only the VM_EXEC bit
will be used by the caller.
Thanks to Johannes, Peter and Minchan for all the good tips.
Acked-by: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
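
The change threads a new out-parameter through page_referenced(). Below is a minimal sketch of how a caller consumes the reported flags, matching the call sites in the diff that follows. The VM_EXEC test and the keep_on_active_list() helper are illustrative assumptions only; this commit merely adds the reporting, and the VM_EXEC heuristic itself lands in a follow-up:

```c
unsigned long vm_flags;
int referenced;

/*
 * Ask rmap whether the page was referenced, and collect the
 * vm_flags of the VMAs through which it was referenced.
 */
referenced = page_referenced(page, 0, sc->mem_cgroup, &vm_flags);

/*
 * Illustrative only: a referenced page mapped executable could be
 * treated more gently by reclaim. keep_on_active_list() is a
 * hypothetical helper, not part of this commit.
 */
if (referenced && (vm_flags & VM_EXEC))
	keep_on_active_list(page);
```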
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 52339dd7bf85..6be2068f61c8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -577,6 +577,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
 	unsigned long nr_reclaimed = 0;
+	unsigned long vm_flags;
 
 	cond_resched();
 
@@ -627,7 +628,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep_locked;
 		}
 
-		referenced = page_referenced(page, 1, sc->mem_cgroup);
+		referenced = page_referenced(page, 1,
+						sc->mem_cgroup, &vm_flags);
 		/* In active use or really unfreeable?  Activate it. */
 		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
 					referenced && page_mapping_inuse(page))
@@ -1208,6 +1210,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 {
 	unsigned long pgmoved;
 	unsigned long pgscanned;
+	unsigned long vm_flags;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);
 	struct page *page;
@@ -1248,7 +1251,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 		/* page_referenced clears PageReferenced */
 		if (page_mapping_inuse(page) &&
-		    page_referenced(page, 0, sc->mem_cgroup))
+		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags))
 			pgmoved++;
 
 		list_add(&page->lru, &l_inactive);
```
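
For reference, the prototype implied by the two updated call sites. The actual declaration lives in include/linux/rmap.h, which this mm/vmscan.c-limited view does not show, so treat this as a reconstruction rather than the commit's literal hunk:

```c
/*
 * Reconstructed from the call sites above, not from the (unshown)
 * include/linux/rmap.h change in this commit.
 */
int page_referenced(struct page *page, int is_locked,
		    struct mem_cgroup *mem_cont,
		    unsigned long *vm_flags);
```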