author     Wu Fengguang <fengguang.wu@intel.com>            2009-06-16 18:33:05 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-06-16 22:47:44 -0400
commit     6fe6b7e35785e3232ffe7f81d3893f1316710a02 (patch)
tree       6f47c03735504d8aab8f7b048465b87cc5b15861
parent     608e8e66a154cbc3d591a59dcebfd9cbc9e3431a (diff)
vmscan: report vm_flags in page_referenced()
Collect vma->vm_flags of the VMAs that actually referenced the page.

This prepares for more informed reclaim heuristics, e.g. to protect
executable file pages more aggressively.  For now only the VM_EXEC bit
will be used by the caller.

Thanks to Johannes, Peter and Minchan for all the good tips.

Acked-by: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/rmap.h   5
-rw-r--r--  mm/rmap.c             37
-rw-r--r--  mm/vmscan.c            7
3 files changed, 34 insertions(+), 15 deletions(-)
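Calling convention of the new interface (a minimal illustrative sketch, not
part of the patch; 'page' and 'sc' are assumed to exist as in the
shrink_page_list() caller changed below). The vm_flags argument is a
caller-owned out-parameter: page_referenced() zeroes it, then ORs in the
vma->vm_flags of every VMA whose pte actually referenced the page.

	unsigned long vm_flags;
	int referenced;

	/* page is locked here (is_locked == 1), as in shrink_page_list() */
	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);

	/* vm_flags now holds the union of the vm_flags of the referencing
	 * VMAs; it stays 0 if only the PG_referenced page flag fired. */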
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 619379a1dd98..216d024f830d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -83,7 +83,8 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
+int page_referenced(struct page *, int is_locked,
+			struct mem_cgroup *cnt, unsigned long *vm_flags);
 int try_to_unmap(struct page *, int ignore_refs);
 
 /*
@@ -117,7 +118,7 @@ int try_to_munlock(struct page *);
 #define anon_vma_prepare(vma)	(0)
 #define anon_vma_link(vma)	do {} while (0)
 
-#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
+#define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
 static inline int page_mkclean(struct page *page)
diff --git a/mm/rmap.c b/mm/rmap.c
index 316c9d6930ad..c9ccc1a72dc3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -333,7 +333,9 @@ static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
 static int page_referenced_one(struct page *page,
-		struct vm_area_struct *vma, unsigned int *mapcount)
+			       struct vm_area_struct *vma,
+			       unsigned int *mapcount,
+			       unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -381,11 +383,14 @@ out_unmap:
 	(*mapcount)--;
 	pte_unmap_unlock(pte, ptl);
 out:
+	if (referenced)
+		*vm_flags |= vma->vm_flags;
 	return referenced;
 }
 
 static int page_referenced_anon(struct page *page,
-				struct mem_cgroup *mem_cont)
+				struct mem_cgroup *mem_cont,
+				unsigned long *vm_flags)
 {
 	unsigned int mapcount;
 	struct anon_vma *anon_vma;
@@ -405,7 +410,8 @@ static int page_referenced_anon(struct page *page,
 		 */
 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
-		referenced += page_referenced_one(page, vma, &mapcount);
+		referenced += page_referenced_one(page, vma,
+						  &mapcount, vm_flags);
 		if (!mapcount)
 			break;
 	}
@@ -418,6 +424,7 @@ static int page_referenced_anon(struct page *page,
  * page_referenced_file - referenced check for object-based rmap
  * @page: the page we're checking references on.
  * @mem_cont: target memory controller
+ * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
  *
  * For an object-based mapped page, find all the places it is mapped and
  * check/clear the referenced flag.  This is done by following the page->mapping
@@ -427,7 +434,8 @@ static int page_referenced_anon(struct page *page,
  * This function is only called from page_referenced for object-based pages.
  */
 static int page_referenced_file(struct page *page,
-				struct mem_cgroup *mem_cont)
+				struct mem_cgroup *mem_cont,
+				unsigned long *vm_flags)
 {
 	unsigned int mapcount;
 	struct address_space *mapping = page->mapping;
@@ -467,7 +475,8 @@ static int page_referenced_file(struct page *page,
 		 */
 		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
-		referenced += page_referenced_one(page, vma, &mapcount);
+		referenced += page_referenced_one(page, vma,
+						  &mapcount, vm_flags);
 		if (!mapcount)
 			break;
 	}
@@ -481,29 +490,35 @@ static int page_referenced_file(struct page *page,
  * @page: the page to test
  * @is_locked: caller holds lock on the page
  * @mem_cont: target memory controller
+ * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
  *
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked,
-			struct mem_cgroup *mem_cont)
+int page_referenced(struct page *page,
+		    int is_locked,
+		    struct mem_cgroup *mem_cont,
+		    unsigned long *vm_flags)
 {
 	int referenced = 0;
 
 	if (TestClearPageReferenced(page))
 		referenced++;
 
+	*vm_flags = 0;
 	if (page_mapped(page) && page->mapping) {
 		if (PageAnon(page))
-			referenced += page_referenced_anon(page, mem_cont);
+			referenced += page_referenced_anon(page, mem_cont,
+								vm_flags);
 		else if (is_locked)
-			referenced += page_referenced_file(page, mem_cont);
+			referenced += page_referenced_file(page, mem_cont,
+								vm_flags);
 		else if (!trylock_page(page))
 			referenced++;
 		else {
 			if (page->mapping)
-				referenced +=
-					page_referenced_file(page, mem_cont);
+				referenced += page_referenced_file(page,
+							mem_cont, vm_flags);
 			unlock_page(page);
 		}
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 52339dd7bf85..6be2068f61c8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -577,6 +577,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
 	unsigned long nr_reclaimed = 0;
+	unsigned long vm_flags;
 
 	cond_resched();
 
@@ -627,7 +628,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep_locked;
 		}
 
-		referenced = page_referenced(page, 1, sc->mem_cgroup);
+		referenced = page_referenced(page, 1,
+						sc->mem_cgroup, &vm_flags);
 		/* In active use or really unfreeable?  Activate it. */
 		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
 					referenced && page_mapping_inuse(page))
@@ -1208,6 +1210,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 {
 	unsigned long pgmoved;
 	unsigned long pgscanned;
+	unsigned long vm_flags;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);
 	struct page *page;
@@ -1248,7 +1251,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 		/* page_referenced clears PageReferenced */
 		if (page_mapping_inuse(page) &&
-		    page_referenced(page, 0, sc->mem_cgroup))
+		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags))
 			pgmoved++;
 
 		list_add(&page->lru, &l_inactive);
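As the log says, only the VM_EXEC bit is meant to be consumed at first. A
hypothetical consumer (not in this patch; sketched under the assumption of a
follow-up that gives shrink_active_list() a separate l_active list) could
protect executable file pages like this:

	/* Hypothetical follow-up: re-activate file pages that were
	 * referenced through an executable mapping. 'l_active' is an
	 * assumed list, not present in this version of the function. */
	if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags) &&
	    (vm_flags & VM_EXEC) && page_is_file_cache(page)) {
		list_add(&page->lru, &l_active);
		continue;
	}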