Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 45 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 35 insertions(+), 10 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d9a0e0d3aac7..79c809895fba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -561,18 +561,18 @@ redo:
 enum page_references {
 	PAGEREF_RECLAIM,
 	PAGEREF_RECLAIM_CLEAN,
+	PAGEREF_KEEP,
 	PAGEREF_ACTIVATE,
 };
 
 static enum page_references page_check_references(struct page *page,
 						  struct scan_control *sc)
 {
+	int referenced_ptes, referenced_page;
 	unsigned long vm_flags;
-	int referenced;
 
-	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
-	if (!referenced)
-		return PAGEREF_RECLAIM;
+	referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
@@ -585,11 +585,36 @@ static enum page_references page_check_references(struct page *page,
 	if (vm_flags & VM_LOCKED)
 		return PAGEREF_RECLAIM;
 
-	if (page_mapped(page))
-		return PAGEREF_ACTIVATE;
+	if (referenced_ptes) {
+		if (PageAnon(page))
+			return PAGEREF_ACTIVATE;
+		/*
+		 * All mapped pages start out with page table
+		 * references from the instantiating fault, so we need
+		 * to look twice if a mapped file page is used more
+		 * than once.
+		 *
+		 * Mark it and spare it for another trip around the
+		 * inactive list. Another page table reference will
+		 * lead to its activation.
+		 *
+		 * Note: the mark is set for activated pages as well
+		 * so that recently deactivated but used pages are
+		 * quickly recovered.
+		 */
+		SetPageReferenced(page);
+
+		if (referenced_page)
+			return PAGEREF_ACTIVATE;
+
+		return PAGEREF_KEEP;
+	}
 
 	/* Reclaim if clean, defer dirty pages to writeback */
-	return PAGEREF_RECLAIM_CLEAN;
+	if (referenced_page)
+		return PAGEREF_RECLAIM_CLEAN;
+
+	return PAGEREF_RECLAIM;
 }
 
 /*
@@ -657,6 +682,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
+		case PAGEREF_KEEP:
+			goto keep_locked;
 		case PAGEREF_RECLAIM:
 		case PAGEREF_RECLAIM_CLEAN:
 			; /* try to reclaim the page below */
@@ -1359,9 +1386,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			continue;
 		}
 
-		/* page_referenced clears PageReferenced */
-		if (page_mapped(page) &&
-		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
+		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
 			nr_rotated++;
 			/*
 			 * Identify referenced, file-backed active pages and
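For illustration, here is a minimal standalone sketch of the decision table that the new page_check_references() implements, reduced to its two inputs: whether a page table entry referenced the page since the last scan, and whether the software PageReferenced flag was already set. All sim_* names are invented for this example and are not kernel APIs; locking, lumpy reclaim, and mlock handling from the real function are deliberately omitted.

/*
 * Toy userspace model of the used-once detection above.
 * Not kernel code: sim_page, sim_check_references and the
 * SIM_* values are hypothetical names for this sketch only.
 */
#include <stdbool.h>
#include <stdio.h>

enum sim_references {
	SIM_RECLAIM,		/* no references at all: reclaim */
	SIM_RECLAIM_CLEAN,	/* only the flag was set: reclaim if clean */
	SIM_KEEP,		/* first PTE reference on a file page: keep */
	SIM_ACTIVATE,		/* anon page, or second use of a file page */
};

struct sim_page {
	bool anon;		/* anonymous vs. file-backed */
	bool referenced_flag;	/* models the PageReferenced() flag */
};

static enum sim_references sim_check_references(struct sim_page *page,
						bool referenced_ptes)
{
	/* models TestClearPageReferenced(): read and clear the flag */
	bool referenced_page = page->referenced_flag;

	page->referenced_flag = false;

	if (referenced_ptes) {
		if (page->anon)
			return SIM_ACTIVATE;
		/*
		 * A file page always carries one PTE reference from
		 * the fault that mapped it, so remember this sighting
		 * and only activate on the second one.
		 */
		page->referenced_flag = true;
		if (referenced_page)
			return SIM_ACTIVATE;
		return SIM_KEEP;
	}

	if (referenced_page)
		return SIM_RECLAIM_CLEAN;
	return SIM_RECLAIM;
}

int main(void)
{
	struct sim_page file_page = { .anon = false };

	/* scan 1: only the instantiating fault's reference -> SIM_KEEP */
	printf("scan 1: %d\n", sim_check_references(&file_page, true));
	/* scan 2: the page was used again in between -> SIM_ACTIVATE */
	printf("scan 2: %d\n", sim_check_references(&file_page, true));
	return 0;
}

Run against a file-backed page, the sketch yields SIM_KEEP on the first referenced scan and SIM_ACTIVATE on the second, matching the "look twice" rule described in the comment block of the patch; a page whose flag is set but that gains no new PTE reference falls through to SIM_RECLAIM_CLEAN.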