-rw-r--r--	include/linux/rmap.h	 2
-rw-r--r--	mm/rmap.c	 3
-rw-r--r--	mm/vmscan.c	45
3 files changed, 36 insertions, 14 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 72be23b1480a..d25bd224d370 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -209,7 +209,7 @@ static inline int page_referenced(struct page *page, int is_locked,
 					unsigned long *vm_flags)
 {
 	*vm_flags = 0;
-	return TestClearPageReferenced(page);
+	return 0;
 }
 
 #define try_to_unmap(page, refs) SWAP_FAIL
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -601,9 +601,6 @@ int page_referenced(struct page *page,
 	int referenced = 0;
 	int we_locked = 0;
 
-	if (TestClearPageReferenced(page))
-		referenced++;
-
 	*vm_flags = 0;
 	if (page_mapped(page) && page_rmapping(page)) {
 		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
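Taken together, the two rmap changes narrow page_referenced() to reporting page-table references only: the !CONFIG_MMU stub now returns 0, and the MMU version no longer folds in TestClearPageReferenced(). Callers that care about the software PG_referenced bit now sample it themselves. A minimal sketch of the caller-side pattern this establishes (variable names taken from the vmscan.c hunk below; the surrounding function is omitted):

	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	/* accessed bits in the page tables, gathered via rmap */
	referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);

	/* software bit set by mark_page_accessed(), cleared as it is read */
	referenced_page = TestClearPageReferenced(page);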
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d9a0e0d3aac7..79c809895fba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -561,18 +561,18 @@ redo:
 enum page_references {
 	PAGEREF_RECLAIM,
 	PAGEREF_RECLAIM_CLEAN,
+	PAGEREF_KEEP,
 	PAGEREF_ACTIVATE,
 };
 
 static enum page_references page_check_references(struct page *page,
 						  struct scan_control *sc)
 {
+	int referenced_ptes, referenced_page;
 	unsigned long vm_flags;
-	int referenced;
 
-	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
-	if (!referenced)
-		return PAGEREF_RECLAIM;
+	referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
@@ -585,11 +585,36 @@ static enum page_references page_check_references(struct page *page,
 	if (vm_flags & VM_LOCKED)
 		return PAGEREF_RECLAIM;
 
-	if (page_mapped(page))
-		return PAGEREF_ACTIVATE;
+	if (referenced_ptes) {
+		if (PageAnon(page))
+			return PAGEREF_ACTIVATE;
+		/*
+		 * All mapped pages start out with page table
+		 * references from the instantiating fault, so we need
+		 * to look twice if a mapped file page is used more
+		 * than once.
+		 *
+		 * Mark it and spare it for another trip around the
+		 * inactive list.  Another page table reference will
+		 * lead to its activation.
+		 *
+		 * Note: the mark is set for activated pages as well
+		 * so that recently deactivated but used pages are
+		 * quickly recovered.
+		 */
+		SetPageReferenced(page);
+
+		if (referenced_page)
+			return PAGEREF_ACTIVATE;
+
+		return PAGEREF_KEEP;
+	}
 
 	/* Reclaim if clean, defer dirty pages to writeback */
-	return PAGEREF_RECLAIM_CLEAN;
+	if (referenced_page)
+		return PAGEREF_RECLAIM_CLEAN;
+
+	return PAGEREF_RECLAIM;
 }
 
 /*
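The rewritten page_check_references() distinguishes the two reference sources. Its outcomes, summarized from the hunk above (lumpy-reclaim and VM_LOCKED early returns aside):

	referenced_ptes | PageAnon | referenced_page | result
	----------------+----------+-----------------+------------------------------
	       0        |    -     |        0        | PAGEREF_RECLAIM
	       0        |    -     |        1        | PAGEREF_RECLAIM_CLEAN
	      >0        |   yes    |        -        | PAGEREF_ACTIVATE
	      >0        |   no     |        0        | PAGEREF_KEEP, PG_referenced set
	      >0        |   no     |        1        | PAGEREF_ACTIVATE, PG_referenced set

A mapped file page therefore needs to be seen used twice before activation: the first page-table reference only earns it the PG_referenced mark and another trip around the inactive list (PAGEREF_KEEP); a second reference, found while that mark is still set, activates it.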
@@ -657,6 +682,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
+		case PAGEREF_KEEP:
+			goto keep_locked;
 		case PAGEREF_RECLAIM:
 		case PAGEREF_RECLAIM_CLEAN:
 			; /* try to reclaim the page below */
@@ -1359,9 +1386,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			continue;
 		}
 
-		/* page_referenced clears PageReferenced */
-		if (page_mapped(page) &&
-		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
+		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
 			nr_rotated++;
 			/*
 			 * Identify referenced, file-backed active pages and
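With page_referenced() no longer clearing PG_referenced, the old comment in shrink_active_list() is obsolete, and the page_mapped() guard becomes redundant: page_referenced() already returns 0 for unmapped pages (see the page_mapped() check in the mm/rmap.c context above). PG_referenced is left alone here, so a page that is deactivated shortly after being used still carries the mark when the used-once logic encounters it on the inactive list.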