author		Johannes Weiner <hannes@cmpxchg.org>	2010-03-05 16:42:22 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-06 14:26:27 -0500
commit		645747462435d84c6c6a64269ed49cc3015f753d (patch)
tree		4cbbddcddd429704dd4f205f6371bb329dcb0ff1
parent		31c0569c3b0b6cc8a867ac6665ca081553f7984c (diff)
vmscan: detect mapped file pages used only once
The VM currently assumes that an inactive, mapped and referenced file page
is in use and promotes it to the active list.
However, every mapped file page starts out like this and thus a problem
arises when workloads create a stream of such pages that are used only for
a short time. By flooding the active list with those pages, the VM
quickly gets into trouble finding eligible reclaim candidates. The result
is long allocation latencies and eviction of the wrong pages.
This patch reuses the PG_referenced page flag (used for unmapped file
pages) to implement a usage detection that scales with the speed of LRU
list cycling (i.e. memory pressure).
If the scanner encounters those pages, the flag is set and the page cycled
again on the inactive list. Only if it returns with another page table
reference is it activated. Otherwise it is reclaimed as 'not recently
used cache'.
This effectively changes the minimum lifetime of a used-once mapped file
page from a full memory cycle to an inactive list cycle, which allows it
to occur in linear streams without affecting the stable working set of the
system.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
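
As an orientation aid for the diff below, here is a minimal userspace sketch of the
decision flow that the new page_check_references() implements. It is illustrative
only: the PAGEREF_* values come from the patch, while check_refs(), its parameters,
and the modelling of PG_referenced as a plain bool are assumptions for the example,
and the lumpy-reclaim and VM_LOCKED early exits are omitted.

#include <stdbool.h>

/* Mirrors the enum added to mm/vmscan.c by this patch. */
enum page_references {
	PAGEREF_RECLAIM,	/* evict; write back first if dirty */
	PAGEREF_RECLAIM_CLEAN,	/* evict only if no write-out is needed */
	PAGEREF_KEEP,		/* one more trip around the inactive list */
	PAGEREF_ACTIVATE,	/* promote to the active list */
};

/*
 * Userspace stand-in for page_check_references(): referenced_ptes models
 * the page_referenced() result (accessed bits found in the page tables)
 * and *pg_referenced models the PG_referenced flag, which this patch
 * stops clearing inside page_referenced() itself.
 */
static enum page_references check_refs(bool anon, int referenced_ptes,
				       bool *pg_referenced)
{
	bool referenced_page = *pg_referenced;

	*pg_referenced = false;			/* TestClearPageReferenced() */

	if (referenced_ptes) {
		if (anon)
			return PAGEREF_ACTIVATE;
		/*
		 * Every mapped file page carries one reference from the
		 * instantiating fault, so mark it and look again on the
		 * next scan before treating it as part of the working set.
		 */
		*pg_referenced = true;		/* SetPageReferenced() */
		if (referenced_page)
			return PAGEREF_ACTIVATE;
		return PAGEREF_KEEP;
	}

	if (referenced_page)
		return PAGEREF_RECLAIM_CLEAN;
	return PAGEREF_RECLAIM;
}

In this model a used-once file page gets PAGEREF_KEEP on its first scan and
PAGEREF_RECLAIM_CLEAN on the second, while a page that picks up another page
table reference in between is activated.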
-rw-r--r--   include/linux/rmap.h |  2
-rw-r--r--   mm/rmap.c            |  3
-rw-r--r--   mm/vmscan.c          | 45
3 files changed, 36 insertions, 14 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 72be23b1480a..d25bd224d370 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -209,7 +209,7 @@ static inline int page_referenced(struct page *page, int is_locked,
 					  unsigned long *vm_flags)
 {
 	*vm_flags = 0;
-	return TestClearPageReferenced(page);
+	return 0;
 }
 
 #define try_to_unmap(page, refs) SWAP_FAIL
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -601,9 +601,6 @@ int page_referenced(struct page *page,
 	int referenced = 0;
 	int we_locked = 0;
 
-	if (TestClearPageReferenced(page))
-		referenced++;
-
 	*vm_flags = 0;
 	if (page_mapped(page) && page_rmapping(page)) {
 		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d9a0e0d3aac7..79c809895fba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -561,18 +561,18 @@ redo:
 enum page_references {
 	PAGEREF_RECLAIM,
 	PAGEREF_RECLAIM_CLEAN,
+	PAGEREF_KEEP,
 	PAGEREF_ACTIVATE,
 };
 
 static enum page_references page_check_references(struct page *page,
 						  struct scan_control *sc)
 {
+	int referenced_ptes, referenced_page;
 	unsigned long vm_flags;
-	int referenced;
 
-	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
-	if (!referenced)
-		return PAGEREF_RECLAIM;
+	referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
@@ -585,11 +585,36 @@ static enum page_references page_check_references(struct page *page,
 	if (vm_flags & VM_LOCKED)
 		return PAGEREF_RECLAIM;
 
-	if (page_mapped(page))
-		return PAGEREF_ACTIVATE;
+	if (referenced_ptes) {
+		if (PageAnon(page))
+			return PAGEREF_ACTIVATE;
+		/*
+		 * All mapped pages start out with page table
+		 * references from the instantiating fault, so we need
+		 * to look twice if a mapped file page is used more
+		 * than once.
+		 *
+		 * Mark it and spare it for another trip around the
+		 * inactive list. Another page table reference will
+		 * lead to its activation.
+		 *
+		 * Note: the mark is set for activated pages as well
+		 * so that recently deactivated but used pages are
+		 * quickly recovered.
+		 */
+		SetPageReferenced(page);
+
+		if (referenced_page)
+			return PAGEREF_ACTIVATE;
+
+		return PAGEREF_KEEP;
+	}
 
 	/* Reclaim if clean, defer dirty pages to writeback */
-	return PAGEREF_RECLAIM_CLEAN;
+	if (referenced_page)
+		return PAGEREF_RECLAIM_CLEAN;
+
+	return PAGEREF_RECLAIM;
 }
 
 /*
@@ -657,6 +682,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
+		case PAGEREF_KEEP:
+			goto keep_locked;
 		case PAGEREF_RECLAIM:
 		case PAGEREF_RECLAIM_CLEAN:
 			; /* try to reclaim the page below */
@@ -1359,9 +1386,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			continue;
 		}
 
-		/* page_referenced clears PageReferenced */
-		if (page_mapped(page) &&
-		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
+		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
 			nr_rotated++;
 			/*
 			 * Identify referenced, file-backed active pages and
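
To connect the hunks back to the changelog, the self-contained toy below replays two
inactive-list scans over a used-once mapped file page and over a page that keeps being
touched. Everything in it (the struct page fields, scan_inactive(), the three-valued
verdict) is a simplification invented for illustration; it collapses PAGEREF_RECLAIM
and PAGEREF_RECLAIM_CLEAN into one "reclaim" outcome and ignores the anonymous,
mlocked and lumpy-reclaim cases.

#include <stdbool.h>
#include <stdio.h>

enum verdict { RECLAIM, KEEP, ACTIVATE };

struct page {
	bool pte_young;		/* accessed bit in the page tables */
	bool pg_referenced;	/* the PG_referenced flag */
};

/* One pass of the inactive-list scanner over a mapped file page. */
static enum verdict scan_inactive(struct page *p)
{
	bool young = p->pte_young;
	bool referenced = p->pg_referenced;

	p->pte_young = false;		/* page_referenced() clears the bit */
	p->pg_referenced = false;	/* TestClearPageReferenced() */

	if (young) {
		p->pg_referenced = true;	/* SetPageReferenced() */
		return referenced ? ACTIVATE : KEEP;
	}
	return RECLAIM;
}

static const char *name(enum verdict v)
{
	return v == RECLAIM ? "reclaim" : v == KEEP ? "keep" : "activate";
}

int main(void)
{
	struct page used_once = { .pte_young = true };	/* faulted in, never touched again */
	struct page reused    = { .pte_young = true };	/* faulted in and used afterwards */

	printf("used-once: %s", name(scan_inactive(&used_once)));
	printf(", then %s\n", name(scan_inactive(&used_once)));

	printf("reused:    %s", name(scan_inactive(&reused)));
	reused.pte_young = true;			/* touched again between scans */
	printf(", then %s\n", name(scan_inactive(&reused)));
	return 0;
}

Run as-is it prints "keep, then reclaim" for the used-once page and "keep, then
activate" for the reused one, which is the minimum lifetime of one inactive list
cycle that the changelog describes.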