author      Johannes Weiner <hannes@cmpxchg.org>            2014-04-03 17:47:51 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2014-04-03 19:21:01 -0400
commit      a528910e12ec7ee203095eb1711468a66b9b60b0
tree        c9ceed84994f015e991161c91b87d47d93cd6491  /mm/filemap.c
parent      91b0abe36a7b2b3b02d7500925a5f8455334f0e5
mm: thrash detection-based file cache sizing
The VM maintains cached filesystem pages on two types of lists. One
list holds the pages recently faulted into the cache, the other list
holds pages that have been referenced repeatedly on that first list.
The idea is to prefer reclaiming young pages over those that have been shown
to benefit from caching in the past. We call the recently used list the
"inactive list" and the frequently used list the "active list".

Sizing these lists by a fixed ratio ties the VM's ability to detect frequently
used pages to the size of the inactive list. In practice this was ultimately
not significantly better than a FIFO policy and still thrashed cache based on
eviction speed, rather than actual demand for cache.
This patch solves one half of the problem by decoupling the ability to
detect working set changes from the inactive list size. By maintaining
a history of recently evicted file pages it can detect frequently used
pages with an arbitrarily small inactive list size, and subsequently
apply pressure on the active list based on actual demand for cache, not
just overall eviction speed.
Every zone maintains a counter that tracks inactive list aging speed.
When a page is evicted, a snapshot of this counter is stored in the
now-empty page cache radix tree slot. On refault, the minimum access
distance of the page can be assessed, to evaluate whether the page
should be part of the active list or not.
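To make that mechanism concrete, here is a small self-contained user-space C
model of the idea (a sketch only, not the kernel code: all names, sizes and the
activation threshold below are invented for illustration). A single counter
ages the inactive list, an eviction leaves behind a snapshot of that counter as
a shadow entry, and a refault compares the current counter against the snapshot
to estimate the minimum reuse distance and decide whether the page should start
out on the active list.

#include <stdio.h>
#include <stdbool.h>

/* Toy model of one "zone": the lists are represented only by a size and an
 * aging counter.  None of these names correspond to kernel symbols. */

#define NR_PAGES	8		/* pages tracked by the model */

static unsigned long inactive_age;	/* bumps whenever the inactive list ages */
static unsigned long shadow[NR_PAGES];	/* snapshot left behind on eviction */
static bool has_shadow[NR_PAGES];
static unsigned long nr_active = 4;	/* assumed activation threshold */

/* Page 'i' is reclaimed from the inactive list: store a shadow entry. */
static void evict(int i)
{
	shadow[i] = ++inactive_age;	/* remember how old the list was */
	has_shadow[i] = true;
}

/* Page 'i' is faulted back in.  Returns true if its minimum reuse distance
 * is small enough that it should be activated immediately. */
static bool refault_should_activate(int i)
{
	unsigned long distance;

	if (!has_shadow[i])
		return false;		/* never seen before: start inactive */
	has_shadow[i] = false;

	/* Minimum amount of aging that happened since this page was evicted. */
	distance = inactive_age - shadow[i];

	/* If that distance fits within the (assumed) threshold, the page
	 * would have been re-referenced while still cached: activate it. */
	return distance <= nr_active;
}

int main(void)
{
	evict(3);			/* page 3 falls off the inactive list */
	evict(5);
	inactive_age += 2;		/* some unrelated aging happens */

	printf("page 3 activate on refault? %d\n", refault_should_activate(3));
	inactive_age += 10;		/* a long time passes */
	printf("page 5 activate on refault? %d\n", refault_should_activate(5));
	return 0;
}

In the real kernel the result of this comparison is what the diff below
consumes via workingset_refault(); the toy nr_active threshold merely stands in
for whatever size the actual implementation measures the distance against.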
This fixes the VM's blindness towards working set changes that exceed the size
of the inactive list, and it is the foundation for further improving the
protection ability and for reducing the minimum inactive list size below 50%.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Bob Liu <bob.liu@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Luigi Semenzato <semenzato@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Metin Doslu <metin@citusdata.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ozgun Erdogan <ozgun@citusdata.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <klamm@yandex-team.ru>
Cc: Ryan Mallon <rmallon@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  61
1 file changed, 44 insertions, 17 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 05c44aa44188..a603c4d7d3c9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -469,7 +469,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
 static int page_cache_tree_insert(struct address_space *mapping,
-				  struct page *page)
+				  struct page *page, void **shadowp)
 {
 	void **slot;
 	int error;
@@ -484,6 +484,8 @@ static int page_cache_tree_insert(struct address_space *mapping,
 		radix_tree_replace_slot(slot, page);
 		mapping->nrshadows--;
 		mapping->nrpages++;
+		if (shadowp)
+			*shadowp = p;
 		return 0;
 	}
 	error = radix_tree_insert(&mapping->page_tree, page->index, page);
@@ -492,18 +494,10 @@ static int page_cache_tree_insert(struct address_space *mapping,
 	return error;
 }
 
-/**
- * add_to_page_cache_locked - add a locked page to the pagecache
- * @page:	page to add
- * @mapping:	the page's address_space
- * @offset:	page index
- * @gfp_mask:	page allocation mode
- *
- * This function is used to add a page to the pagecache.  It must be locked.
- * This function does not add the page to the LRU.  The caller must do that.
- */
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t offset, gfp_t gfp_mask)
+static int __add_to_page_cache_locked(struct page *page,
+				      struct address_space *mapping,
+				      pgoff_t offset, gfp_t gfp_mask,
+				      void **shadowp)
 {
 	int error;
 
@@ -526,7 +520,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	page->index = offset;
 
 	spin_lock_irq(&mapping->tree_lock);
-	error = page_cache_tree_insert(mapping, page);
+	error = page_cache_tree_insert(mapping, page, shadowp);
 	radix_tree_preload_end();
 	if (unlikely(error))
 		goto err_insert;
@@ -542,16 +536,49 @@ err_insert:
 	page_cache_release(page);
 	return error;
 }
+
+/**
+ * add_to_page_cache_locked - add a locked page to the pagecache
+ * @page:	page to add
+ * @mapping:	the page's address_space
+ * @offset:	page index
+ * @gfp_mask:	page allocation mode
+ *
+ * This function is used to add a page to the pagecache.  It must be locked.
+ * This function does not add the page to the LRU.  The caller must do that.
+ */
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+			     pgoff_t offset, gfp_t gfp_mask)
+{
+	return __add_to_page_cache_locked(page, mapping, offset,
+					  gfp_mask, NULL);
+}
 EXPORT_SYMBOL(add_to_page_cache_locked);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 			  pgoff_t offset, gfp_t gfp_mask)
 {
+	void *shadow = NULL;
 	int ret;
 
-	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
-	if (ret == 0)
-		lru_cache_add_file(page);
+	__set_page_locked(page);
+	ret = __add_to_page_cache_locked(page, mapping, offset,
+					 gfp_mask, &shadow);
+	if (unlikely(ret))
+		__clear_page_locked(page);
+	else {
+		/*
+		 * The page might have been evicted from cache only
+		 * recently, in which case it should be activated like
+		 * any other repeatedly accessed page.
+		 */
+		if (shadow && workingset_refault(shadow)) {
+			SetPageActive(page);
+			workingset_activation(page);
+		} else
+			ClearPageActive(page);
+		lru_cache_add(page);
+	}
 	return ret;
 }
 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
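Note that add_to_page_cache_locked() keeps its previous behavior by passing a
NULL shadowp, so existing callers of either function need no changes; only
add_to_page_cache_lru() consults the shadow entry, since activation is only
meaningful when the page is actually being placed on the LRU. For orientation,
a typical read-path caller around this kernel version looked roughly like the
sketch below (an illustrative kernel-context fragment, not part of this patch,
not buildable on its own, and with error handling simplified):

	/* Illustrative caller pattern only (kernel context, simplified). */
	struct page *page = page_cache_alloc_cold(mapping);
	if (page) {
		int error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
		if (!error)
			error = mapping->a_ops->readpage(file, page);	/* read data into the page */
		else
			page_cache_release(page);	/* insert failed; drop our reference */
	}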