Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  106
1 files changed, 68 insertions, 38 deletions
diff --git a/mm/swap.c b/mm/swap.c
index dfd7d71d6841..4a1d0d2c52fa 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,10 +34,13 @@
 
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/pagemap.h>
+
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
@@ -384,6 +387,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 		SetPageActive(page);
 		lru += LRU_ACTIVE;
 		add_page_to_lru_list(page, lruvec, lru);
+		trace_mm_lru_activate(page, page_to_pfn(page));
 
 		__count_vm_event(PGACTIVATE);
 		update_page_reclaim_stat(lruvec, file, 1);
@@ -428,6 +432,33 @@ void activate_page(struct page *page)
 }
 #endif
 
+static void __lru_cache_activate_page(struct page *page)
+{
+	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+	int i;
+
+	/*
+	 * Search backwards on the optimistic assumption that the page being
+	 * activated has just been added to this pagevec. Note that only
+	 * the local pagevec is examined as a !PageLRU page could be in the
+	 * process of being released, reclaimed, migrated or on a remote
+	 * pagevec that is currently being drained. Furthermore, marking
+	 * a remote pagevec's page PageActive potentially hits a race where
+	 * a page is marked PageActive just after it is added to the inactive
+	 * list causing accounting errors and BUG_ON checks to trigger.
+	 */
+	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
+		struct page *pagevec_page = pvec->pages[i];
+
+		if (pagevec_page == page) {
+			SetPageActive(page);
+			break;
+		}
+	}
+
+	put_cpu_var(lru_add_pvec);
+}
+
 /*
  * Mark a page as having seen activity.
  *
@@ -438,8 +469,18 @@ void activate_page(struct page *page)
 void mark_page_accessed(struct page *page)
 {
 	if (!PageActive(page) && !PageUnevictable(page) &&
-			PageReferenced(page) && PageLRU(page)) {
-		activate_page(page);
+			PageReferenced(page)) {
+
+		/*
+		 * If the page is on the LRU, queue it for activation via
+		 * activate_page_pvecs. Otherwise, assume the page is on a
+		 * pagevec, mark it active and it'll be moved to the active
+		 * LRU on the next drain.
+		 */
+		if (PageLRU(page))
+			activate_page(page);
+		else
+			__lru_cache_activate_page(page);
 		ClearPageReferenced(page);
 	} else if (!PageReferenced(page)) {
 		SetPageReferenced(page);
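
The mark_page_accessed() change above is easiest to read as two cases: a page already on the LRU is activated through the existing activate_page() path, while a page still sitting in the CPU-local lru_add_pvec gets its Active bit set in place by __lru_cache_activate_page() and only moves to the active list at the next drain. The following standalone C sketch is a simplified userspace model of that flow; it is not kernel code, and every name in it (model_page, local_pvec, cache_add, mark_accessed and so on) is invented for illustration.

/*
 * Simplified userspace model of the activation path, for illustration only.
 * Nothing here is kernel API; the buffer plays the role of the per-CPU
 * lru_add_pvec and the flags stand in for PageLRU/PageActive/PageReferenced.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_PVEC_SIZE 14

struct model_page {
	bool on_lru;		/* stands in for PageLRU */
	bool active;		/* stands in for PageActive */
	bool referenced;	/* stands in for PageReferenced */
};

static struct model_page *local_pvec[MODEL_PVEC_SIZE];
static int local_count;

/* Analogue of __lru_cache_add(): queue the page and defer LRU placement. */
static void cache_add(struct model_page *page)
{
	if (local_count == MODEL_PVEC_SIZE)
		local_count = 0;	/* a real drain moves pages to the LRU */
	local_pvec[local_count++] = page;
}

/* Analogue of __lru_cache_activate_page(): scan the local buffer backwards. */
static void activate_in_local_pvec(struct model_page *page)
{
	for (int i = local_count - 1; i >= 0; i--) {
		if (local_pvec[i] == page) {
			page->active = true;
			break;
		}
	}
}

/* Analogue of the reworked mark_page_accessed(). */
static void mark_accessed(struct model_page *page)
{
	if (!page->active && page->referenced) {
		if (page->on_lru)
			page->active = true;	/* kernel: activate_page() */
		else
			activate_in_local_pvec(page);
		page->referenced = false;
	} else if (!page->referenced) {
		page->referenced = true;
	}
}

int main(void)
{
	struct model_page p = { .on_lru = false };

	cache_add(&p);		/* queued on the local buffer, not on the LRU */
	mark_accessed(&p);	/* first access: only sets the referenced flag */
	mark_accessed(&p);	/* second access: activates it in the buffer   */
	printf("active=%d referenced=%d\n", p.active, p.referenced);
	return 0;
}

Running the model shows the second access activating the page even though it never reached the LRU, which is exactly the window the old PageLRU(page) test in mark_page_accessed() used to miss.
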
@@ -448,42 +489,37 @@ void mark_page_accessed(struct page *page)
 EXPORT_SYMBOL(mark_page_accessed);
 
 /*
- * Order of operations is important: flush the pagevec when it's already
- * full, not when adding the last page, to make sure that last page is
- * not added to the LRU directly when passed to this function. Because
- * mark_page_accessed() (called after this when writing) only activates
- * pages that are on the LRU, linear writes in subpage chunks would see
- * every PAGEVEC_SIZE page activated, which is unexpected.
+ * Queue the page for addition to the LRU via pagevec. The decision on whether
+ * to add the page to the [in]active [file|anon] list is deferred until the
+ * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
+ * to have the page added to the active list using mark_page_accessed().
  */
-void __lru_cache_add(struct page *page, enum lru_list lru)
+void __lru_cache_add(struct page *page)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
 	page_cache_get(page);
 	if (!pagevec_space(pvec))
-		__pagevec_lru_add(pvec, lru);
+		__pagevec_lru_add(pvec);
 	pagevec_add(pvec, page);
-	put_cpu_var(lru_add_pvecs);
+	put_cpu_var(lru_add_pvec);
 }
 EXPORT_SYMBOL(__lru_cache_add);
 
 /**
- * lru_cache_add_lru - add a page to a page list
+ * lru_cache_add - add a page to a page list
  * @page: the page to be added to the LRU.
- * @lru: the LRU list to which the page is added.
  */
-void lru_cache_add_lru(struct page *page, enum lru_list lru)
+void lru_cache_add(struct page *page)
 {
 	if (PageActive(page)) {
 		VM_BUG_ON(PageUnevictable(page));
-		ClearPageActive(page);
 	} else if (PageUnevictable(page)) {
 		VM_BUG_ON(PageActive(page));
-		ClearPageUnevictable(page);
 	}
 
-	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
-	__lru_cache_add(page, lru);
+	VM_BUG_ON(PageLRU(page));
+	__lru_cache_add(page);
 }
 
 /**
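
For callers, the change in this hunk means the enum lru_list argument disappears: where a call site previously named the target list, the list is now derived from the page's own flags when the pagevec is drained. A hypothetical before/after conversion (the real call-site updates live elsewhere in the series, so this is only a sketch) looks like:

	/* before: the caller names the target list */
	lru_cache_add_lru(page, LRU_INACTIVE_FILE);

	/* after: the page's flags decide at drain time */
	lru_cache_add(page);

A page that should start out active simply has PageActive set before it is queued, which is also why lru_cache_add() no longer clears the Active and Unevictable bits the way lru_cache_add_lru() did.
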
@@ -583,15 +619,10 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
  */
 void lru_add_drain_cpu(int cpu)
 {
-	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
-	struct pagevec *pvec;
-	int lru;
+	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
 
-	for_each_lru(lru) {
-		pvec = &pvecs[lru - LRU_BASE];
-		if (pagevec_count(pvec))
-			__pagevec_lru_add(pvec, lru);
-	}
+	if (pagevec_count(pvec))
+		__pagevec_lru_add(pvec);
 
 	pvec = &per_cpu(lru_rotate_pvecs, cpu);
 	if (pagevec_count(pvec)) {
@@ -708,6 +739,9 @@ void release_pages(struct page **pages, int nr, int cold)
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
 
+		/* Clear Active bit in case of parallel mark_page_accessed */
+		ClearPageActive(page);
+
 		list_add(&page->lru, &pages_to_free);
 	}
 	if (zone)
@@ -795,30 +829,26 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 				 void *arg)
 {
-	enum lru_list lru = (enum lru_list)arg;
-	int file = is_file_lru(lru);
-	int active = is_active_lru(lru);
+	int file = page_is_file_cache(page);
+	int active = PageActive(page);
+	enum lru_list lru = page_lru(page);
 
-	VM_BUG_ON(PageActive(page));
 	VM_BUG_ON(PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 
 	SetPageLRU(page);
-	if (active)
-		SetPageActive(page);
 	add_page_to_lru_list(page, lruvec, lru);
 	update_page_reclaim_stat(lruvec, file, active);
+	trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page));
 }
 
 /*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them. Reinitialises the caller's pagevec.
  */
-void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
+void __pagevec_lru_add(struct pagevec *pvec)
 {
-	VM_BUG_ON(is_unevictable_lru(lru));
-
-	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
+	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
 }
 EXPORT_SYMBOL(__pagevec_lru_add);
 
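
The last two hunks are two halves of the same idea: __pagevec_lru_add() no longer receives a list, so __pagevec_lru_add_fn() recomputes it per page from PageActive and page_is_file_cache() via page_lru(), and passing NULL as the arg becomes sufficient. The standalone sketch below shows that flag-to-list derivation; it is illustrative only, model_page_lru is an invented name, and the real page_lru() also handles the unevictable case.

/*
 * Illustration only: derive the target LRU list from two page flags,
 * in the spirit of page_lru(). Not the kernel implementation.
 */
#include <stdio.h>

enum lru_list {
	LRU_INACTIVE_ANON,
	LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE,
	LRU_ACTIVE_FILE,
};

static enum lru_list model_page_lru(int file, int active)
{
	/* base list from anon/file, bumped to the active variant if needed */
	enum lru_list lru = file ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;

	if (active)
		lru += 1;	/* LRU_ACTIVE_* directly follows LRU_INACTIVE_* */
	return lru;
}

int main(void)
{
	/* A file page activated while still on the pagevec lands on the active file list. */
	printf("lru=%d (LRU_ACTIVE_FILE=%d)\n",
	       model_page_lru(1, 1), LRU_ACTIVE_FILE);
	return 0;
}

This is also why __pagevec_lru_add_fn() drops the old VM_BUG_ON(PageActive(page)) and the explicit SetPageActive(): an Active bit set earlier, for example by mark_page_accessed() while the page was still on the pagevec, is now an input to list selection rather than an error.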