Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  47
1 file changed, 21 insertions, 26 deletions
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -40,7 +40,7 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
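The hunk above replaces the per-CPU array of pagevecs (one per LRU list) with a single per-CPU pagevec, so the target list can no longer be implied by which pagevec holds a page; it has to travel with the page itself. A minimal userspace sketch of the layout difference (illustrative only, not kernel code; the PAGEVEC_SIZE and NR_LRU_LISTS values are assumptions matching kernels of this era):

#include <stdio.h>

#define NR_LRU_LISTS	5	/* inactive/active x anon/file, plus unevictable (assumed) */
#define PAGEVEC_SIZE	14	/* assumed on-stack batch size of the era */

struct page;			/* opaque for this sketch */

struct pagevec {
	unsigned long nr;
	struct page *pages[PAGEVEC_SIZE];
};

/* Before: one add-batch per LRU list on every CPU. */
static struct pagevec lru_add_pvecs[NR_LRU_LISTS];

/* After: a single add-batch per CPU; the target list is read back
 * from the page's own flags when the batch is drained. */
static struct pagevec lru_add_pvec;

int main(void)
{
	printf("per-cpu add cache before: %zu bytes\n", sizeof(lru_add_pvecs));
	printf("per-cpu add cache after:  %zu bytes\n", sizeof(lru_add_pvec));
	return 0;
}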
@@ -452,22 +452,25 @@ void mark_page_accessed(struct page *page)
 EXPORT_SYMBOL(mark_page_accessed);
 
 /*
- * Order of operations is important: flush the pagevec when it's already
- * full, not when adding the last page, to make sure that last page is
- * not added to the LRU directly when passed to this function. Because
- * mark_page_accessed() (called after this when writing) only activates
- * pages that are on the LRU, linear writes in subpage chunks would see
- * every PAGEVEC_SIZE page activated, which is unexpected.
+ * Queue the page for addition to the LRU via pagevec. The decision on whether
+ * to add the page to the [in]active [file|anon] list is deferred until the
+ * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
+ * to have the page added to the active list using mark_page_accessed().
  */
 void __lru_cache_add(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+
+	if (is_active_lru(lru))
+		SetPageActive(page);
+	else
+		ClearPageActive(page);
 
 	page_cache_get(page);
 	if (!pagevec_space(pvec))
 		__pagevec_lru_add(pvec, lru);
 	pagevec_add(pvec, page);
-	put_cpu_var(lru_add_pvecs);
+	put_cpu_var(lru_add_pvec);
 }
 EXPORT_SYMBOL(__lru_cache_add);
 
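__lru_cache_add() now records only the active/inactive intent in the page flags and leaves the final [in]active [file|anon] choice to drain time. A rough userspace model of that two-phase flow (the names and the enum ordering below are assumptions mirroring the kernel's lru_list layout, not the real API):

#include <stdbool.h>
#include <stdio.h>

enum lru_list {
	LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
	NR_LRU_LISTS
};

struct page { bool active; bool file; };	/* stand-in for page flags */

/* Queue time: remember only "should this end up active?" on the page. */
static void queue_for_lru(struct page *page, bool want_active)
{
	page->active = want_active;
	/* ...the page would be appended to the per-cpu pagevec here... */
}

/* Drain time: derive the final list from the page's own state. */
static enum lru_list resolve_lru(const struct page *page)
{
	enum lru_list base = page->file ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	return page->active ? base + 1 : base;
}

int main(void)
{
	struct page pg = { .file = true };

	queue_for_lru(&pg, true);
	printf("page drains to list %d (LRU_ACTIVE_FILE is %d)\n",
	       resolve_lru(&pg), LRU_ACTIVE_FILE);
	return 0;
}

The point of deferring: a page queued as inactive can still be promoted by mark_page_accessed() before the pagevec drains, because nothing has been committed to a specific list yet.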
@@ -480,13 +483,11 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
 {
 	if (PageActive(page)) {
 		VM_BUG_ON(PageUnevictable(page));
-		ClearPageActive(page);
 	} else if (PageUnevictable(page)) {
 		VM_BUG_ON(PageActive(page));
-		ClearPageUnevictable(page);
 	}
 
-	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
+	VM_BUG_ON(PageLRU(page));
 	__lru_cache_add(page, lru);
 }
 
@@ -587,15 +588,10 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
  */
 void lru_add_drain_cpu(int cpu)
 {
-	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
-	struct pagevec *pvec;
-	int lru;
+	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
 
-	for_each_lru(lru) {
-		pvec = &pvecs[lru - LRU_BASE];
-		if (pagevec_count(pvec))
-			__pagevec_lru_add(pvec, lru);
-	}
+	if (pagevec_count(pvec))
+		__pagevec_lru_add(pvec, NR_LRU_LISTS);
 
 	pvec = &per_cpu(lru_rotate_pvecs, cpu);
 	if (pagevec_count(pvec)) {
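With a single add-pagevec per CPU, the drain no longer loops over the LRU lists; it flushes once and passes NR_LRU_LISTS, effectively "no particular list requested", so each page is routed by its own flags at insertion time. A hedged sketch of that before/after shape (counters and function names here are illustrative stand-ins):

#include <stdio.h>

enum lru_list {
	LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS		/* one past the last valid list */
};

static int lru_add_pvecs_count[NR_LRU_LISTS];	/* old: one batch per list  */
static int lru_add_pvec_count;			/* new: one batch, any list */

static void drain_old(void)
{
	/* Old shape: one flush per LRU list, each knowing its target. */
	for (int lru = 0; lru < NR_LRU_LISTS; lru++)
		if (lru_add_pvecs_count[lru])
			printf("flush %d pages to list %d\n",
			       lru_add_pvecs_count[lru], lru);
}

static void drain_new(void)
{
	/* New shape: one flush; NR_LRU_LISTS means "no list requested",
	 * each page is routed by its own flags when inserted. */
	if (lru_add_pvec_count)
		printf("flush %d pages, target decided per page (%d = none)\n",
		       lru_add_pvec_count, NR_LRU_LISTS);
}

int main(void)
{
	lru_add_pvecs_count[LRU_ACTIVE_FILE] = 3;
	lru_add_pvec_count = 3;
	drain_old();
	drain_new();
	return 0;
}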
@@ -799,17 +795,16 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 				 void *arg)
 {
-	enum lru_list lru = (enum lru_list)arg;
-	int file = is_file_lru(lru);
-	int active = is_active_lru(lru);
+	enum lru_list requested_lru = (enum lru_list)arg;
+	int file = page_is_file_cache(page);
+	int active = PageActive(page);
+	enum lru_list lru = page_lru(page);
 
-	VM_BUG_ON(PageActive(page));
+	WARN_ON_ONCE(requested_lru < NR_LRU_LISTS && requested_lru != lru);
 	VM_BUG_ON(PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 
 	SetPageLRU(page);
-	if (active)
-		SetPageActive(page);
 	add_page_to_lru_list(page, lruvec, lru);
 	update_page_reclaim_stat(lruvec, file, active);
 	trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page));
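__pagevec_lru_add_fn() now derives the file/anon and active bits from the page itself (page_is_file_cache(), PageActive(), page_lru()) and treats the caller's argument only as a cross-check: if a specific list was requested and it disagrees with the page flags, it warns once. A small userspace approximation of that check (the helpers below are stand-ins named after the kernel ones, not the real implementations):

#include <stdbool.h>
#include <stdio.h>

enum lru_list {
	LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
	NR_LRU_LISTS
};

struct page { bool active; bool file; };

/* Stand-in for page_lru(): map the page's flags to its list. */
static enum lru_list page_lru(const struct page *page)
{
	enum lru_list base = page->file ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	return page->active ? base + 1 : base;
}

/* Mirrors the new insertion logic: trust the page, verify the caller. */
static void lru_add_one(struct page *page, enum lru_list requested_lru)
{
	enum lru_list lru = page_lru(page);

	if (requested_lru < NR_LRU_LISTS && requested_lru != lru)
		fprintf(stderr, "warning: caller requested %d, page says %d\n",
			requested_lru, lru);

	printf("inserting on list %d (file=%d active=%d)\n",
	       lru, page->file, page->active);
}

int main(void)
{
	struct page pg = { .active = true, .file = false };

	lru_add_one(&pg, NR_LRU_LISTS);		/* drain path: no list requested */
	lru_add_one(&pg, LRU_INACTIVE_ANON);	/* stale request: triggers warning */
	return 0;
}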