Diffstat (limited to 'mm/swap.c')

 mm/swap.c | 59 ++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 38 insertions(+), 21 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 5dabf444d724..98d08b4579fa 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -46,7 +46,7 @@ int page_cluster;
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 #endif
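
Each of these per-CPU slots is a small batch of page pointers. For reference, struct pagevec around this kernel looked roughly like the following (paraphrased from include/linux/pagevec.h; field comments added here):

/* Paraphrased from include/linux/pagevec.h, ~v4.12; comments added. */
#define PAGEVEC_SIZE	14

struct pagevec {
	unsigned long nr;			/* pages currently batched */
	unsigned long cold;			/* cold-page hint for release_pages() */
	struct page *pages[PAGEVEC_SIZE];	/* the batch itself */
};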
@@ -97,6 +97,16 @@ static void __put_compound_page(struct page *page)
 
 void __put_page(struct page *page)
 {
+	if (is_zone_device_page(page)) {
+		put_dev_pagemap(page->pgmap);
+
+		/*
+		 * The page belongs to the device that created pgmap. Do
+		 * not return it to page allocator.
+		 */
+		return;
+	}
+
 	if (unlikely(PageCompound(page)))
 		__put_compound_page(page);
 	else
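
The zone-device check only runs once the final reference is dropped, because callers reach __put_page() through put_page(). A simplified sketch of that caller (modeled on the static inline in include/linux/mm.h of this era; not verbatim):

/* Simplified sketch of put_page(), modeled on include/linux/mm.h ~v4.12. */
static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (put_page_testzero(page))	/* refcount dropped to zero */
		__put_page(page);	/* zone-device pages bail out here */
}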
@@ -561,20 +571,27 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 }
 
 
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
-	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-		int file = page_is_file_cache(page);
-		int lru = page_lru_base_type(page);
+	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+	    !PageUnevictable(page)) {
+		bool active = PageActive(page);
 
-		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
+		del_page_from_lru_list(page, lruvec,
+				       LRU_INACTIVE_ANON + active);
 		ClearPageActive(page);
 		ClearPageReferenced(page);
-		add_page_to_lru_list(page, lruvec, lru);
+		/*
+		 * lazyfree pages are clean anonymous pages. They have
+		 * SwapBacked flag cleared to distinguish normal anonymous
+		 * pages
+		 */
+		ClearPageSwapBacked(page);
+		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 
-		__count_vm_event(PGDEACTIVATE);
-		update_page_reclaim_stat(lruvec, file, 0);
+		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
+		update_page_reclaim_stat(lruvec, 1, 0);
 	}
 }
 
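Clearing PG_swapbacked is what lets reclaim later tell a lazyfree page apart from a normal anonymous page: anonymous but not swap-backed means the contents are disposable. A hedged sketch of that consumer-side test (the helper name is illustrative; the flag test mirrors the logic this series adds on the mm/vmscan.c side):

/* Illustrative helper; the flag test mirrors the vmscan-side logic. */
static inline bool page_is_lazyfree(struct page *page)
{
	/* anon + !swapbacked => MADV_FREEd and still clean */
	return PageAnon(page) && !PageSwapBacked(page);
}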
@@ -604,9 +621,9 @@ void lru_add_drain_cpu(int cpu)
 	if (pagevec_count(pvec))
 		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 
-	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
 	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
 	activate_page_drain(cpu);
 }
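
All of these drains funnel through pagevec_lru_move_fn(), which applies the per-page callback under the node's LRU lock and then releases the batch. A simplified sketch (the real ~v4.12 helper batches the lock across pages from the same node and uses the irqsave variant; both elided here):

/*
 * Simplified sketch of pagevec_lru_move_fn() (mm/swap.c, ~v4.12);
 * same-node lock batching and irqsave details elided.
 */
static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pgdat = page_pgdat(page);
		struct lruvec *lruvec;

		spin_lock_irq(&pgdat->lru_lock);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);	/* e.g. lru_lazyfree_fn() */
		spin_unlock_irq(&pgdat->lru_lock);
	}
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}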
@@ -638,22 +655,22 @@ void deactivate_file_page(struct page *page)
 }
 
 /**
- * deactivate_page - deactivate a page
+ * mark_page_lazyfree - make an anon page lazyfree
  * @page: page to deactivate
  *
- * deactivate_page() moves @page to the inactive list if @page was on the active
- * list and was not an unevictable page. This is done to accelerate the reclaim
- * of @page.
+ * mark_page_lazyfree() moves @page to the inactive file list.
+ * This is done to accelerate the reclaim of @page.
  */
-void deactivate_page(struct page *page)
+void mark_page_lazyfree(struct page *page)
 {
-	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+	    !PageUnevictable(page)) {
+		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
 
 		get_page(page);
 		if (!pagevec_add(pvec, page) || PageCompound(page))
-			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-		put_cpu_var(lru_deactivate_pvecs);
+			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
+		put_cpu_var(lru_lazyfree_pvecs);
 	}
 }
 
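mark_page_lazyfree() is the LRU-side half of MADV_FREE: madvise_free_pte_range() in mm/madvise.c ends up calling it for each clean anonymous page in the advised range. From userspace the whole path is driven by one madvise() call; a minimal usage sketch (buffer size and fill pattern are illustrative):

/* Minimal MADV_FREE trigger; size and fill pattern are illustrative. */
#define _DEFAULT_SOURCE		/* for MADV_FREE (glibc >= 2.24) */
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0xaa, len);	/* dirty the anonymous pages */

	/*
	 * Mark the contents disposable: the pages become lazyfree and
	 * can be reclaimed without swap I/O; a later write to a page
	 * cancels the hint for that page.
	 */
	madvise(buf, len, MADV_FREE);
	return 0;
}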
@@ -693,7 +710,7 @@ void lru_add_drain_all(void)
 		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
-		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
 			queue_work_on(cpu, mm_percpu_wq, work);
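
The work item queued here simply runs the local drain on the target CPU, which is how the renamed lru_lazyfree_pvecs batch gets flushed system-wide (paraphrased from mm/swap.c of the same era):

/* Paraphrased from mm/swap.c, ~v4.12. */
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	/* drains all per-CPU pagevecs, including lru_lazyfree_pvecs */
	lru_add_drain();
}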