Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 29
-rw-r--r--  mm/rmap.c       |  2
-rw-r--r--  mm/swap.c       |  4
-rw-r--r--  mm/vmscan.c     |  6
4 files changed, 18 insertions, 23 deletions
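
In brief: this change removes the hot/cold hint from the order-0 free path and renames the API to match. As a reader's sketch reconstructed from the hunks below (prototypes only, not the full kernel headers), the entry points change like this:

/* Before: every order-0 free carried a "cold" hint. */
void free_hot_cold_page(struct page *page, bool cold);
void free_hot_cold_page_list(struct list_head *list, bool cold);

/* After: the hint is gone; all frees take the old hot path. */
void free_unref_page(struct page *page);
void free_unref_page_list(struct list_head *list);
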
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6a3c4a1d513f..f265d37b3152 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2611,7 +2611,7 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */
 
-static bool free_hot_cold_page_prepare(struct page *page, unsigned long pfn)
+static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
 {
 	int migratetype;
 
@@ -2623,8 +2623,7 @@ static bool free_hot_cold_page_prepare(struct page *page, unsigned long pfn)
 	return true;
 }
 
-static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
-				bool cold)
+static void free_unref_page_commit(struct page *page, unsigned long pfn)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -2649,10 +2648,7 @@ static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
 	}
 
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
-	if (!cold)
-		list_add(&page->lru, &pcp->lists[migratetype]);
-	else
-		list_add_tail(&page->lru, &pcp->lists[migratetype]);
+	list_add(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = READ_ONCE(pcp->batch);
@@ -2663,25 +2659,24 @@ static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
 
 /*
  * Free a 0-order page
- * cold == true ? free a cold page : free a hot page
  */
-void free_hot_cold_page(struct page *page, bool cold)
+void free_unref_page(struct page *page)
 {
 	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
 
-	if (!free_hot_cold_page_prepare(page, pfn))
+	if (!free_unref_page_prepare(page, pfn))
 		return;
 
 	local_irq_save(flags);
-	free_hot_cold_page_commit(page, pfn, cold);
+	free_unref_page_commit(page, pfn);
 	local_irq_restore(flags);
 }
 
 /*
  * Free a list of 0-order pages
  */
-void free_hot_cold_page_list(struct list_head *list, bool cold)
+void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 	unsigned long flags, pfn;
@@ -2689,7 +2684,7 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
 		pfn = page_to_pfn(page);
-		if (!free_hot_cold_page_prepare(page, pfn))
+		if (!free_unref_page_prepare(page, pfn))
 			list_del(&page->lru);
 		set_page_private(page, pfn);
 	}
@@ -2699,8 +2694,8 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
 		unsigned long pfn = page_private(page);
 
 		set_page_private(page, 0);
-		trace_mm_page_free_batched(page, cold);
-		free_hot_cold_page_commit(page, pfn, cold);
+		trace_mm_page_free_batched(page);
+		free_unref_page_commit(page, pfn);
 	}
 	local_irq_restore(flags);
 }
@@ -4301,7 +4296,7 @@ void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
 		if (order == 0)
-			free_hot_cold_page(page, false);
+			free_unref_page(page);
 		else
 			__free_pages_ok(page, order);
 	}
@@ -4359,7 +4354,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 	unsigned int order = compound_order(page);
 
 	if (order == 0)
-		free_hot_cold_page(page, false);
+		free_unref_page(page);
 	else
 		__free_pages_ok(page, order);
 }
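
The behavioral change sits in the free_unref_page_commit() hunk above: with the flag gone, every freed page is queued at the head of the per-cpu list and so is reused first, as if cache-hot; the removed cold path queued at the tail so the page was reused last. Below is a standalone userspace sketch (not kernel code) mimicking the two list insertion disciplines:

/*
 * Userspace illustration of list_add() (head insert, "hot") versus the
 * removed list_add_tail() path (tail insert, "cold"), following the
 * semantics of the kernel's doubly linked lists.
 */
#include <stdio.h>

struct node { struct node *prev, *next; int id; };

static void list_add(struct node *n, struct node *head)      /* head insert */
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_add_tail(struct node *n, struct node *head) /* tail insert */
{
	list_add(n, head->prev);
}

int main(void)
{
	struct node head = { &head, &head, 0 };
	struct node hot = { NULL, NULL, 1 }, cold = { NULL, NULL, 2 };

	list_add(&hot, &head);       /* what free_unref_page_commit() now always does */
	list_add_tail(&cold, &head); /* what the removed cold path used to do */

	for (struct node *n = head.next; n != &head; n = n->next)
		printf("page %d\n", n->id); /* prints 1, then 2 */
	return 0;
}
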
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1321,7 +1321,7 @@ void page_remove_rmap(struct page *page, bool compound)
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
 	 * which increments mapcount after us but sets mapping
-	 * before us: so leave the reset to free_hot_cold_page,
+	 * before us: so leave the reset to free_unref_page,
 	 * and remember that it's only reliable while mapped.
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -76,7 +76,7 @@ static void __page_cache_release(struct page *page)
 static void __put_single_page(struct page *page)
 {
 	__page_cache_release(page);
-	free_hot_cold_page(page, false);
+	free_unref_page(page);
 }
 
 static void __put_compound_page(struct page *page)
@@ -817,7 +817,7 @@ void release_pages(struct page **pages, int nr)
 	spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
 
 	mem_cgroup_uncharge_list(&pages_to_free);
-	free_hot_cold_page_list(&pages_to_free, 0);
+	free_unref_page_list(&pages_to_free);
 }
 EXPORT_SYMBOL(release_pages);
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2852b8c5a917..c02c850ea349 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1349,7 +1349,7 @@ keep:
 
 	mem_cgroup_uncharge_list(&free_pages);
 	try_to_unmap_flush();
-	free_hot_cold_page_list(&free_pages, true);
+	free_unref_page_list(&free_pages);
 
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
@@ -1824,7 +1824,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	spin_unlock_irq(&pgdat->lru_lock);
 
 	mem_cgroup_uncharge_list(&page_list);
-	free_hot_cold_page_list(&page_list, true);
+	free_unref_page_list(&page_list);
 
 	/*
 	 * If reclaim is isolating dirty pages under writeback, it implies
@@ -2063,7 +2063,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	spin_unlock_irq(&pgdat->lru_lock);
 
 	mem_cgroup_uncharge_list(&l_hold);
-	free_hot_cold_page_list(&l_hold, true);
+	free_unref_page_list(&l_hold);
 	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
 		nr_deactivate, nr_rotated, sc->priority, file);
 }
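
Worth noting from the hunks above: the callers did not agree on the hint. release_pages() in mm/swap.c passed 0 (hot), while all three vmscan reclaim paths passed true (cold); the rename removes the distinction entirely. After the change, a batched caller reduces to the pattern below (kernel-style fragment for illustration only, assuming the usual list_head batching; not compilable on its own):

LIST_HEAD(pages_to_free);

/* ... detach pages from the LRU onto pages_to_free ... */

mem_cgroup_uncharge_list(&pages_to_free);
free_unref_page_list(&pages_to_free);	/* no hot/cold hint to get wrong */
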