author    Mel Gorman <mgorman@techsingularity.net>    2017-11-15 20:37:59 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-11-15 21:21:06 -0500
commit    2d4894b5d2ae0fe1725ea7abd57b33bfbbe45492
tree      ab55401d2d6181491fa9e767561275c2992680e6 /mm/page_alloc.c
parent    c6f92f9fbe7dbcc8903a67229aa88b4077ae4422
mm: remove cold parameter from free_hot_cold_page*
Most callers of free_hot_cold_page claim the pages being released are
cache hot. The exception is the page reclaim paths, where it is likely
that enough pages will be freed in the near future that the per-cpu
lists are going to be recycled and the cache hotness information is
lost. As no one really cares about the hotness of pages being released
to the allocator, just ditch the parameter.

The APIs are renamed to indicate that it's no longer about hot/cold
pages. It should also be less confusing, as there are subtle
differences between them: __free_pages drops a reference and frees a
page when the refcount reaches zero, whereas free_hot_cold_page handled
pages whose refcount was already zero, which is non-obvious from the
name. free_unref_page should be more obvious.

No performance impact is expected, as the overhead is marginal. The
parameter is removed simply because it is a bit stupid to have a
useless parameter copied everywhere.

[mgorman@techsingularity.net: add pages to head, not tail]
Link: http://lkml.kernel.org/r/20171019154321.qtpzaeftoyyw4iey@techsingularity.net
Link: http://lkml.kernel.org/r/20171018075952.10627-8-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
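To make the renamed contract concrete, here is a minimal caller-side
sketch (illustrative only, not part of the patch; example_release()
and its page arguments are hypothetical):

	/* Sketch: the two release paths after the rename. */
	static void example_release(struct page *page_a, struct page *page_b)
	{
		/* __free_pages() drops a reference and frees the page
		 * only once the refcount reaches zero. */
		__free_pages(page_a, 0);

		/* free_unref_page() expects the refcount to already be
		 * zero; put_page_testzero() drops the last reference
		 * and reports whether it hit zero. */
		if (put_page_testzero(page_b))
			free_unref_page(page_b);
	}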
Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6a3c4a1d513f..f265d37b3152 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2611,7 +2611,7 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */
 
-static bool free_hot_cold_page_prepare(struct page *page, unsigned long pfn)
+static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
 {
 	int migratetype;
 
@@ -2623,8 +2623,7 @@ static bool free_hot_cold_page_prepare(struct page *page, unsigned long pfn)
 	return true;
 }
 
-static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
-				bool cold)
+static void free_unref_page_commit(struct page *page, unsigned long pfn)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -2649,10 +2648,7 @@ static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
 	}
 
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
-	if (!cold)
-		list_add(&page->lru, &pcp->lists[migratetype]);
-	else
-		list_add_tail(&page->lru, &pcp->lists[migratetype]);
+	list_add(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = READ_ONCE(pcp->batch);
@@ -2663,25 +2659,24 @@ static void free_hot_cold_page_commit(struct page *page, unsigned long pfn,
 
 /*
  * Free a 0-order page
- * cold == true ? free a cold page : free a hot page
  */
-void free_hot_cold_page(struct page *page, bool cold)
+void free_unref_page(struct page *page)
 {
 	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
 
-	if (!free_hot_cold_page_prepare(page, pfn))
+	if (!free_unref_page_prepare(page, pfn))
 		return;
 
 	local_irq_save(flags);
-	free_hot_cold_page_commit(page, pfn, cold);
+	free_unref_page_commit(page, pfn);
 	local_irq_restore(flags);
 }
 
 /*
  * Free a list of 0-order pages
  */
-void free_hot_cold_page_list(struct list_head *list, bool cold)
+void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 	unsigned long flags, pfn;
@@ -2689,7 +2684,7 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
 		pfn = page_to_pfn(page);
-		if (!free_hot_cold_page_prepare(page, pfn))
+		if (!free_unref_page_prepare(page, pfn))
 			list_del(&page->lru);
 		set_page_private(page, pfn);
 	}
@@ -2699,8 +2694,8 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
 		unsigned long pfn = page_private(page);
 
 		set_page_private(page, 0);
-		trace_mm_page_free_batched(page, cold);
-		free_hot_cold_page_commit(page, pfn, cold);
+		trace_mm_page_free_batched(page);
+		free_unref_page_commit(page, pfn);
 	}
 	local_irq_restore(flags);
 }
@@ -4301,7 +4296,7 @@ void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
 		if (order == 0)
-			free_hot_cold_page(page, false);
+			free_unref_page(page);
 		else
 			__free_pages_ok(page, order);
 	}
@@ -4359,7 +4354,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 		unsigned int order = compound_order(page);
 
 		if (order == 0)
-			free_hot_cold_page(page, false);
+			free_unref_page(page);
 		else
 			__free_pages_ok(page, order);
 	}
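A closing note on the bracketed fixup in the changelog ("add pages to
head, not tail"): assuming the order-0 allocation fast path pops pages
from the head of the same per-cpu list, head insertion gives LIFO
reuse of the most recently freed (likely still cache-warm) page,
whereas list_add_tail() would have turned the list into a FIFO. A
sketch of the pairing (illustrative, not from the patch):

	/* Free side (this patch): push to the head of the pcp list. */
	list_add(&page->lru, &pcp->lists[migratetype]);

	/* Allocation side pops from the head, so the most recently
	 * freed page is handed out first. */
	page = list_first_entry(&pcp->lists[migratetype], struct page, lru);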