author		Arun KS <arunks@codeaurora.org>	2019-03-05 18:42:14 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-06 00:07:14 -0500
commit		a9cd410a3d296846a8125aa43d97a573a354c472 (patch)
tree		e595bcf29faa43aefe2fd0962e11a3a8d8f0c9e2 /mm/page_alloc.c
parent		278d7756dff0b4c8089c46abad20a79bcfa66b5b (diff)
mm/page_alloc.c: memory hotplug: free pages as higher order
When pages are freed at a higher order, the time the buddy allocator spends coalescing them is reduced.  With a section size of 256MB, the hot-add latency of a single section improves from 50-60 ms to less than 1 ms, improving hot-add latency by a factor of about 60.  Modify the external providers of the online callback to align with the change.

[arunks@codeaurora.org: v11]
  Link: http://lkml.kernel.org/r/1547792588-18032-1-git-send-email-arunks@codeaurora.org
[akpm@linux-foundation.org: remove unused local, per Arun]
[akpm@linux-foundation.org: avoid return of void-returning __free_pages_core(), per Oscar]
[akpm@linux-foundation.org: fix it for mm-convert-totalram_pages-and-totalhigh_pages-variables-to-atomic.patch]
[arunks@codeaurora.org: v8]
  Link: http://lkml.kernel.org/r/1547032395-24582-1-git-send-email-arunks@codeaurora.org
[arunks@codeaurora.org: v9]
  Link: http://lkml.kernel.org/r/1547098543-26452-1-git-send-email-arunks@codeaurora.org
Link: http://lkml.kernel.org/r/1538727006-5727-1-git-send-email-arunks@codeaurora.org
Signed-off-by: Arun KS <arunks@codeaurora.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mathieu Malaterre <malat@debian.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Aaron Lu <aaron.lu@intel.com>
Cc: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
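To make the mechanism concrete, below is a minimal sketch of how the hotplug online path can walk a hot-added PFN range and hand it to the order-aware callback in the largest aligned power-of-two chunks, so __free_pages_core() places MAX_ORDER-1 blocks directly on the buddy freelists instead of single pages that must be coalesced.  The real implementation lives on the mm/memory_hotplug.c side of this series, outside this diffstat; the helper name online_pages_blocks and the online_page_callback hook are assumptions for illustration, not quoted from the diff below.

/*
 * Sketch (illustrative, not from this diff): release a hot-added PFN
 * range in the largest power-of-two chunks the remaining span allows.
 * Assumes the range is section-aligned and power-of-two sized, so
 * get_order() of the remainder never overshoots the end of the range.
 */
static unsigned long online_pages_blocks(unsigned long start,
                                         unsigned long nr_pages)
{
        unsigned long end = start + nr_pages;
        unsigned long onlined_pages = 0;
        int order;

        while (start < end) {
                /* Largest order that still fits in the remaining range. */
                order = min(MAX_ORDER - 1,
                            get_order(PFN_PHYS(end) - PFN_PHYS(start)));
                (*online_page_callback)(pfn_to_page(start), order);

                onlined_pages += 1UL << order;
                start += 1UL << order;
        }
        return onlined_pages;
}

With a 256MB section and 4K pages, this loop makes a handful of high-order calls per section instead of 65536 order-0 calls, which is where the 50-60 ms to sub-1 ms improvement comes from.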
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 10d0f2ed9f69..5361bd078493 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1303,7 +1303,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	local_irq_restore(flags);
 }
 
-static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+void __free_pages_core(struct page *page, unsigned int order)
 {
 	unsigned int nr_pages = 1 << order;
 	struct page *p = page;
@@ -1382,7 +1382,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
 {
 	if (early_page_uninitialised(pfn))
 		return;
-	return __free_pages_boot_core(page, order);
+	__free_pages_core(page, order);
 }
 
 /*
@@ -1472,14 +1472,14 @@ static void __init deferred_free_range(unsigned long pfn,
 	if (nr_pages == pageblock_nr_pages &&
 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-		__free_pages_boot_core(page, pageblock_order);
+		__free_pages_core(page, pageblock_order);
 		return;
 	}
 
 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-		__free_pages_boot_core(page, 0);
+		__free_pages_core(page, 0);
 	}
 }
 
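On the provider side, the online callback now receives an order, which is why __free_pages_core() is exported above.  As a reference point, here is a sketch of what a generic order-aware callback can look like after this change; it lives outside this diffstat, and the totalram_pages_add()/totalhigh_pages_add() helpers are assumed from the atomic totalram_pages conversion mentioned in the message above, so treat this as illustrative rather than quoted from the patch.

/*
 * Sketch (assumed shape, not part of this diff): a generic order-aware
 * online callback.  A whole high-order block is handed to the buddy
 * allocator in one call instead of 1 << order individual order-0 calls.
 */
static void generic_online_page(struct page *page, unsigned int order)
{
        __free_pages_core(page, order);
        totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page))
                totalhigh_pages_add(1UL << order);
#endif
}

External providers of the callback (e.g. the Hyper-V and Xen balloon drivers Cc'd above) are updated in the same series to accept the order argument, falling back to per-page handling where they cannot release a whole block at once.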