about summary refs log tree commit diff stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@linux.intel.com>2018-10-26 18:07:48 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-10-26 19:26:34 -0400
commitd483da5bc78b86fe4200d2947f193a745f711713 (patch)
treecca0e6188532f0408ab21fd90ca7f5241c03fad4 /mm/page_alloc.c
parentf682a97a00591def7cefbb5003dc04045028e405 (diff)
mm: create non-atomic version of SetPageReserved for init use
It doesn't make much sense to use the atomic SetPageReserved at init time when we are using memset to clear the memory and manipulating the page flags via simple "&=" and "|=" operations in __init_single_page. This patch adds a non-atomic version __SetPageReserved that can be used during page init and shows about a 10% improvement in initialization times on the systems I have available for testing. On those systems I saw initialization times drop from around 35 seconds to around 32 seconds to initialize a 3TB block of persistent memory. I believe the main advantage of this is that it allows for more compiler optimization as the __set_bit operation can be reordered whereas the atomic version cannot. I tried adding a bit of documentation based on f1dd2cd13c4 ("mm, memory_hotplug: do not associate hotadded memory to zones until online"). Ideally the reserved flag should be set earlier since there is a brief window where the page is initialized via __init_single_page and we have not set the PG_Reserved flag. I'm leaving that for a future patch set as that will require a more significant refactor. Link: http://lkml.kernel.org/r/20180925202018.3576.11607.stgit@localhost.localdomain Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com> Reviewed-by: Pavel Tatashin <pavel.tatashin@microsoft.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Dave Hansen <dave.hansen@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eb6c50cc8880..cee1abf85d72 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1232,7 +1232,12 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 		/* Avoid false-positive PageTail() */
 		INIT_LIST_HEAD(&page->lru);
 
-		SetPageReserved(page);
+		/*
+		 * no need for atomic set_bit because the struct
+		 * page is not visible yet so nobody should
+		 * access it yet.
+		 */
+		__SetPageReserved(page);
 		}
 	}
 }
@@ -5508,7 +5513,7 @@ not_early:
 		page = pfn_to_page(pfn);
 		__init_single_page(page, pfn, zone, nid);
 		if (context == MEMMAP_HOTPLUG)
-			SetPageReserved(page);
+			__SetPageReserved(page);
 
 		/*
 		 * Mark the block movable so that blocks are reserved for