author      Pavel Tatashin <pasha.tatashin@oracle.com>      2018-04-05 19:23:00 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2018-04-06 00:36:25 -0400
commit      d0dc12e86b3197a14a908d4fe7cb35b73dda82b5
tree        28bd3b5c716e3e1c04fab511c3a41033bdc85993 /mm/page_alloc.c
parent      fc44f7f9231a73821fc858f5bc48883a9e78f6de
mm/memory_hotplug: optimize memory hotplug
During memory hotplugging we traverse struct pages three times:
1. memset(0) in sparse_add_one_section()
2. loop in __add_section() to do: set_page_node(page, nid); and
   SetPageReserved(page);
3. loop in memmap_init_zone() to call __init_single_pfn()
This patch removes the first two loops and leaves only loop 3. All
struct pages are then initialized in one place, the same way it is done
during boot.
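As a rough illustration, the traversal change can be modeled in
standalone C with an invented page_model struct (the real struct page
and its initializers are far more involved); the point is only that
three separate passes touch every cache line of the memmap three times,
while one combined pass touches each line once:

    /*
     * Standalone model of the traversal change -- invented types and
     * field names, not kernel code.
     */
    #include <stdio.h>
    #include <string.h>

    struct page_model {
            unsigned long flags;
            int node;
            int refcount;
    };

    #define NR_PAGES 8
    #define PG_RESERVED (1UL << 0)

    static struct page_model memmap[NR_PAGES];

    /* Before: a memset() pass, a node/Reserved pass, an init pass. */
    static void init_three_passes(int nid)
    {
            memset(memmap, 0, sizeof(memmap));              /* pass 1 */
            for (int i = 0; i < NR_PAGES; i++) {            /* pass 2 */
                    memmap[i].node = nid;
                    memmap[i].flags |= PG_RESERVED;
            }
            for (int i = 0; i < NR_PAGES; i++)              /* pass 3 */
                    memmap[i].refcount = 1;
    }

    /* After: all work for a page happens while it is cache-hot. */
    static void init_one_pass(int nid)
    {
            for (int i = 0; i < NR_PAGES; i++) {
                    memset(&memmap[i], 0, sizeof(memmap[i]));
                    memmap[i].node = nid;
                    memmap[i].flags |= PG_RESERVED;
                    memmap[i].refcount = 1;
            }
    }

    int main(void)
    {
            init_three_passes(0);
            init_one_pass(0);
            printf("node=%d flags=%#lx refcount=%d\n",
                   memmap[0].node, memmap[0].flags, memmap[0].refcount);
            return 0;
    }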
The benefits:
- Memory hotplug performance improves because the cache is no longer
  evicted several times and loop branching overhead is reduced.
- A condition is removed from the hot path in __init_single_pfn() that
  had been added to fix the problem reported by Bharata in the above
  email thread; this also improves performance during normal boot (see
  the model after this list).
- Memory hotplug becomes more similar to the boot-time memory
  initialization path, because struct pages are zeroed and initialized
  in only one function.
- The memory hotplug struct page initialization code is simplified,
  which enables future improvements such as multi-threading struct page
  initialization to improve hotplug performance even further on larger
  machines.
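The condition removed from the hot path is the bool zero argument
visible in the diff below: before the patch, every call to the page
initializer evaluated a per-page branch so that callers could skip
re-zeroing. A minimal standalone model of that shape, again with
invented names rather than kernel code:

    /*
     * Standalone model of the removed hot-path branch -- invented
     * names, not kernel code.
     */
    #include <string.h>

    struct page_model {
            unsigned long flags;
            int node;
    };

    /* Before: a branch evaluated once per page, even during boot. */
    static void init_page_old(struct page_model *p, int nid, int zero)
    {
            if (zero)
                    memset(p, 0, sizeof(*p));
            p->node = nid;
    }

    /* After: straight-line code; callers no longer pass a flag. */
    static void init_page_new(struct page_model *p, int nid)
    {
            memset(p, 0, sizeof(*p));
            p->node = nid;
    }

    int main(void)
    {
            struct page_model page;

            init_page_old(&page, 0, 1);
            init_page_new(&page, 0);
            return 0;
    }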
[pasha.tatashin@oracle.com: v5]
Link: http://lkml.kernel.org/r/20180228030308.1116-7-pasha.tatashin@oracle.com
Link: http://lkml.kernel.org/r/20180215165920.8570-7-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   28
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3183eb2f579c..a3e2ba4f76bb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1143,10 +1143,9 @@ static void free_one_page(struct zone *zone,
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-                               unsigned long zone, int nid, bool zero)
+                               unsigned long zone, int nid)
 {
-       if (zero)
-               mm_zero_struct_page(page);
+       mm_zero_struct_page(page);
        set_page_links(page, zone, nid, pfn);
        init_page_count(page);
        page_mapcount_reset(page);
@@ -1160,12 +1159,6 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 #endif
 }
 
-static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
-                                       int nid, bool zero)
-{
-       return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
-}
-
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static void __meminit init_reserved_page(unsigned long pfn)
 {
@@ -1184,7 +1177,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
                if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
                        break;
        }
-       __init_single_pfn(pfn, zid, nid, true);
+       __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
 }
 #else
 static inline void init_reserved_page(unsigned long pfn)
@@ -1501,7 +1494,7 @@ static unsigned long __init deferred_init_pages(int nid, int zid,
                } else {
                        page++;
                }
-               __init_single_page(page, pfn, zid, nid, true);
+               __init_single_page(page, pfn, zid, nid);
                nr_pages++;
        }
        return (nr_pages);
@@ -5434,6 +5427,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long pfn;
        unsigned long nr_initialised = 0;
+       struct page *page;
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        struct memblock_region *r = NULL, *tmp;
 #endif
@@ -5486,6 +5480,11 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 #endif
 
 not_early:
+               page = pfn_to_page(pfn);
+               __init_single_page(page, pfn, zone, nid);
+               if (context == MEMMAP_HOTPLUG)
+                       SetPageReserved(page);
+
                /*
                 * Mark the block movable so that blocks are reserved for
                 * movable at startup. This will force kernel allocations
@@ -5502,15 +5501,8 @@ not_early:
                 * because this is done early in sparse_add_one_section
                 */
                if (!(pfn & (pageblock_nr_pages - 1))) {
-                       struct page *page = pfn_to_page(pfn);
-
-                       __init_single_page(page, pfn, zone, nid,
-                                       context != MEMMAP_HOTPLUG);
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                        cond_resched();
-               } else {
-                       __init_single_pfn(pfn, zone, nid,
-                                       context != MEMMAP_HOTPLUG);
-               }
                }
        }
 }
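One detail worth noting in the last hunk: pageblock_nr_pages is a power
of two, so !(pfn & (pageblock_nr_pages - 1)) is true exactly when pfn
is the first frame of its pageblock. A standalone demonstration,
hardcoding the constant to 512 (a typical x86-64 value for 2 MiB
pageblocks; in the kernel it is configuration dependent):

    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL   /* assumed value, for illustration */

    int main(void)
    {
            unsigned long pfns[] = { 0, 1, 511, 512, 1024, 1536, 1537 };

            for (unsigned int i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++) {
                    unsigned long pfn = pfns[i];

                    /* Same mask test as in memmap_init_zone() above. */
                    if (!(pfn & (PAGEBLOCK_NR_PAGES - 1)))
                            printf("pfn %lu starts a pageblock\n", pfn);
                    else
                            printf("pfn %lu is inside a pageblock\n", pfn);
            }
            return 0;
    }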