author		Michal Hocko <mhocko@suse.com>	2019-05-13 20:21:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-14 12:47:49 -0400
commit		940519f0c8b757fdcbc5d14c93cdaada20ded14c (patch)
tree		b6b39399a9ef9914e8c2902b6ffe300d52531913 /mm
parent		5557c766abad25acc8091ccb9641b96e3b3da06f (diff)
mm, memory_hotplug: provide a more generic restrictions for memory hotplug
arch_add_memory and __add_pages take a want_memblock parameter which controls whether the newly added memory should get the sysfs memblock user API (e.g. ZONE_DEVICE users do not want/need this interface).  Some callers even want to control where the memmap is allocated from, by configuring an altmap.

Add a more generic hotplug context for arch_add_memory and __add_pages. struct mhp_restrictions contains flags, which select additional features to be enabled by the memory hotplug (currently only MHP_MEMBLOCK_API), and altmap, an alternative memmap allocator.

This patch shouldn't introduce any functional change.

[akpm@linux-foundation.org: build fix]
Link: http://lkml.kernel.org/r/20190408082633.2864-3-osalvador@suse.de
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
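For context, the new hotplug context type itself is added by this patch in include/linux/memory_hotplug.h, which falls outside the mm/-limited diffstat below; condensed, it looks roughly like this:

/*
 * Restrictions for the memory hotplug:
 * flags:  MHP_ flags
 * altmap: alternative allocator for memmap array
 */
struct mhp_restrictions {
	unsigned long flags;
	struct vmem_altmap *altmap;
};

/* Create the sysfs memblock user API for the added range. */
#define MHP_MEMBLOCK_API	(1 << 0)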
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory_hotplug.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 75f9f6590677..339d5a62d5d5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -273,12 +273,12 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
  * add the new pages.
  */
 int __ref __add_pages(int nid, unsigned long phys_start_pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap,
-		bool want_memblock)
+		unsigned long nr_pages, struct mhp_restrictions *restrictions)
 {
 	unsigned long i;
 	int err = 0;
 	int start_sec, end_sec;
+	struct vmem_altmap *altmap = restrictions->altmap;
 
 	/* during initialize mem_map, align hot-added range to section */
 	start_sec = pfn_to_section_nr(phys_start_pfn);
@@ -299,7 +299,7 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
 
 	for (i = start_sec; i <= end_sec; i++) {
 		err = __add_section(nid, section_nr_to_pfn(i), altmap,
-				want_memblock);
+				restrictions->flags & MHP_MEMBLOCK_API);
 
 		/*
 		 * EEXIST is finally dealt with by ioresource collision
@@ -1097,6 +1097,9 @@ static int online_memory_block(struct memory_block *mem, void *arg)
  */
 int __ref add_memory_resource(int nid, struct resource *res)
 {
+	struct mhp_restrictions restrictions = {
+		.flags = MHP_MEMBLOCK_API,
+	};
 	u64 start, size;
 	bool new_node = false;
 	int ret;
@@ -1124,7 +1127,7 @@ int __ref add_memory_resource(int nid, struct resource *res)
 	new_node = ret;
 
 	/* call arch's memory hotadd */
-	ret = arch_add_memory(nid, start, size, NULL, true);
+	ret = arch_add_memory(nid, start, size, &restrictions);
 	if (ret < 0)
 		goto error;
 
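To illustrate what the more generic context buys: a caller that does not want the sysfs memblock interface (the ZONE_DEVICE case mentioned in the commit message) leaves MHP_MEMBLOCK_API unset and passes its altmap instead. A minimal sketch with a hypothetical helper name; the real conversion of the ZONE_DEVICE caller (devm_memremap_pages) happens outside the mm/ diffstat shown here:

/*
 * Hypothetical ZONE_DEVICE-style caller, illustrative only: skip the
 * sysfs memblock API by leaving MHP_MEMBLOCK_API unset, and allocate
 * the memmap from device memory via the altmap.
 */
static int example_add_device_memory(int nid, u64 start, u64 size,
				     struct vmem_altmap *altmap)
{
	struct mhp_restrictions restrictions = {
		.altmap = altmap,	/* memmap comes from the device */
		/* .flags deliberately 0: no sysfs memblock interface */
	};

	return arch_add_memory(nid, start, size, &restrictions);
}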