aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorTang Chen <tangchen@cn.fujitsu.com>2013-02-22 19:33:49 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 20:50:14 -0500
commit01a178a94e8eaec351b29ee49fbb3d1c124cb7fb (patch)
treede978b65bf5d4a05c78cb5ce1180dc3fb04bd12d /mm
parent27168d38fa209073219abedbe6a9de7ba9acbfad (diff)
acpi, memory-hotplug: support getting hotplug info from SRAT
We now provide an option for users who don't want to specify a physical memory address in the kernel command line. /* * For movablemem_map=acpi: * * SRAT: |_____| |_____| |_________| |_________| ...... * node id: 0 1 1 2 * hotpluggable: n y y n * movablemem_map: |_____| |_________| * * Using movablemem_map, we can prevent memblock from allocating memory * on ZONE_MOVABLE at boot time. */ So users just specify movablemem_map=acpi, and the kernel will use the hotpluggable info in SRAT to determine which memory ranges should be set as ZONE_MOVABLE. If all the memory ranges in SRAT are hotpluggable, then no memory can be used by the kernel. But before parsing SRAT, memblock has already reserved some memory ranges for other purposes, such as for the kernel image, and so on. We cannot prevent the kernel from using this memory. So we need to exclude these ranges even if this memory is hotpluggable. Furthermore, there could be several memory ranges in a single node which the kernel resides in. We may skip one range that has memory reserved by memblock, but if the rest of the memory is too small, then the kernel will fail to boot. So, make the whole node which the kernel resides in un-hotpluggable. Then the kernel has enough memory to use. NOTE: Using this way will degrade NUMA performance because the whole node will be set as ZONE_MOVABLE, and the kernel cannot use memory on it. If users don't want to lose NUMA performance, just don't use it. [akpm@linux-foundation.org: fix warning] [akpm@linux-foundation.org: use strcmp()] Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Jiang Liu <jiang.liu@huawei.com> Cc: Jianguo Wu <wujianguo@huawei.com> Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Lai Jiangshan <laijs@cn.fujitsu.com> Cc: Wu Jianguo <wujianguo@huawei.com> Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. 
Peter Anvin" <hpa@zytor.com> Cc: Len Brown <lenb@kernel.org> Cc: "Brown, Len" <len.brown@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/page_alloc.c22
1 file changed, 21 insertions, 1 deletion
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7ea9a003ad57..a7381be21320 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -203,7 +203,10 @@ static unsigned long __meminitdata dma_reserve;
203 203
204#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 204#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
205/* Movable memory ranges, will also be used by memblock subsystem. */ 205/* Movable memory ranges, will also be used by memblock subsystem. */
206struct movablemem_map movablemem_map; 206struct movablemem_map movablemem_map = {
207 .acpi = false,
208 .nr_map = 0,
209};
207 210
208static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 211static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
209static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 212static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
@@ -5314,6 +5317,23 @@ static int __init cmdline_parse_movablemem_map(char *p)
5314 if (!p) 5317 if (!p)
5315 goto err; 5318 goto err;
5316 5319
5320 if (!strcmp(p, "acpi"))
5321 movablemem_map.acpi = true;
5322
5323 /*
5324 * If user decide to use info from BIOS, all the other user specified
5325 * ranges will be ingored.
5326 */
5327 if (movablemem_map.acpi) {
5328 if (movablemem_map.nr_map) {
5329 memset(movablemem_map.map, 0,
5330 sizeof(struct movablemem_entry)
5331 * movablemem_map.nr_map);
5332 movablemem_map.nr_map = 0;
5333 }
5334 return 0;
5335 }
5336
5317 oldp = p; 5337 oldp = p;
5318 mem_size = memparse(p, &p); 5338 mem_size = memparse(p, &p);
5319 if (p == oldp) 5339 if (p == oldp)