author	Tang Chen <tangchen@cn.fujitsu.com>	2013-02-22 19:33:46 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-23 20:50:14 -0500
commit	27168d38fa209073219abedbe6a9de7ba9acbfad (patch)
tree	6c8ff96722ce02b0c01b2ce4078777712e0df339
parent	e8d1955258091e4c92d5a975ebd7fd8a98f5d30f (diff)
acpi, memory-hotplug: extend movablemem_map ranges to the end of node
When implementing the movablemem_map boot option, we introduced an array
movablemem_map.map[] to store the memory ranges to be set as ZONE_MOVABLE.

Since ZONE_MOVABLE is the last zone of a node, if the user didn't specify
the whole memory range of a node, we need to extend the specified range to
the end of the node, so that we can use it to prevent memblock from
allocating memory in the ranges the user didn't specify.

We now implement the movablemem_map boot option like this:

/*
 * For movablemem_map=nn[KMG]@ss[KMG]:
 *
 * SRAT:             |_____| |_____| |_________| |_________| ......
 * node id:             0       1         1           2
 * user specified:            |__|                |___|
 * movablemem_map:           |___| |_________|    |______| ......
 *
 * Using movablemem_map, we can prevent memblock from allocating memory
 * on ZONE_MOVABLE at boot time.
 *
 * NOTE: In this case, SRAT info will be ignored.
 */

[akpm@linux-foundation.org: clean up code, fix build warning]
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Wu Jianguo <wujianguo@huawei.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Len Brown <lenb@kernel.org>
Cc: "Brown, Len" <len.brown@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
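To make the extension rule above concrete, here is a minimal userspace C sketch of the same idea (hypothetical stand-in types and made-up pfn values, not the kernel implementation): given a node's full pfn range from SRAT and an overlapping user-specified range, the recorded movable range starts at the user-specified start and always runs to the end of the node.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct range { unsigned long start_pfn, end_pfn; };

/*
 * Mirror the changelog rule: everything from the lowest pfn the user
 * specified up to the end of the node becomes ZONE_MOVABLE.
 */
static struct range extend_to_node_end(struct range node, struct range user)
{
	struct range movable;

	/* Never start before the node itself starts. */
	movable.start_pfn = user.start_pfn > node.start_pfn ?
			    user.start_pfn : node.start_pfn;
	/* The movable range always runs to the end of the node. */
	movable.end_pfn = node.end_pfn;
	return movable;
}

int main(void)
{
	/* Node spans pfns [0x100000, 0x300000); the user asked for a piece. */
	struct range node = { 0x100000, 0x300000 };
	struct range user = { 0x180000, 0x200000 };
	struct range movable = extend_to_node_end(node, user);

	/* Prints: movable: [0x180000, 0x300000) */
	printf("movable: [%#lx, %#lx)\n", movable.start_pfn, movable.end_pfn);
	return 0;
}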
-rw-r--r--	arch/x86/mm/srat.c	64
-rw-r--r--	include/linux/mm.h	5
-rw-r--r--	mm/page_alloc.c	34
3 files changed, 98 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index cdd0da9dd530..3e90039e52e0 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -141,11 +141,65 @@ static inline int save_add_info(void) {return 1;}
 static inline int save_add_info(void) {return 0;}
 #endif
 
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static void __init handle_movablemem(int node, u64 start, u64 end)
+{
+	int overlap;
+	unsigned long start_pfn, end_pfn;
+
+	start_pfn = PFN_DOWN(start);
+	end_pfn = PFN_UP(end);
+
+	/*
+	 * For movablemem_map=nn[KMG]@ss[KMG]:
+	 *
+	 * SRAT:             |_____| |_____| |_________| |_________| ......
+	 * node id:             0       1         1           2
+	 * user specified:            |__|                |___|
+	 * movablemem_map:           |___| |_________|    |______| ......
+	 *
+	 * Using movablemem_map, we can prevent memblock from allocating memory
+	 * on ZONE_MOVABLE at boot time.
+	 */
+	overlap = movablemem_map_overlap(start_pfn, end_pfn);
+	if (overlap >= 0) {
+		/*
+		 * If part of this range is in movablemem_map, extend the
+		 * range to the end of the node, because everything from
+		 * the lowest address specified to the end of the node
+		 * will be ZONE_MOVABLE.
+		 */
+		start_pfn = max(start_pfn,
+				movablemem_map.map[overlap].start_pfn);
+		insert_movablemem_map(start_pfn, end_pfn);
+
+		/*
+		 * Set the nodemask, so that if the address range on one node
+		 * is not contiguous, we can add the subsequent ranges on the
+		 * same node into movablemem_map.
+		 */
+		node_set(node, movablemem_map.numa_nodes_hotplug);
+	} else {
+		if (node_isset(node, movablemem_map.numa_nodes_hotplug))
+			/*
+			 * Insert the range if we already have movable ranges
+			 * on the same node.
+			 */
+			insert_movablemem_map(start_pfn, end_pfn);
+	}
+}
+#else		/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+static inline void handle_movablemem(int node, u64 start, u64 end)
+{
+}
+#endif		/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
 	u64 start, end;
+	u32 hotpluggable;
 	int node, pxm;
 
 	if (srat_disabled())
@@ -154,7 +208,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 		goto out_err_bad_srat;
 	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
 		goto out_err;
-	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
+	hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
+	if (hotpluggable && !save_add_info())
 		goto out_err;
 
 	start = ma->base_address;
@@ -174,9 +229,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 
 	node_set(node, numa_nodes_parsed);
 
-	printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
-	       node, pxm,
-	       (unsigned long long) start, (unsigned long long) end - 1);
+	printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx] %s\n",
+	       node, pxm,
+	       (unsigned long long) start, (unsigned long long) end - 1,
+	       hotpluggable ? "Hot Pluggable" : "");
+
+	handle_movablemem(node, start, end);
 
 	return 0;
 out_err_bad_srat:
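With the srat.c hunk above applied, every parsed SRAT entry is also fed to handle_movablemem(), and hot-pluggable entries are flagged in the boot log. An illustrative line, with made-up addresses derived from the new format string, would read:

	SRAT: Node 1 PXM 1 [mem 0x100000000-0x1ffffffff] Hot Pluggable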
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ce9bd3049836..4d7377a1d084 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1368,8 +1368,13 @@ struct movablemem_entry {
 struct movablemem_map {
 	int nr_map;
 	struct movablemem_entry map[MOVABLEMEM_MAP_MAX];
+	nodemask_t numa_nodes_hotplug;	/* on which nodes we specify memory */
 };
 
+extern void __init insert_movablemem_map(unsigned long start_pfn,
+					 unsigned long end_pfn);
+extern int __init movablemem_map_overlap(unsigned long start_pfn,
+					 unsigned long end_pfn);
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 88b9962c99b3..7ea9a003ad57 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5176,6 +5176,36 @@ early_param("kernelcore", cmdline_parse_kernelcore);
 early_param("movablecore", cmdline_parse_movablecore);
 
 /**
+ * movablemem_map_overlap() - Check if a range overlaps movablemem_map.map[].
+ * @start_pfn:	start pfn of the range to be checked
+ * @end_pfn:	end pfn of the range to be checked (exclusive)
+ *
+ * This function checks if a given memory range [start_pfn, end_pfn) overlaps
+ * the movablemem_map.map[] array.
+ *
+ * Return: index of the first overlapped element in movablemem_map.map[],
+ *         or -1 if they don't overlap each other.
+ */
+int __init movablemem_map_overlap(unsigned long start_pfn,
+				  unsigned long end_pfn)
+{
+	int overlap;
+
+	if (!movablemem_map.nr_map)
+		return -1;
+
+	for (overlap = 0; overlap < movablemem_map.nr_map; overlap++)
+		if (start_pfn < movablemem_map.map[overlap].end_pfn)
+			break;
+
+	if (overlap == movablemem_map.nr_map ||
+	    end_pfn <= movablemem_map.map[overlap].start_pfn)
+		return -1;
+
+	return overlap;
+}
+
+/**
  * insert_movablemem_map - Insert a memory range into movablemem_map.map.
  * @start_pfn:	start pfn of the range
  * @end_pfn:	end pfn of the range
@@ -5183,8 +5213,8 @@ early_param("movablecore", cmdline_parse_movablecore);
  * This function will also merge the overlapped ranges, and sort the array
  * by start_pfn in monotonic increasing order.
  */
-static void __init insert_movablemem_map(unsigned long start_pfn,
-					 unsigned long end_pfn)
+void __init insert_movablemem_map(unsigned long start_pfn,
+				  unsigned long end_pfn)
 {
 	int pos, overlap;
 
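The page_alloc.c diff is truncated here, but the return contract of movablemem_map_overlap() can be exercised in isolation. Below is a minimal userspace sketch of the same search logic (simplified stand-in structures and sample data, assuming entries sorted by start_pfn as insert_movablemem_map guarantees); it is an illustration, not kernel code.

#include <stdio.h>

/* Simplified userspace stand-ins for the kernel structures above. */
struct entry { unsigned long start_pfn, end_pfn; };
struct map { int nr_map; struct entry map[8]; };

/*
 * Same logic as the kernel function shown in the hunk above: return the
 * index of the first entry overlapping [start_pfn, end_pfn), or -1.
 */
static int map_overlap(const struct map *m, unsigned long start_pfn,
		       unsigned long end_pfn)
{
	int i;

	if (!m->nr_map)
		return -1;

	/* Find the first entry whose end lies beyond the range start. */
	for (i = 0; i < m->nr_map; i++)
		if (start_pfn < m->map[i].end_pfn)
			break;

	/* No such entry, or the range ends before that entry begins. */
	if (i == m->nr_map || end_pfn <= m->map[i].start_pfn)
		return -1;

	return i;
}

int main(void)
{
	struct map m = { 2, { { 0x100, 0x200 }, { 0x400, 0x500 } } };

	printf("%d\n", map_overlap(&m, 0x180, 0x300)); /* 0: hits 1st entry */
	printf("%d\n", map_overlap(&m, 0x200, 0x400)); /* -1: falls in gap */
	printf("%d\n", map_overlap(&m, 0x450, 0x600)); /* 1: hits 2nd entry */
	return 0;
}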