author     Tang Chen <tangchen@cn.fujitsu.com>            2013-02-22 19:33:46 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org> 2013-02-23 20:50:14 -0500
commit     27168d38fa209073219abedbe6a9de7ba9acbfad (patch)
tree       6c8ff96722ce02b0c01b2ce4078777712e0df339 /arch/x86/mm
parent     e8d1955258091e4c92d5a975ebd7fd8a98f5d30f (diff)
acpi, memory-hotplug: extend movablemem_map ranges to the end of node
When implementing the movablemem_map boot option, we introduced an array
movablemem_map.map[] to store the memory ranges to be set as ZONE_MOVABLE.
Since ZONE_MOVABLE is the last zone of a node, if the user didn't specify
the whole node's memory range, we need to extend it to the node end so that
we can use it to prevent memblock from allocating memory in the ranges the
user didn't specify.

We now implement the movablemem_map boot option like this:

/*
 * For movablemem_map=nn[KMG]@ss[KMG]:
 *
 * SRAT:             |_____| |_____| |_________| |_________| ......
 * node id:          0       1       1           2
 * user specified:           |__|                |___|
 * movablemem_map:           |___|   |_________| |______| ......
 *
 * Using movablemem_map, we can prevent memblock from allocating memory
 * on ZONE_MOVABLE at boot time.
 *
 * NOTE: In this case, SRAT info will be ignored.
 */

[akpm@linux-foundation.org: clean up code, fix build warning]
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Wu Jianguo <wujianguo@huawei.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Len Brown <lenb@kernel.org>
Cc: "Brown, Len" <len.brown@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
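To make the extension rule above concrete, here is a minimal user-space
sketch of the policy (an illustration, not the kernel implementation;
struct mm_range and extend_range_to_node_end() are hypothetical names):
when a user-specified range overlaps a node, the recorded movable range
starts at the higher of the two start PFNs and always runs to the end of
the node.

#include <stdio.h>

/* A PFN range [start_pfn, end_pfn). Illustrative only. */
struct mm_range {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

/*
 * Extend a user-specified movable range to the end of its node:
 * start no lower than what the user asked for, end at the node boundary.
 */
static struct mm_range extend_range_to_node_end(struct mm_range user,
						struct mm_range node)
{
	struct mm_range movable;

	movable.start_pfn = user.start_pfn > node.start_pfn ?
			    user.start_pfn : node.start_pfn;
	movable.end_pfn = node.end_pfn;		/* always reach the node end */
	return movable;
}

int main(void)
{
	struct mm_range node = { 0x100000, 0x200000 };	/* hypothetical node */
	struct mm_range user = { 0x180000, 0x1a0000 };	/* user-specified part */
	struct mm_range movable = extend_range_to_node_end(user, node);

	/* Prints: movable: [0x180000-0x200000) */
	printf("movable: [%#lx-%#lx)\n", movable.start_pfn, movable.end_pfn);
	return 0;
}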
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/srat.c  64
1 file changed, 61 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index cdd0da9dd530..3e90039e52e0 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -141,11 +141,65 @@ static inline int save_add_info(void) {return 1;}
 static inline int save_add_info(void) {return 0;}
 #endif
 
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static void __init handle_movablemem(int node, u64 start, u64 end)
+{
+	int overlap;
+	unsigned long start_pfn, end_pfn;
+
+	start_pfn = PFN_DOWN(start);
+	end_pfn = PFN_UP(end);
+
+	/*
+	 * For movablemem_map=nn[KMG]@ss[KMG]:
+	 *
+	 * SRAT:             |_____| |_____| |_________| |_________| ......
+	 * node id:          0       1       1           2
+	 * user specified:           |__|                |___|
+	 * movablemem_map:           |___|   |_________| |______| ......
+	 *
+	 * Using movablemem_map, we can prevent memblock from allocating
+	 * memory on ZONE_MOVABLE at boot time.
+	 */
+	overlap = movablemem_map_overlap(start_pfn, end_pfn);
+	if (overlap >= 0) {
+		/*
+		 * If part of this range is in movablemem_map, we need to
+		 * add the range after it to extend the range to the end
+		 * of the node, because everything from the lowest address
+		 * specified to the end of the node will be ZONE_MOVABLE.
+		 */
+		start_pfn = max(start_pfn,
+				movablemem_map.map[overlap].start_pfn);
+		insert_movablemem_map(start_pfn, end_pfn);
+
+		/*
+		 * Set the nodemask, so that if the address range on one node
+		 * is not contiguous, we can add the subsequent ranges on the
+		 * same node into movablemem_map.
+		 */
+		node_set(node, movablemem_map.numa_nodes_hotplug);
+	} else {
+		if (node_isset(node, movablemem_map.numa_nodes_hotplug))
+			/*
+			 * Insert the range if we already have movable ranges
+			 * on the same node.
+			 */
+			insert_movablemem_map(start_pfn, end_pfn);
+	}
+}
+#else	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+static inline void handle_movablemem(int node, u64 start, u64 end)
+{
+}
+#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
 	u64 start, end;
+	u32 hotpluggable;
 	int node, pxm;
 
 	if (srat_disabled())
@@ -154,7 +208,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 		goto out_err_bad_srat;
 	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
 		goto out_err;
-	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
+	hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
+	if (hotpluggable && !save_add_info())
 		goto out_err;
 
 	start = ma->base_address;
@@ -174,9 +229,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 
 	node_set(node, numa_nodes_parsed);
 
-	printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
+	printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx] %s\n",
 	       node, pxm,
-	       (unsigned long long) start, (unsigned long long) end - 1);
+	       (unsigned long long) start, (unsigned long long) end - 1,
+	       hotpluggable ? "Hot Pluggable" : "");
+
+	handle_movablemem(node, start, end);
 
 	return 0;
 out_err_bad_srat:
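With this change, a hot-pluggable SRAT entry is also flagged in the boot
log. Given the printk() format above, a parsed entry would be reported as
a line of the form (node, PXM and addresses illustrative):

SRAT: Node 1 PXM 1 [mem 0x200000000-0x2ffffffff] Hot Pluggable

after which handle_movablemem() folds the node's ranges into
movablemem_map as described in the commit message.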