Diffstat (limited to 'arch/x86/mm/srat.c')
-rw-r--r--  arch/x86/mm/srat.c  64
1 file changed, 61 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index cdd0da9dd530..3e90039e52e0 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -141,11 +141,65 @@ static inline int save_add_info(void) {return 1;}
 static inline int save_add_info(void) {return 0;}
 #endif
 
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static void __init handle_movablemem(int node, u64 start, u64 end)
+{
+        int overlap;
+        unsigned long start_pfn, end_pfn;
+
+        start_pfn = PFN_DOWN(start);
+        end_pfn = PFN_UP(end);
+
+        /*
+         * For movablemem_map=nn[KMG]@ss[KMG]:
+         *
+         * SRAT:            |_____| |_____| |_________| |_________| ......
+         * node id:            0       1        1            2
+         * user specified:           |__|                  |___|
+         * movablemem_map:           |___| |_________|     |______| ......
+         *
+         * Using movablemem_map, we can prevent memblock from allocating
+         * memory on ZONE_MOVABLE at boot time.
+         */
+        overlap = movablemem_map_overlap(start_pfn, end_pfn);
+        if (overlap >= 0) {
+                /*
+                 * If part of this range is in movablemem_map, insert the
+                 * rest of it as well, so that the movable range extends to
+                 * the end of the node: everything from the lowest address
+                 * the user specified to the end of the node will be
+                 * ZONE_MOVABLE.
+                 */
+                start_pfn = max(start_pfn,
+                                movablemem_map.map[overlap].start_pfn);
+                insert_movablemem_map(start_pfn, end_pfn);
+
+                /*
+                 * Set the nodemask, so that if the address ranges on one
+                 * node are not contiguous, we can still add the subsequent
+                 * ranges on the same node into movablemem_map.
+                 */
+                node_set(node, movablemem_map.numa_nodes_hotplug);
+        } else {
+                if (node_isset(node, movablemem_map.numa_nodes_hotplug))
+                        /*
+                         * Insert the range if we already have movable
+                         * ranges on the same node.
+                         */
+                        insert_movablemem_map(start_pfn, end_pfn);
+        }
+}
+#else           /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+static inline void handle_movablemem(int node, u64 start, u64 end)
+{
+}
+#endif          /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
         u64 start, end;
+        u32 hotpluggable;
         int node, pxm;
 
         if (srat_disabled())
@@ -154,7 +208,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
                 goto out_err_bad_srat;
         if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
                 goto out_err;
-        if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
+        hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
+        if (hotpluggable && !save_add_info())
                 goto out_err;
 
         start = ma->base_address;
@@ -174,9 +229,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 
         node_set(node, numa_nodes_parsed);
 
-        printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
-               node, pxm,
-               (unsigned long long) start, (unsigned long long) end - 1);
+        printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx] %s\n",
+               node, pxm,
+               (unsigned long long) start, (unsigned long long) end - 1,
+               hotpluggable ? "Hot Pluggable" : "");
+
+        handle_movablemem(node, start, end);
 
         return 0;
 out_err_bad_srat:
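
Note on the helpers used above: movablemem_map_overlap(), insert_movablemem_map(),
and the movablemem_map structure itself are introduced elsewhere in this patch
series and are not part of this file. The sketch below is a hypothetical,
userspace-compilable model of the overlap lookup that handle_movablemem() relies
on -- the entry layout, the array bound, the sample data, and the linear scan
are illustrative assumptions, not the kernel implementation.

#include <stdio.h>

/* One user-specified movable range, in page frame numbers. */
struct movablemem_entry {
        unsigned long start_pfn;        /* inclusive */
        unsigned long end_pfn;          /* exclusive */
};

/* Illustrative stand-in for the kernel's movablemem_map global. */
static struct {
        int nr_map;
        struct movablemem_entry map[8];
} movablemem_map = {
        .nr_map = 2,
        .map = { { 0x100, 0x200 }, { 0x400, 0x500 } },  /* sample ranges */
};

/*
 * Return the index of the first entry overlapping [start_pfn, end_pfn),
 * or -1 if the range touches no user-specified movable range. A non-negative
 * result is what sends handle_movablemem() down the "extend to end of node"
 * branch in the patch above.
 */
static int movablemem_map_overlap(unsigned long start_pfn,
                                  unsigned long end_pfn)
{
        int i;

        for (i = 0; i < movablemem_map.nr_map; i++)
                if (start_pfn < movablemem_map.map[i].end_pfn &&
                    end_pfn > movablemem_map.map[i].start_pfn)
                        return i;
        return -1;
}

int main(void)
{
        /* An SRAT range straddling the first user range: prints 0. */
        printf("%d\n", movablemem_map_overlap(0x080, 0x180));
        /* An SRAT range below every user range: prints -1. */
        printf("%d\n", movablemem_map_overlap(0x000, 0x080));
        return 0;
}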