aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86_64/mm/srat.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86_64/mm/srat.c')
-rw-r--r--arch/x86_64/mm/srat.c51
1 files changed, 29 insertions, 22 deletions
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index f8c04d6935c9..19396a261e8d 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -23,12 +23,6 @@
23 23
24int acpi_numa __initdata; 24int acpi_numa __initdata;
25 25
26#if (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
27 defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) \
28 && !defined(CONFIG_MEMORY_HOTPLUG)
29#define RESERVE_HOTADD 1
30#endif
31
32static struct acpi_table_slit *acpi_slit; 26static struct acpi_table_slit *acpi_slit;
33 27
34static nodemask_t nodes_parsed __initdata; 28static nodemask_t nodes_parsed __initdata;
@@ -36,9 +30,6 @@ static struct bootnode nodes[MAX_NUMNODES] __initdata;
36static struct bootnode nodes_add[MAX_NUMNODES] __initdata; 30static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
37static int found_add_area __initdata; 31static int found_add_area __initdata;
38int hotadd_percent __initdata = 0; 32int hotadd_percent __initdata = 0;
39#ifndef RESERVE_HOTADD
40#define hotadd_percent 0 /* Ignore all settings */
41#endif
42 33
43/* Too small nodes confuse the VM badly. Usually they result 34/* Too small nodes confuse the VM badly. Usually they result
44 from BIOS bugs. */ 35 from BIOS bugs. */
@@ -160,7 +151,7 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
160 pxm, pa->apic_id, node); 151 pxm, pa->apic_id, node);
161} 152}
162 153
163#ifdef RESERVE_HOTADD 154#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
164/* 155/*
165 * Protect against too large hotadd areas that would fill up memory. 156 * Protect against too large hotadd areas that would fill up memory.
166 */ 157 */
@@ -203,15 +194,37 @@ static int hotadd_enough_memory(struct bootnode *nd)
203 return 1; 194 return 1;
204} 195}
205 196
197static int update_end_of_memory(unsigned long end)
198{
199 found_add_area = 1;
200 if ((end >> PAGE_SHIFT) > end_pfn)
201 end_pfn = end >> PAGE_SHIFT;
202 return 1;
203}
204
205static inline int save_add_info(void)
206{
207 return hotadd_percent > 0;
208}
209#else
210int update_end_of_memory(unsigned long end) {return 0;}
211static int hotadd_enough_memory(struct bootnode *nd) {return 1;}
212#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
213static inline int save_add_info(void) {return 1;}
214#else
215static inline int save_add_info(void) {return 0;}
216#endif
217#endif
206/* 218/*
207 * It is fine to add this area to the nodes data it will be used later 219 * Update nodes_add and decide whether to include the add area in the zone.
220 * Both SPARSE and RESERVE need nodes_add information.
208 * This code supports one contiguous hot add area per node. 221 * This code supports one contiguous hot add area per node.
209 */ 222 */
210static int reserve_hotadd(int node, unsigned long start, unsigned long end) 223static int reserve_hotadd(int node, unsigned long start, unsigned long end)
211{ 224{
212 unsigned long s_pfn = start >> PAGE_SHIFT; 225 unsigned long s_pfn = start >> PAGE_SHIFT;
213 unsigned long e_pfn = end >> PAGE_SHIFT; 226 unsigned long e_pfn = end >> PAGE_SHIFT;
214 int changed = 0; 227 int ret = 0, changed = 0;
215 struct bootnode *nd = &nodes_add[node]; 228 struct bootnode *nd = &nodes_add[node];
216 229
217 /* I had some trouble with strange memory hotadd regions breaking 230 /* I had some trouble with strange memory hotadd regions breaking
@@ -240,7 +253,6 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
240 253
241 /* Looks good */ 254 /* Looks good */
242 255
243 found_add_area = 1;
244 if (nd->start == nd->end) { 256 if (nd->start == nd->end) {
245 nd->start = start; 257 nd->start = start;
246 nd->end = end; 258 nd->end = end;
@@ -258,14 +270,12 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
258 printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); 270 printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
259 } 271 }
260 272
261 if ((nd->end >> PAGE_SHIFT) > end_pfn) 273 ret = update_end_of_memory(nd->end);
262 end_pfn = nd->end >> PAGE_SHIFT;
263 274
264 if (changed) 275 if (changed)
265 printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end); 276 printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
266 return 0; 277 return ret;
267} 278}
268#endif
269 279
270/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ 280/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
271void __init 281void __init
@@ -284,7 +294,7 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
284 } 294 }
285 if (ma->flags.enabled == 0) 295 if (ma->flags.enabled == 0)
286 return; 296 return;
287 if (ma->flags.hot_pluggable && hotadd_percent == 0) 297 if (ma->flags.hot_pluggable && !save_add_info())
288 return; 298 return;
289 start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32); 299 start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
290 end = start + (ma->length_lo | ((u64)ma->length_hi << 32)); 300 end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
@@ -327,15 +337,13 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
327 push_node_boundaries(node, nd->start >> PAGE_SHIFT, 337 push_node_boundaries(node, nd->start >> PAGE_SHIFT,
328 nd->end >> PAGE_SHIFT); 338 nd->end >> PAGE_SHIFT);
329 339
330#ifdef RESERVE_HOTADD 340 if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
331 if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) {
332 /* Ignore hotadd region. Undo damage */ 341 /* Ignore hotadd region. Undo damage */
333 printk(KERN_NOTICE "SRAT: Hotplug region ignored\n"); 342 printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
334 *nd = oldnode; 343 *nd = oldnode;
335 if ((nd->start | nd->end) == 0) 344 if ((nd->start | nd->end) == 0)
336 node_clear(node, nodes_parsed); 345 node_clear(node, nodes_parsed);
337 } 346 }
338#endif
339} 347}
340 348
341/* Sanity check to catch more bad SRATs (they are amazingly common). 349/* Sanity check to catch more bad SRATs (they are amazingly common).
@@ -351,7 +359,6 @@ static int nodes_cover_memory(void)
351 unsigned long e = nodes[i].end >> PAGE_SHIFT; 359 unsigned long e = nodes[i].end >> PAGE_SHIFT;
352 pxmram += e - s; 360 pxmram += e - s;
353 pxmram -= absent_pages_in_range(s, e); 361 pxmram -= absent_pages_in_range(s, e);
354 pxmram -= nodes_add[i].end - nodes_add[i].start;
355 if ((long)pxmram < 0) 362 if ((long)pxmram < 0)
356 pxmram = 0; 363 pxmram = 0;
357 } 364 }