diff options
author | Keith Mannthey <kmannth@us.ibm.com> | 2006-10-01 02:27:05 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-10-01 03:39:18 -0400 |
commit | 71efa8fdc55e70ec6687c897a30759f0a2c2ad7e (patch) | |
tree | 93975fd0431f856f7285ad90a13cc4ea73f740d3 /arch/x86_64/mm/srat.c | |
parent | ec69acbb1191df671ff8e07c8e146619a5c53f70 (diff) |
[PATCH] hot-add-mem x86_64: Enable SPARSEMEM in srat.c
Enable x86_64 srat.c to share code between both reserve and sparsemem based
add memory paths. Both paths need the hot-add area node locality information
(nodes_add). This code refactors the code path to allow this.
Signed-off-by: Keith Mannthey <kmannth@us.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/mm/srat.c')
-rw-r--r-- | arch/x86_64/mm/srat.c | 51 |
1 files changed, 29 insertions, 22 deletions
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c index f8c04d6935c9..19396a261e8d 100644 --- a/arch/x86_64/mm/srat.c +++ b/arch/x86_64/mm/srat.c | |||
@@ -23,12 +23,6 @@ | |||
23 | 23 | ||
24 | int acpi_numa __initdata; | 24 | int acpi_numa __initdata; |
25 | 25 | ||
26 | #if (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \ | ||
27 | defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) \ | ||
28 | && !defined(CONFIG_MEMORY_HOTPLUG) | ||
29 | #define RESERVE_HOTADD 1 | ||
30 | #endif | ||
31 | |||
32 | static struct acpi_table_slit *acpi_slit; | 26 | static struct acpi_table_slit *acpi_slit; |
33 | 27 | ||
34 | static nodemask_t nodes_parsed __initdata; | 28 | static nodemask_t nodes_parsed __initdata; |
@@ -36,9 +30,6 @@ static struct bootnode nodes[MAX_NUMNODES] __initdata; | |||
36 | static struct bootnode nodes_add[MAX_NUMNODES] __initdata; | 30 | static struct bootnode nodes_add[MAX_NUMNODES] __initdata; |
37 | static int found_add_area __initdata; | 31 | static int found_add_area __initdata; |
38 | int hotadd_percent __initdata = 0; | 32 | int hotadd_percent __initdata = 0; |
39 | #ifndef RESERVE_HOTADD | ||
40 | #define hotadd_percent 0 /* Ignore all settings */ | ||
41 | #endif | ||
42 | 33 | ||
43 | /* Too small nodes confuse the VM badly. Usually they result | 34 | /* Too small nodes confuse the VM badly. Usually they result |
44 | from BIOS bugs. */ | 35 | from BIOS bugs. */ |
@@ -160,7 +151,7 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) | |||
160 | pxm, pa->apic_id, node); | 151 | pxm, pa->apic_id, node); |
161 | } | 152 | } |
162 | 153 | ||
163 | #ifdef RESERVE_HOTADD | 154 | #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE |
164 | /* | 155 | /* |
165 | * Protect against too large hotadd areas that would fill up memory. | 156 | * Protect against too large hotadd areas that would fill up memory. |
166 | */ | 157 | */ |
@@ -203,15 +194,37 @@ static int hotadd_enough_memory(struct bootnode *nd) | |||
203 | return 1; | 194 | return 1; |
204 | } | 195 | } |
205 | 196 | ||
197 | static int update_end_of_memory(unsigned long end) | ||
198 | { | ||
199 | found_add_area = 1; | ||
200 | if ((end >> PAGE_SHIFT) > end_pfn) | ||
201 | end_pfn = end >> PAGE_SHIFT; | ||
202 | return 1; | ||
203 | } | ||
204 | |||
205 | static inline int save_add_info(void) | ||
206 | { | ||
207 | return hotadd_percent > 0; | ||
208 | } | ||
209 | #else | ||
210 | int update_end_of_memory(unsigned long end) {return 0;} | ||
211 | static int hotadd_enough_memory(struct bootnode *nd) {return 1;} | ||
212 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE | ||
213 | static inline int save_add_info(void) {return 1;} | ||
214 | #else | ||
215 | static inline int save_add_info(void) {return 0;} | ||
216 | #endif | ||
217 | #endif | ||
206 | /* | 218 | /* |
207 | * It is fine to add this area to the nodes data it will be used later | 219 | * Update nodes_add and decide if to include add are in the zone. |
220 | * Both SPARSE and RESERVE need nodes_add infomation. | ||
208 | * This code supports one contigious hot add area per node. | 221 | * This code supports one contigious hot add area per node. |
209 | */ | 222 | */ |
210 | static int reserve_hotadd(int node, unsigned long start, unsigned long end) | 223 | static int reserve_hotadd(int node, unsigned long start, unsigned long end) |
211 | { | 224 | { |
212 | unsigned long s_pfn = start >> PAGE_SHIFT; | 225 | unsigned long s_pfn = start >> PAGE_SHIFT; |
213 | unsigned long e_pfn = end >> PAGE_SHIFT; | 226 | unsigned long e_pfn = end >> PAGE_SHIFT; |
214 | int changed = 0; | 227 | int ret = 0, changed = 0; |
215 | struct bootnode *nd = &nodes_add[node]; | 228 | struct bootnode *nd = &nodes_add[node]; |
216 | 229 | ||
217 | /* I had some trouble with strange memory hotadd regions breaking | 230 | /* I had some trouble with strange memory hotadd regions breaking |
@@ -240,7 +253,6 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end) | |||
240 | 253 | ||
241 | /* Looks good */ | 254 | /* Looks good */ |
242 | 255 | ||
243 | found_add_area = 1; | ||
244 | if (nd->start == nd->end) { | 256 | if (nd->start == nd->end) { |
245 | nd->start = start; | 257 | nd->start = start; |
246 | nd->end = end; | 258 | nd->end = end; |
@@ -258,14 +270,12 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end) | |||
258 | printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); | 270 | printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); |
259 | } | 271 | } |
260 | 272 | ||
261 | if ((nd->end >> PAGE_SHIFT) > end_pfn) | 273 | ret = update_end_of_memory(nd->end); |
262 | end_pfn = nd->end >> PAGE_SHIFT; | ||
263 | 274 | ||
264 | if (changed) | 275 | if (changed) |
265 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end); | 276 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end); |
266 | return 0; | 277 | return ret; |
267 | } | 278 | } |
268 | #endif | ||
269 | 279 | ||
270 | /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ | 280 | /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ |
271 | void __init | 281 | void __init |
@@ -284,7 +294,7 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) | |||
284 | } | 294 | } |
285 | if (ma->flags.enabled == 0) | 295 | if (ma->flags.enabled == 0) |
286 | return; | 296 | return; |
287 | if (ma->flags.hot_pluggable && hotadd_percent == 0) | 297 | if (ma->flags.hot_pluggable && !save_add_info()) |
288 | return; | 298 | return; |
289 | start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32); | 299 | start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32); |
290 | end = start + (ma->length_lo | ((u64)ma->length_hi << 32)); | 300 | end = start + (ma->length_lo | ((u64)ma->length_hi << 32)); |
@@ -327,15 +337,13 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) | |||
327 | push_node_boundaries(node, nd->start >> PAGE_SHIFT, | 337 | push_node_boundaries(node, nd->start >> PAGE_SHIFT, |
328 | nd->end >> PAGE_SHIFT); | 338 | nd->end >> PAGE_SHIFT); |
329 | 339 | ||
330 | #ifdef RESERVE_HOTADD | 340 | if (ma->flags.hot_pluggable && !reserve_hotadd(node, start, end) < 0) { |
331 | if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) { | ||
332 | /* Ignore hotadd region. Undo damage */ | 341 | /* Ignore hotadd region. Undo damage */ |
333 | printk(KERN_NOTICE "SRAT: Hotplug region ignored\n"); | 342 | printk(KERN_NOTICE "SRAT: Hotplug region ignored\n"); |
334 | *nd = oldnode; | 343 | *nd = oldnode; |
335 | if ((nd->start | nd->end) == 0) | 344 | if ((nd->start | nd->end) == 0) |
336 | node_clear(node, nodes_parsed); | 345 | node_clear(node, nodes_parsed); |
337 | } | 346 | } |
338 | #endif | ||
339 | } | 347 | } |
340 | 348 | ||
341 | /* Sanity check to catch more bad SRATs (they are amazingly common). | 349 | /* Sanity check to catch more bad SRATs (they are amazingly common). |
@@ -351,7 +359,6 @@ static int nodes_cover_memory(void) | |||
351 | unsigned long e = nodes[i].end >> PAGE_SHIFT; | 359 | unsigned long e = nodes[i].end >> PAGE_SHIFT; |
352 | pxmram += e - s; | 360 | pxmram += e - s; |
353 | pxmram -= absent_pages_in_range(s, e); | 361 | pxmram -= absent_pages_in_range(s, e); |
354 | pxmram -= nodes_add[i].end - nodes_add[i].start; | ||
355 | if ((long)pxmram < 0) | 362 | if ((long)pxmram < 0) |
356 | pxmram = 0; | 363 | pxmram = 0; |
357 | } | 364 | } |