diff options
author | Mel Gorman <mel@csn.ul.ie> | 2006-09-27 04:49:52 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-27 11:26:11 -0400 |
commit | 5cb248abf5ab65ab543b2d5fc16c738b28031fc0 (patch) | |
tree | e9af2f7f86000e36f11f1091cb675c1738d69ca3 /arch/x86_64/mm/srat.c | |
parent | 4cfee88ad30acc47f02b8b7ba3db8556262dce1e (diff) |
[PATCH] Have x86_64 use add_active_range() and free_area_init_nodes
Size zones and holes in an architecture independent manner for x86_64.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Andi Kleen <ak@muc.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Keith Mannthey" <kmannth@gmail.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/mm/srat.c')
-rw-r--r-- | arch/x86_64/mm/srat.c | 11 |
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c index ca10701e7a9..7b50bb1caab 100644 --- a/arch/x86_64/mm/srat.c +++ b/arch/x86_64/mm/srat.c | |||
@@ -93,6 +93,7 @@ static __init void bad_srat(void) | |||
93 | apicid_to_node[i] = NUMA_NO_NODE; | 93 | apicid_to_node[i] = NUMA_NO_NODE; |
94 | for (i = 0; i < MAX_NUMNODES; i++) | 94 | for (i = 0; i < MAX_NUMNODES; i++) |
95 | nodes_add[i].start = nodes[i].end = 0; | 95 | nodes_add[i].start = nodes[i].end = 0; |
96 | remove_all_active_ranges(); | ||
96 | } | 97 | } |
97 | 98 | ||
98 | static __init inline int srat_disabled(void) | 99 | static __init inline int srat_disabled(void) |
@@ -175,7 +176,7 @@ static int hotadd_enough_memory(struct bootnode *nd) | |||
175 | 176 | ||
176 | if (mem < 0) | 177 | if (mem < 0) |
177 | return 0; | 178 | return 0; |
178 | allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE; | 179 | allowed = (end_pfn - absent_pages_in_range(0, end_pfn)) * PAGE_SIZE; |
179 | allowed = (allowed / 100) * hotadd_percent; | 180 | allowed = (allowed / 100) * hotadd_percent; |
180 | if (allocated + mem > allowed) { | 181 | if (allocated + mem > allowed) { |
181 | unsigned long range; | 182 | unsigned long range; |
@@ -225,7 +226,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end) | |||
225 | } | 226 | } |
226 | 227 | ||
227 | /* This check might be a bit too strict, but I'm keeping it for now. */ | 228 | /* This check might be a bit too strict, but I'm keeping it for now. */ |
228 | if (e820_hole_size(s_pfn, e_pfn) != e_pfn - s_pfn) { | 229 | if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) { |
229 | printk(KERN_ERR "SRAT: Hotplug area has existing memory\n"); | 230 | printk(KERN_ERR "SRAT: Hotplug area has existing memory\n"); |
230 | return -1; | 231 | return -1; |
231 | } | 232 | } |
@@ -319,6 +320,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) | |||
319 | 320 | ||
320 | printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm, | 321 | printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm, |
321 | nd->start, nd->end); | 322 | nd->start, nd->end); |
323 | e820_register_active_regions(node, nd->start >> PAGE_SHIFT, | ||
324 | nd->end >> PAGE_SHIFT); | ||
322 | 325 | ||
323 | #ifdef RESERVE_HOTADD | 326 | #ifdef RESERVE_HOTADD |
324 | if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) { | 327 | if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) { |
@@ -343,13 +346,13 @@ static int nodes_cover_memory(void) | |||
343 | unsigned long s = nodes[i].start >> PAGE_SHIFT; | 346 | unsigned long s = nodes[i].start >> PAGE_SHIFT; |
344 | unsigned long e = nodes[i].end >> PAGE_SHIFT; | 347 | unsigned long e = nodes[i].end >> PAGE_SHIFT; |
345 | pxmram += e - s; | 348 | pxmram += e - s; |
346 | pxmram -= e820_hole_size(s, e); | 349 | pxmram -= absent_pages_in_range(s, e); |
347 | pxmram -= nodes_add[i].end - nodes_add[i].start; | 350 | pxmram -= nodes_add[i].end - nodes_add[i].start; |
348 | if ((long)pxmram < 0) | 351 | if ((long)pxmram < 0) |
349 | pxmram = 0; | 352 | pxmram = 0; |
350 | } | 353 | } |
351 | 354 | ||
352 | e820ram = end_pfn - e820_hole_size(0, end_pfn); | 355 | e820ram = end_pfn - absent_pages_in_range(0, end_pfn); |
353 | /* We seem to lose 3 pages somewhere. Allow a bit of slack. */ | 356 | /* We seem to lose 3 pages somewhere. Allow a bit of slack. */ |
354 | if ((long)(e820ram - pxmram) >= 1*1024*1024) { | 357 | if ((long)(e820ram - pxmram) >= 1*1024*1024) { |
355 | printk(KERN_ERR | 358 | printk(KERN_ERR |