author	David Woodhouse <dwmw2@infradead.org>	2006-05-24 04:22:21 -0400
committer	David Woodhouse <dwmw2@infradead.org>	2006-05-24 04:22:21 -0400
commit	66643de455c27973ac31ad6de9f859d399916842
tree	7ebed7f051879007d4b11d6aaa9e65a1bcb0b08f /arch/x86_64/mm
parent	2c23d62abb820e19c54012520f08a198c2233a85
parent	387e2b0439026aa738a9edca15a57e5c0bcb4dfc
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	include/asm-powerpc/unistd.h
	include/asm-sparc/unistd.h
	include/asm-sparc64/unistd.h

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--	arch/x86_64/mm/srat.c	15
1 file changed, 11 insertions, 4 deletions
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 15ae9fcd65a7..e1513532df29 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
 static int found_add_area __initdata;
-int hotadd_percent __initdata = 10;
+int hotadd_percent __initdata = 0;
+#ifndef RESERVE_HOTADD
+#define hotadd_percent 0	/* Ignore all settings */
+#endif
 static u8 pxm2node[256] = { [0 ... 255] = 0xff };
 
 /* Too small nodes confuse the VM badly. Usually they result
@@ -103,6 +106,7 @@ static __init void bad_srat(void)
 	int i;
 	printk(KERN_ERR "SRAT: SRAT not used.\n");
 	acpi_numa = -1;
+	found_add_area = 0;
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		apicid_to_node[i] = NUMA_NO_NODE;
 	for (i = 0; i < MAX_NUMNODES; i++)
@@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 	int pxm, node;
 	if (srat_disabled())
 		return;
-	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {	bad_srat();
+	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+		bad_srat();
 		return;
 	}
 	if (pa->flags.enabled == 0)
@@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
 	allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
 	allowed = (allowed / 100) * hotadd_percent;
 	if (allocated + mem > allowed) {
+		unsigned long range;
 		/* Give them at least part of their hotadd memory upto hotadd_percent
 		   It would be better to spread the limit out
 		   over multiple hotplug areas, but that is too complicated
 		   right now */
 		if (allocated >= allowed)
 			return 0;
-		pages = (allowed - allocated + mem) / sizeof(struct page);
+		range = allowed - allocated;
+		pages = (range / PAGE_SIZE);
 		mem = pages * sizeof(struct page);
-		nd->end = nd->start + pages*PAGE_SIZE;
+		nd->end = nd->start + range;
 	}
 	/* Not completely fool proof, but a good sanity check */
 	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
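
The first hunk relies on a small preprocessor pattern: the tunable stays a real variable so it can still be set, but when RESERVE_HOTADD is not defined the same identifier is also defined as the constant 0, so every later use in the file folds to zero and the compiler can drop the hot-add paths. A minimal sketch of the pattern, assuming a hypothetical RESERVE_FEATURE switch and feature_percent tunable (not the kernel code itself):

#include <stdio.h>

/* Real tunable: still defined so it can be set (e.g. from a boot option). */
int feature_percent = 10;

#ifndef RESERVE_FEATURE
#define feature_percent 0	/* feature compiled out: every use below folds to 0 */
#endif

int main(void)
{
	if (feature_percent > 0)	/* dead code unless built with -DRESERVE_FEATURE */
		printf("feature budget: %d%%\n", feature_percent);
	else
		printf("feature disabled at build time\n");
	return 0;
}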
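
In the last hunk, the old code computed pages = (allowed - allocated + mem) / sizeof(struct page) and derived nd->end from that page count; the new code instead clamps the region to the bytes still allowed, converts that range to pages, and sizes the struct page reservation (mem) from it. The standalone sketch below mirrors only that clamping arithmetic; SKETCH_PAGE_SIZE, SKETCH_STRUCT_PAGE_SIZE and the sample numbers are illustrative assumptions chosen so the over-budget branch triggers, not the kernel's values:

#include <stdio.h>

/* Illustrative stand-ins, not the kernel's definitions. */
#define SKETCH_PAGE_SIZE	4096UL	/* assumed page size in bytes */
#define SKETCH_STRUCT_PAGE_SIZE	64UL	/* assumed sizeof(struct page) */

/*
 * Mirrors the clamping step from the last hunk: if the request would
 * exceed the remaining budget, trim the region to the bytes still
 * allowed, convert that range to pages, and size the page-array
 * reservation (mem) from the trimmed page count.
 */
static unsigned long clamp_region(unsigned long start, unsigned long *end,
				  unsigned long allocated, unsigned long allowed)
{
	unsigned long pages = (*end - start) / SKETCH_PAGE_SIZE;
	unsigned long mem = pages * SKETCH_STRUCT_PAGE_SIZE;

	if (allocated + mem > allowed) {
		unsigned long range;

		if (allocated >= allowed)
			return 0;			/* budget already exhausted */
		range = allowed - allocated;		/* bytes still permitted */
		pages = range / SKETCH_PAGE_SIZE;	/* pages covered by that range */
		mem = pages * SKETCH_STRUCT_PAGE_SIZE;	/* reservation for the page array */
		*end = start + range;			/* shrink the region to fit */
	}
	return mem;
}

int main(void)
{
	unsigned long end = 512UL << 20;	/* ask for a 512 MiB region starting at 0 */
	unsigned long mem = clamp_region(0, &end, 0, 1UL << 20);	/* 1 MiB budget */

	printf("trimmed end: %lu MiB, page-array reservation: %lu KiB\n",
	       end >> 20, mem >> 10);
	return 0;
}

Built and run as-is, the sketch prints a region trimmed to the 1 MiB budget with a 16 KiB page-array reservation, which is the shape of result the fixed code produces when a hot-add area overruns the hotadd_percent budget.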