aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86_64/mm
diff options
context:
space:
mode:
authorMagnus Damm <magnus@valinux.co.jp>2005-11-05 11:25:54 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-14 22:55:17 -0500
commitffd10a2b77bca50dd05ba26acd5a6e68bcc8f61f (patch)
tree92bd5c702cde0c6582950ff66f648e59bf5fb2cd /arch/x86_64/mm
parente583538f077d5f70191670b47a046ba436ec3428 (diff)
[PATCH] x86_64: Make node boundaries consistent
The current x86_64 NUMA memory code is inconsistent when it comes to node memory ranges; the exact behaviour varies depending on which config option is used. setup_node_bootmem() takes start and end as arguments and uses them to calculate the size of the node as (end - start). This is all fine if end points to the first non-available byte. The problem is that the current x86_64 code sometimes treats end as the last present byte and sometimes as the first non-available byte. The result is that some configurations may lose a page at the end of the range. This patch fixes CONFIG_ACPI_NUMA, CONFIG_K8_NUMA and CONFIG_NUMA_EMU so they all treat the end variable as the first non-available byte, the same convention the single-node code uses. The patch is boot tested on dual x86_64 hardware with the above configurations, but it is possible the removed code was needed as a workaround for something. Signed-off-by: Magnus Damm <magnus@valinux.co.jp> Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--arch/x86_64/mm/k8topology.c1
-rw-r--r--arch/x86_64/mm/numa.c2
-rw-r--r--arch/x86_64/mm/srat.c4
3 files changed, 1 insertions, 6 deletions
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index 65417b040c1b..a5663e0bb01c 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -108,6 +108,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
108 limit >>= 16; 108 limit >>= 16;
109 limit <<= 24; 109 limit <<= 24;
110 limit |= (1<<24)-1; 110 limit |= (1<<24)-1;
111 limit++;
111 112
112 if (limit > end_pfn << PAGE_SHIFT) 113 if (limit > end_pfn << PAGE_SHIFT)
113 limit = end_pfn << PAGE_SHIFT; 114 limit = end_pfn << PAGE_SHIFT;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index edd5559380d3..629ff0621b3d 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -209,8 +209,6 @@ static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
209 if (i == numa_fake-1) 209 if (i == numa_fake-1)
210 sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start; 210 sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
211 nodes[i].end = nodes[i].start + sz; 211 nodes[i].end = nodes[i].start + sz;
212 if (i != numa_fake-1)
213 nodes[i].end--;
214 printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", 212 printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n",
215 i, 213 i,
216 nodes[i].start, nodes[i].end, 214 nodes[i].start, nodes[i].end,
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index c7aa08a58041..33340bd1e328 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -71,8 +71,6 @@ static __init void cutoff_node(int i, unsigned long start, unsigned long end)
71 nd->start = nd->end; 71 nd->start = nd->end;
72 } 72 }
73 if (nd->end > end) { 73 if (nd->end > end) {
74 if (!(end & 0xfff))
75 end--;
76 nd->end = end; 74 nd->end = end;
77 if (nd->start > nd->end) 75 if (nd->start > nd->end)
78 nd->start = nd->end; 76 nd->start = nd->end;
@@ -166,8 +164,6 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
166 if (nd->end < end) 164 if (nd->end < end)
167 nd->end = end; 165 nd->end = end;
168 } 166 }
169 if (!(nd->end & 0xfff))
170 nd->end--;
171 printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm, 167 printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
172 nd->start, nd->end); 168 nd->start, nd->end);
173} 169}