author		Tejun Heo <tj@kernel.org>	2011-05-02 08:18:54 -0400
committer	Tejun Heo <tj@kernel.org>	2011-05-02 08:18:54 -0400
commit		7888e96b264fad27f97f58c0f3a4d20326eaf181
tree		8ef77da9bd9b5491fadd6b01dd7de8fdf9d6eec5 /arch/x86/mm
parent		99cca492ea8ced305bfd687521ed69fb9e0147aa
x86, NUMA: Initialize and use remap allocator from setup_node_bootmem()
setup_node_bootmem() is taken from 64bit and doesn't use the remap
allocator.  It's about to be shared with 32bit, so add support for it.

If NODE_DATA is remapped, this is noted in the debug message, and the
node locality check is skipped because the __pa() of the remapped
address doesn't reflect the actual physical address.

On 64bit, the remap allocator becomes a noop and doesn't affect the
behavior.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
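As context for the new control flow in setup_node_bootmem(), here is a
minimal, self-contained userspace sketch of the allocation order the patch
introduces: remap allocator first, then node-local memory, then any node.
The fake_* helpers are hypothetical stand-ins for the kernel's
alloc_remap(), memblock_x86_find_in_range_node() and
memblock_find_in_range(); this illustrates only the fallback logic and is
not kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for alloc_remap(); returns NULL when no remap
 * space is available (on 64bit the real allocator is a noop and always
 * "fails" into the fallback path). */
static void *fake_alloc_remap(int nid, size_t size)
{
	(void)nid;
	(void)size;
	return NULL;
}

/* Hypothetical stand-in for the node-local memblock search. */
static void *fake_alloc_on_node(int nid, size_t size)
{
	(void)nid;
	return malloc(size);	/* pretend this came from node nid */
}

/* Mirror of the patched allocation order in setup_node_bootmem(). */
static void *alloc_node_data(int nid, size_t size, bool *remapped)
{
	void *nd;

	*remapped = false;

	nd = fake_alloc_remap(nid, size);	/* 1. remap allocator */
	if (nd) {
		*remapped = true;
		return nd;
	}

	nd = fake_alloc_on_node(nid, size);	/* 2. node-local memory */
	if (!nd)
		nd = malloc(size);		/* 3. any node */
	return nd;
}

int main(void)
{
	bool remapped;
	void *nd = alloc_node_data(0, 4096, &remapped);

	if (!nd) {
		fprintf(stderr, "Cannot find 4096 bytes in node 0\n");
		return 1;
	}
	printf("NODE_DATA(0) at %p%s\n", nd, remapped ? " (remapped)" : "");
	free(nd);
	return 0;
}

Note that, as the commit message explains, a remapped NODE_DATA makes the
locality check meaningless, which is why the patched kernel code only
compares early_pfn_to_nid(nd_pa >> PAGE_SHIFT) against nid when the memory
did not come from the remap allocator.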
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/numa.c	41
-rw-r--r--	arch/x86/mm/numa_32.c	2
-rw-r--r--	arch/x86/mm/numa_internal.h	6
3 files changed, 34 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index b45caa39f7cf..a72317ae74c5 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -197,7 +197,9 @@ static void __init setup_node_bootmem(int nid, u64 start, u64 end)
 	const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
 	const u64 nd_high = PFN_PHYS(max_pfn_mapped);
 	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
+	bool remapped = false;
 	u64 nd_pa;
+	void *nd;
 	int tnid;
 
 	/*
@@ -207,34 +209,45 @@ static void __init setup_node_bootmem(int nid, u64 start, u64 end)
 	if (end && (end - start) < NODE_MIN_SIZE)
 		return;
 
+	/* initialize remap allocator before aligning to ZONE_ALIGN */
+	init_alloc_remap(nid, start, end);
+
 	start = roundup(start, ZONE_ALIGN);
 
 	printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
 	       nid, start, end);
 
 	/*
-	 * Try to allocate node data on local node and then fall back to
-	 * all nodes.  Never allocate in DMA zone.
+	 * Allocate node data.  Try remap allocator first, node-local
+	 * memory and then any node.  Never allocate in DMA zone.
 	 */
-	nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
-						nd_size, SMP_CACHE_BYTES);
-	if (nd_pa == MEMBLOCK_ERROR)
-		nd_pa = memblock_find_in_range(nd_low, nd_high,
-					       nd_size, SMP_CACHE_BYTES);
-	if (nd_pa == MEMBLOCK_ERROR) {
-		pr_err("Cannot find %zu bytes in node %d\n", nd_size, nid);
-		return;
+	nd = alloc_remap(nid, nd_size);
+	if (nd) {
+		nd_pa = __pa(nd);
+		remapped = true;
+	} else {
+		nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
+							nd_size, SMP_CACHE_BYTES);
+		if (nd_pa == MEMBLOCK_ERROR)
+			nd_pa = memblock_find_in_range(nd_low, nd_high,
+						       nd_size, SMP_CACHE_BYTES);
+		if (nd_pa == MEMBLOCK_ERROR) {
+			pr_err("Cannot find %zu bytes in node %d\n",
+			       nd_size, nid);
+			return;
+		}
+		memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
+		nd = __va(nd_pa);
 	}
-	memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
 
 	/* report and initialize */
-	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]\n",
-	       nd_pa, nd_pa + nd_size - 1);
+	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]%s\n",
+	       nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
 	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
-	if (tnid != nid)
+	if (!remapped && tnid != nid)
 		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
 
-	node_data[nid] = __va(nd_pa);
+	node_data[nid] = nd;
 	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
 	NODE_DATA(nid)->node_id = nid;
 	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 900863204be2..fbd558fe10bc 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -265,7 +265,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
  * opportunistically and the callers will fall back to other memory
  * allocation mechanisms on failure.
  */
-static __init void init_alloc_remap(int nid, u64 start, u64 end)
+void __init init_alloc_remap(int nid, u64 start, u64 end)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long end_pfn = end >> PAGE_SHIFT;
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
index ad86ec91e640..7178c3afe05e 100644
--- a/arch/x86/mm/numa_internal.h
+++ b/arch/x86/mm/numa_internal.h
@@ -21,6 +21,12 @@ void __init numa_reset_distance(void);
 
 void __init x86_numa_init(void);
 
+#ifdef CONFIG_X86_64
+static inline void init_alloc_remap(int nid, u64 start, u64 end) { }
+#else
+void __init init_alloc_remap(int nid, u64 start, u64 end);
+#endif
+
 #ifdef CONFIG_NUMA_EMU
 void __init numa_emulation(struct numa_meminfo *numa_meminfo,
 			   int numa_dist_cnt);