author    Tejun Heo <tj@kernel.org>    2011-05-02 08:18:53 -0400
committer Tejun Heo <tj@kernel.org>    2011-05-02 08:18:53 -0400
commit    a4106eae650a4d5d30fcdd36d998edfa5ccb0ec4 (patch)
tree      29da18fc58ff99a9427d5047936e71dad1ac40dd /arch/x86/mm
parent    299a180aec6a8ee3069cf0fe90d722ac20c1f837 (diff)
x86, NUMA: Move NUMA init logic from numa_64.c to numa.c
Move the generic 64bit NUMA init machinery from numa_64.c to numa.c.
* node_data[], numa_meminfo and numa_distance
* numa_add_memblk[_to](), numa_remove_memblk[_from]()
* numa_set_distance() and friends
* numa_init() and all the numa_meminfo handling helpers called from it
* dummy_numa_init()
* memory_add_physaddr_to_nid()
A new function x86_numa_init() is added and the content of
numa_64.c::initmem_init() is moved into it. initmem_init() now simply
calls x86_numa_init().
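
For reference, after this change the 64-bit initmem_init() reduces to a thin wrapper around the new entry point (this is taken directly from the numa_64.c hunk below):

    void __init initmem_init(void)
    {
    	x86_numa_init();
    }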
Constants and the numa_off declaration are moved from numa_{32|64}.h to
numa.h.
This is code reorganization and doesn't involve any functional change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/numa.c          | 523
-rw-r--r--  arch/x86/mm/numa_64.c       | 503
-rw-r--r--  arch/x86/mm/numa_internal.h |   2
3 files changed, 523 insertions(+), 505 deletions(-)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index cce174109ca9..ed1daba54906 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -1,13 +1,42 @@
1 | /* Common code for 32 and 64-bit NUMA */ | 1 | /* Common code for 32 and 64-bit NUMA */ |
2 | #include <linux/topology.h> | 2 | #include <linux/kernel.h> |
3 | #include <linux/module.h> | 3 | #include <linux/mm.h> |
4 | #include <linux/string.h> | ||
5 | #include <linux/init.h> | ||
4 | #include <linux/bootmem.h> | 6 | #include <linux/bootmem.h> |
5 | #include <asm/numa.h> | 7 | #include <linux/memblock.h> |
8 | #include <linux/mmzone.h> | ||
9 | #include <linux/ctype.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/nodemask.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/topology.h> | ||
14 | |||
15 | #include <asm/e820.h> | ||
16 | #include <asm/proto.h> | ||
17 | #include <asm/dma.h> | ||
6 | #include <asm/acpi.h> | 18 | #include <asm/acpi.h> |
19 | #include <asm/amd_nb.h> | ||
20 | |||
21 | #include "numa_internal.h" | ||
7 | 22 | ||
8 | int __initdata numa_off; | 23 | int __initdata numa_off; |
9 | nodemask_t numa_nodes_parsed __initdata; | 24 | nodemask_t numa_nodes_parsed __initdata; |
10 | 25 | ||
26 | #ifdef CONFIG_X86_64 | ||
27 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | ||
28 | EXPORT_SYMBOL(node_data); | ||
29 | |||
30 | static struct numa_meminfo numa_meminfo | ||
31 | #ifndef CONFIG_MEMORY_HOTPLUG | ||
32 | __initdata | ||
33 | #endif | ||
34 | ; | ||
35 | |||
36 | static int numa_distance_cnt; | ||
37 | static u8 *numa_distance; | ||
38 | #endif | ||
39 | |||
11 | static __init int numa_setup(char *opt) | 40 | static __init int numa_setup(char *opt) |
12 | { | 41 | { |
13 | if (!opt) | 42 | if (!opt) |
@@ -105,6 +134,392 @@ void __init setup_node_to_cpumask_map(void)
105 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); | 134 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); |
106 | } | 135 | } |
107 | 136 | ||
137 | #ifdef CONFIG_X86_64 | ||
138 | static int __init numa_add_memblk_to(int nid, u64 start, u64 end, | ||
139 | struct numa_meminfo *mi) | ||
140 | { | ||
141 | /* ignore zero length blks */ | ||
142 | if (start == end) | ||
143 | return 0; | ||
144 | |||
145 | /* whine about and ignore invalid blks */ | ||
146 | if (start > end || nid < 0 || nid >= MAX_NUMNODES) { | ||
147 | pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", | ||
148 | nid, start, end); | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | if (mi->nr_blks >= NR_NODE_MEMBLKS) { | ||
153 | pr_err("NUMA: too many memblk ranges\n"); | ||
154 | return -EINVAL; | ||
155 | } | ||
156 | |||
157 | mi->blk[mi->nr_blks].start = start; | ||
158 | mi->blk[mi->nr_blks].end = end; | ||
159 | mi->blk[mi->nr_blks].nid = nid; | ||
160 | mi->nr_blks++; | ||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo | ||
166 | * @idx: Index of memblk to remove | ||
167 | * @mi: numa_meminfo to remove memblk from | ||
168 | * | ||
169 | * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and | ||
170 | * decrementing @mi->nr_blks. | ||
171 | */ | ||
172 | void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) | ||
173 | { | ||
174 | mi->nr_blks--; | ||
175 | memmove(&mi->blk[idx], &mi->blk[idx + 1], | ||
176 | (mi->nr_blks - idx) * sizeof(mi->blk[0])); | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * numa_add_memblk - Add one numa_memblk to numa_meminfo | ||
181 | * @nid: NUMA node ID of the new memblk | ||
182 | * @start: Start address of the new memblk | ||
183 | * @end: End address of the new memblk | ||
184 | * | ||
185 | * Add a new memblk to the default numa_meminfo. | ||
186 | * | ||
187 | * RETURNS: | ||
188 | * 0 on success, -errno on failure. | ||
189 | */ | ||
190 | int __init numa_add_memblk(int nid, u64 start, u64 end) | ||
191 | { | ||
192 | return numa_add_memblk_to(nid, start, end, &numa_meminfo); | ||
193 | } | ||
194 | |||
195 | /* Initialize bootmem allocator for a node */ | ||
196 | static void __init | ||
197 | setup_node_bootmem(int nid, unsigned long start, unsigned long end) | ||
198 | { | ||
199 | const u64 nd_low = (u64)MAX_DMA_PFN << PAGE_SHIFT; | ||
200 | const u64 nd_high = (u64)max_pfn_mapped << PAGE_SHIFT; | ||
201 | const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); | ||
202 | unsigned long nd_pa; | ||
203 | int tnid; | ||
204 | |||
205 | /* | ||
206 | * Don't confuse VM with a node that doesn't have the | ||
207 | * minimum amount of memory: | ||
208 | */ | ||
209 | if (end && (end - start) < NODE_MIN_SIZE) | ||
210 | return; | ||
211 | |||
212 | start = roundup(start, ZONE_ALIGN); | ||
213 | |||
214 | printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", | ||
215 | nid, start, end); | ||
216 | |||
217 | /* | ||
218 | * Try to allocate node data on local node and then fall back to | ||
219 | * all nodes. Never allocate in DMA zone. | ||
220 | */ | ||
221 | nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high, | ||
222 | nd_size, SMP_CACHE_BYTES); | ||
223 | if (nd_pa == MEMBLOCK_ERROR) | ||
224 | nd_pa = memblock_find_in_range(nd_low, nd_high, | ||
225 | nd_size, SMP_CACHE_BYTES); | ||
226 | if (nd_pa == MEMBLOCK_ERROR) { | ||
227 | pr_err("Cannot find %lu bytes in node %d\n", nd_size, nid); | ||
228 | return; | ||
229 | } | ||
230 | memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA"); | ||
231 | |||
232 | /* report and initialize */ | ||
233 | printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", | ||
234 | nd_pa, nd_pa + nd_size - 1); | ||
235 | tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); | ||
236 | if (tnid != nid) | ||
237 | printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); | ||
238 | |||
239 | node_data[nid] = __va(nd_pa); | ||
240 | memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); | ||
241 | NODE_DATA(nid)->node_id = nid; | ||
242 | NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT; | ||
243 | NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT; | ||
244 | |||
245 | node_set_online(nid); | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * numa_cleanup_meminfo - Cleanup a numa_meminfo | ||
250 | * @mi: numa_meminfo to clean up | ||
251 | * | ||
252 | * Sanitize @mi by merging and removing unncessary memblks. Also check for | ||
253 | * conflicts and clear unused memblks. | ||
254 | * | ||
255 | * RETURNS: | ||
256 | * 0 on success, -errno on failure. | ||
257 | */ | ||
258 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi) | ||
259 | { | ||
260 | const u64 low = 0; | ||
261 | const u64 high = (u64)max_pfn << PAGE_SHIFT; | ||
262 | int i, j, k; | ||
263 | |||
264 | for (i = 0; i < mi->nr_blks; i++) { | ||
265 | struct numa_memblk *bi = &mi->blk[i]; | ||
266 | |||
267 | /* make sure all blocks are inside the limits */ | ||
268 | bi->start = max(bi->start, low); | ||
269 | bi->end = min(bi->end, high); | ||
270 | |||
271 | /* and there's no empty block */ | ||
272 | if (bi->start >= bi->end) { | ||
273 | numa_remove_memblk_from(i--, mi); | ||
274 | continue; | ||
275 | } | ||
276 | |||
277 | for (j = i + 1; j < mi->nr_blks; j++) { | ||
278 | struct numa_memblk *bj = &mi->blk[j]; | ||
279 | unsigned long start, end; | ||
280 | |||
281 | /* | ||
282 | * See whether there are overlapping blocks. Whine | ||
283 | * about but allow overlaps of the same nid. They | ||
284 | * will be merged below. | ||
285 | */ | ||
286 | if (bi->end > bj->start && bi->start < bj->end) { | ||
287 | if (bi->nid != bj->nid) { | ||
288 | pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", | ||
289 | bi->nid, bi->start, bi->end, | ||
290 | bj->nid, bj->start, bj->end); | ||
291 | return -EINVAL; | ||
292 | } | ||
293 | pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", | ||
294 | bi->nid, bi->start, bi->end, | ||
295 | bj->start, bj->end); | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * Join together blocks on the same node, holes | ||
300 | * between which don't overlap with memory on other | ||
301 | * nodes. | ||
302 | */ | ||
303 | if (bi->nid != bj->nid) | ||
304 | continue; | ||
305 | start = max(min(bi->start, bj->start), low); | ||
306 | end = min(max(bi->end, bj->end), high); | ||
307 | for (k = 0; k < mi->nr_blks; k++) { | ||
308 | struct numa_memblk *bk = &mi->blk[k]; | ||
309 | |||
310 | if (bi->nid == bk->nid) | ||
311 | continue; | ||
312 | if (start < bk->end && end > bk->start) | ||
313 | break; | ||
314 | } | ||
315 | if (k < mi->nr_blks) | ||
316 | continue; | ||
317 | printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", | ||
318 | bi->nid, bi->start, bi->end, bj->start, bj->end, | ||
319 | start, end); | ||
320 | bi->start = start; | ||
321 | bi->end = end; | ||
322 | numa_remove_memblk_from(j--, mi); | ||
323 | } | ||
324 | } | ||
325 | |||
326 | for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { | ||
327 | mi->blk[i].start = mi->blk[i].end = 0; | ||
328 | mi->blk[i].nid = NUMA_NO_NODE; | ||
329 | } | ||
330 | |||
331 | return 0; | ||
332 | } | ||
333 | |||
334 | /* | ||
335 | * Set nodes, which have memory in @mi, in *@nodemask. | ||
336 | */ | ||
337 | static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, | ||
338 | const struct numa_meminfo *mi) | ||
339 | { | ||
340 | int i; | ||
341 | |||
342 | for (i = 0; i < ARRAY_SIZE(mi->blk); i++) | ||
343 | if (mi->blk[i].start != mi->blk[i].end && | ||
344 | mi->blk[i].nid != NUMA_NO_NODE) | ||
345 | node_set(mi->blk[i].nid, *nodemask); | ||
346 | } | ||
347 | |||
348 | /** | ||
349 | * numa_reset_distance - Reset NUMA distance table | ||
350 | * | ||
351 | * The current table is freed. The next numa_set_distance() call will | ||
352 | * create a new one. | ||
353 | */ | ||
354 | void __init numa_reset_distance(void) | ||
355 | { | ||
356 | size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); | ||
357 | |||
358 | /* numa_distance could be 1LU marking allocation failure, test cnt */ | ||
359 | if (numa_distance_cnt) | ||
360 | memblock_x86_free_range(__pa(numa_distance), | ||
361 | __pa(numa_distance) + size); | ||
362 | numa_distance_cnt = 0; | ||
363 | numa_distance = NULL; /* enable table creation */ | ||
364 | } | ||
365 | |||
366 | static int __init numa_alloc_distance(void) | ||
367 | { | ||
368 | nodemask_t nodes_parsed; | ||
369 | size_t size; | ||
370 | int i, j, cnt = 0; | ||
371 | u64 phys; | ||
372 | |||
373 | /* size the new table and allocate it */ | ||
374 | nodes_parsed = numa_nodes_parsed; | ||
375 | numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); | ||
376 | |||
377 | for_each_node_mask(i, nodes_parsed) | ||
378 | cnt = i; | ||
379 | cnt++; | ||
380 | size = cnt * cnt * sizeof(numa_distance[0]); | ||
381 | |||
382 | phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, | ||
383 | size, PAGE_SIZE); | ||
384 | if (phys == MEMBLOCK_ERROR) { | ||
385 | pr_warning("NUMA: Warning: can't allocate distance table!\n"); | ||
386 | /* don't retry until explicitly reset */ | ||
387 | numa_distance = (void *)1LU; | ||
388 | return -ENOMEM; | ||
389 | } | ||
390 | memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); | ||
391 | |||
392 | numa_distance = __va(phys); | ||
393 | numa_distance_cnt = cnt; | ||
394 | |||
395 | /* fill with the default distances */ | ||
396 | for (i = 0; i < cnt; i++) | ||
397 | for (j = 0; j < cnt; j++) | ||
398 | numa_distance[i * cnt + j] = i == j ? | ||
399 | LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
400 | printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); | ||
401 | |||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | /** | ||
406 | * numa_set_distance - Set NUMA distance from one NUMA to another | ||
407 | * @from: the 'from' node to set distance | ||
408 | * @to: the 'to' node to set distance | ||
409 | * @distance: NUMA distance | ||
410 | * | ||
411 | * Set the distance from node @from to @to to @distance. If distance table | ||
412 | * doesn't exist, one which is large enough to accommodate all the currently | ||
413 | * known nodes will be created. | ||
414 | * | ||
415 | * If such table cannot be allocated, a warning is printed and further | ||
416 | * calls are ignored until the distance table is reset with | ||
417 | * numa_reset_distance(). | ||
418 | * | ||
419 | * If @from or @to is higher than the highest known node at the time of | ||
420 | * table creation or @distance doesn't make sense, the call is ignored. | ||
421 | * This is to allow simplification of specific NUMA config implementations. | ||
422 | */ | ||
423 | void __init numa_set_distance(int from, int to, int distance) | ||
424 | { | ||
425 | if (!numa_distance && numa_alloc_distance() < 0) | ||
426 | return; | ||
427 | |||
428 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) { | ||
429 | printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", | ||
430 | from, to, distance); | ||
431 | return; | ||
432 | } | ||
433 | |||
434 | if ((u8)distance != distance || | ||
435 | (from == to && distance != LOCAL_DISTANCE)) { | ||
436 | pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", | ||
437 | from, to, distance); | ||
438 | return; | ||
439 | } | ||
440 | |||
441 | numa_distance[from * numa_distance_cnt + to] = distance; | ||
442 | } | ||
443 | |||
444 | int __node_distance(int from, int to) | ||
445 | { | ||
446 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) | ||
447 | return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
448 | return numa_distance[from * numa_distance_cnt + to]; | ||
449 | } | ||
450 | EXPORT_SYMBOL(__node_distance); | ||
451 | |||
452 | /* | ||
453 | * Sanity check to catch more bad NUMA configurations (they are amazingly | ||
454 | * common). Make sure the nodes cover all memory. | ||
455 | */ | ||
456 | static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) | ||
457 | { | ||
458 | unsigned long numaram, e820ram; | ||
459 | int i; | ||
460 | |||
461 | numaram = 0; | ||
462 | for (i = 0; i < mi->nr_blks; i++) { | ||
463 | unsigned long s = mi->blk[i].start >> PAGE_SHIFT; | ||
464 | unsigned long e = mi->blk[i].end >> PAGE_SHIFT; | ||
465 | numaram += e - s; | ||
466 | numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); | ||
467 | if ((long)numaram < 0) | ||
468 | numaram = 0; | ||
469 | } | ||
470 | |||
471 | e820ram = max_pfn - (memblock_x86_hole_size(0, | ||
472 | max_pfn << PAGE_SHIFT) >> PAGE_SHIFT); | ||
473 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ | ||
474 | if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { | ||
475 | printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", | ||
476 | (numaram << PAGE_SHIFT) >> 20, | ||
477 | (e820ram << PAGE_SHIFT) >> 20); | ||
478 | return false; | ||
479 | } | ||
480 | return true; | ||
481 | } | ||
482 | |||
483 | static int __init numa_register_memblks(struct numa_meminfo *mi) | ||
484 | { | ||
485 | int i, nid; | ||
486 | |||
487 | /* Account for nodes with cpus and no memory */ | ||
488 | node_possible_map = numa_nodes_parsed; | ||
489 | numa_nodemask_from_meminfo(&node_possible_map, mi); | ||
490 | if (WARN_ON(nodes_empty(node_possible_map))) | ||
491 | return -EINVAL; | ||
492 | |||
493 | for (i = 0; i < mi->nr_blks; i++) | ||
494 | memblock_x86_register_active_regions(mi->blk[i].nid, | ||
495 | mi->blk[i].start >> PAGE_SHIFT, | ||
496 | mi->blk[i].end >> PAGE_SHIFT); | ||
497 | |||
498 | /* for out of order entries */ | ||
499 | sort_node_map(); | ||
500 | if (!numa_meminfo_cover_memory(mi)) | ||
501 | return -EINVAL; | ||
502 | |||
503 | /* Finally register nodes. */ | ||
504 | for_each_node_mask(nid, node_possible_map) { | ||
505 | u64 start = (u64)max_pfn << PAGE_SHIFT; | ||
506 | u64 end = 0; | ||
507 | |||
508 | for (i = 0; i < mi->nr_blks; i++) { | ||
509 | if (nid != mi->blk[i].nid) | ||
510 | continue; | ||
511 | start = min(mi->blk[i].start, start); | ||
512 | end = max(mi->blk[i].end, end); | ||
513 | } | ||
514 | |||
515 | if (start < end) | ||
516 | setup_node_bootmem(nid, start, end); | ||
517 | } | ||
518 | |||
519 | return 0; | ||
520 | } | ||
521 | #endif | ||
522 | |||
108 | /* | 523 | /* |
109 | * There are unfortunately some poorly designed mainboards around that | 524 | * There are unfortunately some poorly designed mainboards around that |
110 | * only connect memory to a single CPU. This breaks the 1:1 cpu->node | 525 | * only connect memory to a single CPU. This breaks the 1:1 cpu->node |
@@ -127,6 +542,93 @@ void __init numa_init_array(void)
127 | } | 542 | } |
128 | } | 543 | } |
129 | 544 | ||
545 | #ifdef CONFIG_X86_64 | ||
546 | static int __init numa_init(int (*init_func)(void)) | ||
547 | { | ||
548 | int i; | ||
549 | int ret; | ||
550 | |||
551 | for (i = 0; i < MAX_LOCAL_APIC; i++) | ||
552 | set_apicid_to_node(i, NUMA_NO_NODE); | ||
553 | |||
554 | nodes_clear(numa_nodes_parsed); | ||
555 | nodes_clear(node_possible_map); | ||
556 | nodes_clear(node_online_map); | ||
557 | memset(&numa_meminfo, 0, sizeof(numa_meminfo)); | ||
558 | remove_all_active_ranges(); | ||
559 | numa_reset_distance(); | ||
560 | |||
561 | ret = init_func(); | ||
562 | if (ret < 0) | ||
563 | return ret; | ||
564 | ret = numa_cleanup_meminfo(&numa_meminfo); | ||
565 | if (ret < 0) | ||
566 | return ret; | ||
567 | |||
568 | numa_emulation(&numa_meminfo, numa_distance_cnt); | ||
569 | |||
570 | ret = numa_register_memblks(&numa_meminfo); | ||
571 | if (ret < 0) | ||
572 | return ret; | ||
573 | |||
574 | for (i = 0; i < nr_cpu_ids; i++) { | ||
575 | int nid = early_cpu_to_node(i); | ||
576 | |||
577 | if (nid == NUMA_NO_NODE) | ||
578 | continue; | ||
579 | if (!node_online(nid)) | ||
580 | numa_clear_node(i); | ||
581 | } | ||
582 | numa_init_array(); | ||
583 | return 0; | ||
584 | } | ||
585 | |||
586 | /** | ||
587 | * dummy_numa_init - Fallback dummy NUMA init | ||
588 | * | ||
589 | * Used if there's no underlying NUMA architecture, NUMA initialization | ||
590 | * fails, or NUMA is disabled on the command line. | ||
591 | * | ||
592 | * Must online at least one node and add memory blocks that cover all | ||
593 | * allowed memory. This function must not fail. | ||
594 | */ | ||
595 | static int __init dummy_numa_init(void) | ||
596 | { | ||
597 | printk(KERN_INFO "%s\n", | ||
598 | numa_off ? "NUMA turned off" : "No NUMA configuration found"); | ||
599 | printk(KERN_INFO "Faking a node at %016lx-%016lx\n", | ||
600 | 0LU, max_pfn << PAGE_SHIFT); | ||
601 | |||
602 | node_set(0, numa_nodes_parsed); | ||
603 | numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | /** | ||
609 | * x86_numa_init - Initialize NUMA | ||
610 | * | ||
611 | * Try each configured NUMA initialization method until one succeeds. The | ||
612 | * last fallback is dummy single node config encomapssing whole memory and | ||
613 | * never fails. | ||
614 | */ | ||
615 | void __init x86_numa_init(void) | ||
616 | { | ||
617 | if (!numa_off) { | ||
618 | #ifdef CONFIG_ACPI_NUMA | ||
619 | if (!numa_init(x86_acpi_numa_init)) | ||
620 | return; | ||
621 | #endif | ||
622 | #ifdef CONFIG_AMD_NUMA | ||
623 | if (!numa_init(amd_numa_init)) | ||
624 | return; | ||
625 | #endif | ||
626 | } | ||
627 | |||
628 | numa_init(dummy_numa_init); | ||
629 | } | ||
630 | #endif | ||
631 | |||
130 | static __init int find_near_online_node(int node) | 632 | static __init int find_near_online_node(int node) |
131 | { | 633 | { |
132 | int n, val; | 634 | int n, val; |
@@ -292,3 +794,18 @@ const struct cpumask *cpumask_of_node(int node)
292 | EXPORT_SYMBOL(cpumask_of_node); | 794 | EXPORT_SYMBOL(cpumask_of_node); |
293 | 795 | ||
294 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ | 796 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
797 | |||
798 | #if defined(CONFIG_X86_64) && defined(CONFIG_MEMORY_HOTPLUG) | ||
799 | int memory_add_physaddr_to_nid(u64 start) | ||
800 | { | ||
801 | struct numa_meminfo *mi = &numa_meminfo; | ||
802 | int nid = mi->blk[0].nid; | ||
803 | int i; | ||
804 | |||
805 | for (i = 0; i < mi->nr_blks; i++) | ||
806 | if (mi->blk[i].start <= start && mi->blk[i].end > start) | ||
807 | nid = mi->blk[i].nid; | ||
808 | return nid; | ||
809 | } | ||
810 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | ||
811 | #endif | ||
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 70bd8221f928..dd27f401f0a0 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -2,499 +2,13 @@
2 | * Generic VM initialization for x86-64 NUMA setups. | 2 | * Generic VM initialization for x86-64 NUMA setups. |
3 | * Copyright 2002,2003 Andi Kleen, SuSE Labs. | 3 | * Copyright 2002,2003 Andi Kleen, SuSE Labs. |
4 | */ | 4 | */ |
5 | #include <linux/kernel.h> | ||
6 | #include <linux/mm.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/bootmem.h> | 5 | #include <linux/bootmem.h> |
10 | #include <linux/memblock.h> | ||
11 | #include <linux/mmzone.h> | ||
12 | #include <linux/ctype.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/nodemask.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/acpi.h> | ||
17 | |||
18 | #include <asm/e820.h> | ||
19 | #include <asm/proto.h> | ||
20 | #include <asm/dma.h> | ||
21 | #include <asm/acpi.h> | ||
22 | #include <asm/amd_nb.h> | ||
23 | 6 | ||
24 | #include "numa_internal.h" | 7 | #include "numa_internal.h" |
25 | 8 | ||
26 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | ||
27 | EXPORT_SYMBOL(node_data); | ||
28 | |||
29 | static struct numa_meminfo numa_meminfo | ||
30 | #ifndef CONFIG_MEMORY_HOTPLUG | ||
31 | __initdata | ||
32 | #endif | ||
33 | ; | ||
34 | |||
35 | static int numa_distance_cnt; | ||
36 | static u8 *numa_distance; | ||
37 | |||
38 | static int __init numa_add_memblk_to(int nid, u64 start, u64 end, | ||
39 | struct numa_meminfo *mi) | ||
40 | { | ||
41 | /* ignore zero length blks */ | ||
42 | if (start == end) | ||
43 | return 0; | ||
44 | |||
45 | /* whine about and ignore invalid blks */ | ||
46 | if (start > end || nid < 0 || nid >= MAX_NUMNODES) { | ||
47 | pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", | ||
48 | nid, start, end); | ||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | if (mi->nr_blks >= NR_NODE_MEMBLKS) { | ||
53 | pr_err("NUMA: too many memblk ranges\n"); | ||
54 | return -EINVAL; | ||
55 | } | ||
56 | |||
57 | mi->blk[mi->nr_blks].start = start; | ||
58 | mi->blk[mi->nr_blks].end = end; | ||
59 | mi->blk[mi->nr_blks].nid = nid; | ||
60 | mi->nr_blks++; | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo | ||
66 | * @idx: Index of memblk to remove | ||
67 | * @mi: numa_meminfo to remove memblk from | ||
68 | * | ||
69 | * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and | ||
70 | * decrementing @mi->nr_blks. | ||
71 | */ | ||
72 | void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) | ||
73 | { | ||
74 | mi->nr_blks--; | ||
75 | memmove(&mi->blk[idx], &mi->blk[idx + 1], | ||
76 | (mi->nr_blks - idx) * sizeof(mi->blk[0])); | ||
77 | } | ||
78 | |||
79 | /** | ||
80 | * numa_add_memblk - Add one numa_memblk to numa_meminfo | ||
81 | * @nid: NUMA node ID of the new memblk | ||
82 | * @start: Start address of the new memblk | ||
83 | * @end: End address of the new memblk | ||
84 | * | ||
85 | * Add a new memblk to the default numa_meminfo. | ||
86 | * | ||
87 | * RETURNS: | ||
88 | * 0 on success, -errno on failure. | ||
89 | */ | ||
90 | int __init numa_add_memblk(int nid, u64 start, u64 end) | ||
91 | { | ||
92 | return numa_add_memblk_to(nid, start, end, &numa_meminfo); | ||
93 | } | ||
94 | |||
95 | /* Initialize bootmem allocator for a node */ | ||
96 | static void __init | ||
97 | setup_node_bootmem(int nid, unsigned long start, unsigned long end) | ||
98 | { | ||
99 | const u64 nd_low = (u64)MAX_DMA_PFN << PAGE_SHIFT; | ||
100 | const u64 nd_high = (u64)max_pfn_mapped << PAGE_SHIFT; | ||
101 | const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); | ||
102 | unsigned long nd_pa; | ||
103 | int tnid; | ||
104 | |||
105 | /* | ||
106 | * Don't confuse VM with a node that doesn't have the | ||
107 | * minimum amount of memory: | ||
108 | */ | ||
109 | if (end && (end - start) < NODE_MIN_SIZE) | ||
110 | return; | ||
111 | |||
112 | start = roundup(start, ZONE_ALIGN); | ||
113 | |||
114 | printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", | ||
115 | nid, start, end); | ||
116 | |||
117 | /* | ||
118 | * Try to allocate node data on local node and then fall back to | ||
119 | * all nodes. Never allocate in DMA zone. | ||
120 | */ | ||
121 | nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high, | ||
122 | nd_size, SMP_CACHE_BYTES); | ||
123 | if (nd_pa == MEMBLOCK_ERROR) | ||
124 | nd_pa = memblock_find_in_range(nd_low, nd_high, | ||
125 | nd_size, SMP_CACHE_BYTES); | ||
126 | if (nd_pa == MEMBLOCK_ERROR) { | ||
127 | pr_err("Cannot find %lu bytes in node %d\n", nd_size, nid); | ||
128 | return; | ||
129 | } | ||
130 | memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA"); | ||
131 | |||
132 | /* report and initialize */ | ||
133 | printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", | ||
134 | nd_pa, nd_pa + nd_size - 1); | ||
135 | tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); | ||
136 | if (tnid != nid) | ||
137 | printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); | ||
138 | |||
139 | node_data[nid] = __va(nd_pa); | ||
140 | memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); | ||
141 | NODE_DATA(nid)->node_id = nid; | ||
142 | NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT; | ||
143 | NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT; | ||
144 | |||
145 | node_set_online(nid); | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * numa_cleanup_meminfo - Cleanup a numa_meminfo | ||
150 | * @mi: numa_meminfo to clean up | ||
151 | * | ||
152 | * Sanitize @mi by merging and removing unncessary memblks. Also check for | ||
153 | * conflicts and clear unused memblks. | ||
154 | * | ||
155 | * RETURNS: | ||
156 | * 0 on success, -errno on failure. | ||
157 | */ | ||
158 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi) | ||
159 | { | ||
160 | const u64 low = 0; | ||
161 | const u64 high = (u64)max_pfn << PAGE_SHIFT; | ||
162 | int i, j, k; | ||
163 | |||
164 | for (i = 0; i < mi->nr_blks; i++) { | ||
165 | struct numa_memblk *bi = &mi->blk[i]; | ||
166 | |||
167 | /* make sure all blocks are inside the limits */ | ||
168 | bi->start = max(bi->start, low); | ||
169 | bi->end = min(bi->end, high); | ||
170 | |||
171 | /* and there's no empty block */ | ||
172 | if (bi->start >= bi->end) { | ||
173 | numa_remove_memblk_from(i--, mi); | ||
174 | continue; | ||
175 | } | ||
176 | |||
177 | for (j = i + 1; j < mi->nr_blks; j++) { | ||
178 | struct numa_memblk *bj = &mi->blk[j]; | ||
179 | unsigned long start, end; | ||
180 | |||
181 | /* | ||
182 | * See whether there are overlapping blocks. Whine | ||
183 | * about but allow overlaps of the same nid. They | ||
184 | * will be merged below. | ||
185 | */ | ||
186 | if (bi->end > bj->start && bi->start < bj->end) { | ||
187 | if (bi->nid != bj->nid) { | ||
188 | pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", | ||
189 | bi->nid, bi->start, bi->end, | ||
190 | bj->nid, bj->start, bj->end); | ||
191 | return -EINVAL; | ||
192 | } | ||
193 | pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", | ||
194 | bi->nid, bi->start, bi->end, | ||
195 | bj->start, bj->end); | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * Join together blocks on the same node, holes | ||
200 | * between which don't overlap with memory on other | ||
201 | * nodes. | ||
202 | */ | ||
203 | if (bi->nid != bj->nid) | ||
204 | continue; | ||
205 | start = max(min(bi->start, bj->start), low); | ||
206 | end = min(max(bi->end, bj->end), high); | ||
207 | for (k = 0; k < mi->nr_blks; k++) { | ||
208 | struct numa_memblk *bk = &mi->blk[k]; | ||
209 | |||
210 | if (bi->nid == bk->nid) | ||
211 | continue; | ||
212 | if (start < bk->end && end > bk->start) | ||
213 | break; | ||
214 | } | ||
215 | if (k < mi->nr_blks) | ||
216 | continue; | ||
217 | printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", | ||
218 | bi->nid, bi->start, bi->end, bj->start, bj->end, | ||
219 | start, end); | ||
220 | bi->start = start; | ||
221 | bi->end = end; | ||
222 | numa_remove_memblk_from(j--, mi); | ||
223 | } | ||
224 | } | ||
225 | |||
226 | for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { | ||
227 | mi->blk[i].start = mi->blk[i].end = 0; | ||
228 | mi->blk[i].nid = NUMA_NO_NODE; | ||
229 | } | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * Set nodes, which have memory in @mi, in *@nodemask. | ||
236 | */ | ||
237 | static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, | ||
238 | const struct numa_meminfo *mi) | ||
239 | { | ||
240 | int i; | ||
241 | |||
242 | for (i = 0; i < ARRAY_SIZE(mi->blk); i++) | ||
243 | if (mi->blk[i].start != mi->blk[i].end && | ||
244 | mi->blk[i].nid != NUMA_NO_NODE) | ||
245 | node_set(mi->blk[i].nid, *nodemask); | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * numa_reset_distance - Reset NUMA distance table | ||
250 | * | ||
251 | * The current table is freed. The next numa_set_distance() call will | ||
252 | * create a new one. | ||
253 | */ | ||
254 | void __init numa_reset_distance(void) | ||
255 | { | ||
256 | size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); | ||
257 | |||
258 | /* numa_distance could be 1LU marking allocation failure, test cnt */ | ||
259 | if (numa_distance_cnt) | ||
260 | memblock_x86_free_range(__pa(numa_distance), | ||
261 | __pa(numa_distance) + size); | ||
262 | numa_distance_cnt = 0; | ||
263 | numa_distance = NULL; /* enable table creation */ | ||
264 | } | ||
265 | |||
266 | static int __init numa_alloc_distance(void) | ||
267 | { | ||
268 | nodemask_t nodes_parsed; | ||
269 | size_t size; | ||
270 | int i, j, cnt = 0; | ||
271 | u64 phys; | ||
272 | |||
273 | /* size the new table and allocate it */ | ||
274 | nodes_parsed = numa_nodes_parsed; | ||
275 | numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); | ||
276 | |||
277 | for_each_node_mask(i, nodes_parsed) | ||
278 | cnt = i; | ||
279 | cnt++; | ||
280 | size = cnt * cnt * sizeof(numa_distance[0]); | ||
281 | |||
282 | phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, | ||
283 | size, PAGE_SIZE); | ||
284 | if (phys == MEMBLOCK_ERROR) { | ||
285 | pr_warning("NUMA: Warning: can't allocate distance table!\n"); | ||
286 | /* don't retry until explicitly reset */ | ||
287 | numa_distance = (void *)1LU; | ||
288 | return -ENOMEM; | ||
289 | } | ||
290 | memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); | ||
291 | |||
292 | numa_distance = __va(phys); | ||
293 | numa_distance_cnt = cnt; | ||
294 | |||
295 | /* fill with the default distances */ | ||
296 | for (i = 0; i < cnt; i++) | ||
297 | for (j = 0; j < cnt; j++) | ||
298 | numa_distance[i * cnt + j] = i == j ? | ||
299 | LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
300 | printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); | ||
301 | |||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | /** | ||
306 | * numa_set_distance - Set NUMA distance from one NUMA to another | ||
307 | * @from: the 'from' node to set distance | ||
308 | * @to: the 'to' node to set distance | ||
309 | * @distance: NUMA distance | ||
310 | * | ||
311 | * Set the distance from node @from to @to to @distance. If distance table | ||
312 | * doesn't exist, one which is large enough to accommodate all the currently | ||
313 | * known nodes will be created. | ||
314 | * | ||
315 | * If such table cannot be allocated, a warning is printed and further | ||
316 | * calls are ignored until the distance table is reset with | ||
317 | * numa_reset_distance(). | ||
318 | * | ||
319 | * If @from or @to is higher than the highest known node at the time of | ||
320 | * table creation or @distance doesn't make sense, the call is ignored. | ||
321 | * This is to allow simplification of specific NUMA config implementations. | ||
322 | */ | ||
323 | void __init numa_set_distance(int from, int to, int distance) | ||
324 | { | ||
325 | if (!numa_distance && numa_alloc_distance() < 0) | ||
326 | return; | ||
327 | |||
328 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) { | ||
329 | printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", | ||
330 | from, to, distance); | ||
331 | return; | ||
332 | } | ||
333 | |||
334 | if ((u8)distance != distance || | ||
335 | (from == to && distance != LOCAL_DISTANCE)) { | ||
336 | pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", | ||
337 | from, to, distance); | ||
338 | return; | ||
339 | } | ||
340 | |||
341 | numa_distance[from * numa_distance_cnt + to] = distance; | ||
342 | } | ||
343 | |||
344 | int __node_distance(int from, int to) | ||
345 | { | ||
346 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) | ||
347 | return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; | ||
348 | return numa_distance[from * numa_distance_cnt + to]; | ||
349 | } | ||
350 | EXPORT_SYMBOL(__node_distance); | ||
351 | |||
352 | /* | ||
353 | * Sanity check to catch more bad NUMA configurations (they are amazingly | ||
354 | * common). Make sure the nodes cover all memory. | ||
355 | */ | ||
356 | static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) | ||
357 | { | ||
358 | unsigned long numaram, e820ram; | ||
359 | int i; | ||
360 | |||
361 | numaram = 0; | ||
362 | for (i = 0; i < mi->nr_blks; i++) { | ||
363 | unsigned long s = mi->blk[i].start >> PAGE_SHIFT; | ||
364 | unsigned long e = mi->blk[i].end >> PAGE_SHIFT; | ||
365 | numaram += e - s; | ||
366 | numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); | ||
367 | if ((long)numaram < 0) | ||
368 | numaram = 0; | ||
369 | } | ||
370 | |||
371 | e820ram = max_pfn - (memblock_x86_hole_size(0, | ||
372 | max_pfn << PAGE_SHIFT) >> PAGE_SHIFT); | ||
373 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ | ||
374 | if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { | ||
375 | printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", | ||
376 | (numaram << PAGE_SHIFT) >> 20, | ||
377 | (e820ram << PAGE_SHIFT) >> 20); | ||
378 | return false; | ||
379 | } | ||
380 | return true; | ||
381 | } | ||
382 | |||
383 | static int __init numa_register_memblks(struct numa_meminfo *mi) | ||
384 | { | ||
385 | int i, nid; | ||
386 | |||
387 | /* Account for nodes with cpus and no memory */ | ||
388 | node_possible_map = numa_nodes_parsed; | ||
389 | numa_nodemask_from_meminfo(&node_possible_map, mi); | ||
390 | if (WARN_ON(nodes_empty(node_possible_map))) | ||
391 | return -EINVAL; | ||
392 | |||
393 | for (i = 0; i < mi->nr_blks; i++) | ||
394 | memblock_x86_register_active_regions(mi->blk[i].nid, | ||
395 | mi->blk[i].start >> PAGE_SHIFT, | ||
396 | mi->blk[i].end >> PAGE_SHIFT); | ||
397 | |||
398 | /* for out of order entries */ | ||
399 | sort_node_map(); | ||
400 | if (!numa_meminfo_cover_memory(mi)) | ||
401 | return -EINVAL; | ||
402 | |||
403 | /* Finally register nodes. */ | ||
404 | for_each_node_mask(nid, node_possible_map) { | ||
405 | u64 start = (u64)max_pfn << PAGE_SHIFT; | ||
406 | u64 end = 0; | ||
407 | |||
408 | for (i = 0; i < mi->nr_blks; i++) { | ||
409 | if (nid != mi->blk[i].nid) | ||
410 | continue; | ||
411 | start = min(mi->blk[i].start, start); | ||
412 | end = max(mi->blk[i].end, end); | ||
413 | } | ||
414 | |||
415 | if (start < end) | ||
416 | setup_node_bootmem(nid, start, end); | ||
417 | } | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | /** | ||
423 | * dummy_numma_init - Fallback dummy NUMA init | ||
424 | * | ||
425 | * Used if there's no underlying NUMA architecture, NUMA initialization | ||
426 | * fails, or NUMA is disabled on the command line. | ||
427 | * | ||
428 | * Must online at least one node and add memory blocks that cover all | ||
429 | * allowed memory. This function must not fail. | ||
430 | */ | ||
431 | static int __init dummy_numa_init(void) | ||
432 | { | ||
433 | printk(KERN_INFO "%s\n", | ||
434 | numa_off ? "NUMA turned off" : "No NUMA configuration found"); | ||
435 | printk(KERN_INFO "Faking a node at %016lx-%016lx\n", | ||
436 | 0LU, max_pfn << PAGE_SHIFT); | ||
437 | |||
438 | node_set(0, numa_nodes_parsed); | ||
439 | numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static int __init numa_init(int (*init_func)(void)) | ||
445 | { | ||
446 | int i; | ||
447 | int ret; | ||
448 | |||
449 | for (i = 0; i < MAX_LOCAL_APIC; i++) | ||
450 | set_apicid_to_node(i, NUMA_NO_NODE); | ||
451 | |||
452 | nodes_clear(numa_nodes_parsed); | ||
453 | nodes_clear(node_possible_map); | ||
454 | nodes_clear(node_online_map); | ||
455 | memset(&numa_meminfo, 0, sizeof(numa_meminfo)); | ||
456 | remove_all_active_ranges(); | ||
457 | numa_reset_distance(); | ||
458 | |||
459 | ret = init_func(); | ||
460 | if (ret < 0) | ||
461 | return ret; | ||
462 | ret = numa_cleanup_meminfo(&numa_meminfo); | ||
463 | if (ret < 0) | ||
464 | return ret; | ||
465 | |||
466 | numa_emulation(&numa_meminfo, numa_distance_cnt); | ||
467 | |||
468 | ret = numa_register_memblks(&numa_meminfo); | ||
469 | if (ret < 0) | ||
470 | return ret; | ||
471 | |||
472 | for (i = 0; i < nr_cpu_ids; i++) { | ||
473 | int nid = early_cpu_to_node(i); | ||
474 | |||
475 | if (nid == NUMA_NO_NODE) | ||
476 | continue; | ||
477 | if (!node_online(nid)) | ||
478 | numa_clear_node(i); | ||
479 | } | ||
480 | numa_init_array(); | ||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | void __init initmem_init(void) | 9 | void __init initmem_init(void) |
485 | { | 10 | { |
486 | if (!numa_off) { | 11 | x86_numa_init(); |
487 | #ifdef CONFIG_ACPI_NUMA | ||
488 | if (!numa_init(x86_acpi_numa_init)) | ||
489 | return; | ||
490 | #endif | ||
491 | #ifdef CONFIG_AMD_NUMA | ||
492 | if (!numa_init(amd_numa_init)) | ||
493 | return; | ||
494 | #endif | ||
495 | } | ||
496 | |||
497 | numa_init(dummy_numa_init); | ||
498 | } | 12 | } |
499 | 13 | ||
500 | unsigned long __init numa_free_all_bootmem(void) | 14 | unsigned long __init numa_free_all_bootmem(void) |
@@ -509,18 +23,3 @@ unsigned long __init numa_free_all_bootmem(void)
509 | 23 | ||
510 | return pages; | 24 | return pages; |
511 | } | 25 | } |
512 | |||
513 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
514 | int memory_add_physaddr_to_nid(u64 start) | ||
515 | { | ||
516 | struct numa_meminfo *mi = &numa_meminfo; | ||
517 | int nid = mi->blk[0].nid; | ||
518 | int i; | ||
519 | |||
520 | for (i = 0; i < mi->nr_blks; i++) | ||
521 | if (mi->blk[i].start <= start && mi->blk[i].end > start) | ||
522 | nid = mi->blk[i].nid; | ||
523 | return nid; | ||
524 | } | ||
525 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | ||
526 | #endif | ||
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
index ef2d97377d7c..ad86ec91e640 100644
--- a/arch/x86/mm/numa_internal.h
+++ b/arch/x86/mm/numa_internal.h
@@ -19,6 +19,8 @@ void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
19 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi); | 19 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi); |
20 | void __init numa_reset_distance(void); | 20 | void __init numa_reset_distance(void); |
21 | 21 | ||
22 | void __init x86_numa_init(void); | ||
23 | |||
22 | #ifdef CONFIG_NUMA_EMU | 24 | #ifdef CONFIG_NUMA_EMU |
23 | void __init numa_emulation(struct numa_meminfo *numa_meminfo, | 25 | void __init numa_emulation(struct numa_meminfo *numa_meminfo, |
24 | int numa_dist_cnt); | 26 | int numa_dist_cnt); |