author    Linus Torvalds <torvalds@g5.osdl.org>  2006-03-23 01:20:46 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-23 01:20:46 -0500
commit    2e6e33bab6e1996a5dec9108fb467b52b841e7a8 (patch)
tree      6b98b15c2fe7899cdeb2453589cdee00f7853492 /arch/powerpc/mm
parent    b7ad6d75028d021362221d9b2db19fcff995c3f8 (diff)
parent    b88a0b1d5560cf1959c1565617e460a45c688a08 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (78 commits)
  [PATCH] powerpc: Add FSL SEC node to documentation
  [PATCH] macintosh: tidy-up driver_register() return values
  [PATCH] powerpc: tidy-up of_register_driver()/driver_register() return values
  [PATCH] powerpc: via-pmu warning fix
  [PATCH] macintosh: cleanup the use of i2c headers
  [PATCH] powerpc: dont allow old RTC to be selected
  [PATCH] powerpc: make powerbook_sleep_grackle static
  [PATCH] powerpc: Fix warning in add_memory
  [PATCH] powerpc: update mailing list addresses
  [PATCH] powerpc: Remove calculation of io hole
  [PATCH] powerpc: iseries: Add bootargs to /chosen
  [PATCH] powerpc: iseries: Add /system-id, /model and /compatible
  [PATCH] powerpc: Add strne2a() to convert a string from EBCDIC to ASCII
  [PATCH] powerpc: iseries: Make more stuff static in platforms/iseries/mf.c
  [PATCH] powerpc: iseries: Remove pointless iSeries_(restart|power_off|halt)
  [PATCH] powerpc: iseries: mf related cleanups
  [PATCH] powerpc: Replace platform_is_lpar() with a firmware feature
  [PATCH] powerpc: trivial: Cleanup whitespace in cputable.h
  [PATCH] powerpc: Remove unused iommu_off logic from pSeries_init_early()
  [PATCH] powerpc: Unconfuse htab_bolt_mapping() callers
  ...
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fault.c          |   2
-rw-r--r--  arch/powerpc/mm/hash_low_32.S    |   2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  |  32
-rw-r--r--  arch/powerpc/mm/init_64.c        |  48
-rw-r--r--  arch/powerpc/mm/lmb.c            |  16
-rw-r--r--  arch/powerpc/mm/mem.c            |   3
-rw-r--r--  arch/powerpc/mm/mmap.c           |   2
-rw-r--r--  arch/powerpc/mm/numa.c           | 160
-rw-r--r--  arch/powerpc/mm/slb_low.S        |   2
-rw-r--r--  arch/powerpc/mm/stab.c           |   4
-rw-r--r--  arch/powerpc/mm/tlb_64.c         |   2
11 files changed, 108 insertions, 165 deletions
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a4815d316722..ec4adcb4bc28 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -1,6 +1,4 @@
 /*
- * arch/ppc/mm/fault.c
- *
  * PowerPC version
  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  *
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 12ccd7155bac..ea469eefa146 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -1,6 +1,4 @@
 /*
- * arch/ppc/kernel/hashtable.S
- *
  * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
  *
  * PowerPC version
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index e9d589eefc14..89b35c181314 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -169,7 +169,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 #ifdef CONFIG_PPC_ISERIES
 		if (_machine == PLATFORM_ISERIES_LPAR)
 			ret = iSeries_hpte_insert(hpteg, va,
-						  __pa(vaddr),
+						  paddr,
 						  tmp_mode,
 						  HPTE_V_BOLTED,
 						  psize);
@@ -178,7 +178,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 #ifdef CONFIG_PPC_PSERIES
 		if (_machine & PLATFORM_LPAR)
 			ret = pSeries_lpar_hpte_insert(hpteg, va,
-						       virt_to_abs(paddr),
+						       paddr,
 						       tmp_mode,
 						       HPTE_V_BOLTED,
 						       psize);
@@ -186,7 +186,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 #endif
 #ifdef CONFIG_PPC_MULTIPLATFORM
 		ret = native_hpte_insert(hpteg, va,
-					 virt_to_abs(paddr),
+					 paddr,
 					 tmp_mode, HPTE_V_BOLTED,
 					 psize);
 #endif
@@ -392,7 +392,7 @@ static unsigned long __init htab_get_table_size(void)
 #ifdef CONFIG_MEMORY_HOTPLUG
 void create_section_mapping(unsigned long start, unsigned long end)
 {
-	BUG_ON(htab_bolt_mapping(start, end, start,
+	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
 		_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
 		mmu_linear_psize));
 }
@@ -422,7 +422,7 @@ void __init htab_initialize(void)
 
 	htab_hash_mask = pteg_count - 1;
 
-	if (platform_is_lpar()) {
+	if (firmware_has_feature(FW_FEATURE_LPAR)) {
 		/* Using a hypervisor which owns the htab */
 		htab_address = NULL;
 		_SDR1 = 0;
@@ -431,7 +431,6 @@ void __init htab_initialize(void)
 		 * the absolute address space.
 		 */
 		table = lmb_alloc(htab_size_bytes, htab_size_bytes);
-		BUG_ON(table == 0);
 
 		DBG("Hash table allocated at %lx, size: %lx\n", table,
 		    htab_size_bytes);
@@ -474,21 +473,22 @@ void __init htab_initialize(void)
 
 		if (dart_tablebase != 0 && dart_tablebase >= base
 				&& dart_tablebase < (base + size)) {
+			unsigned long dart_table_end = dart_tablebase + 16 * MB;
 			if (base != dart_tablebase)
 				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
-							 base, mode_rw,
+							 __pa(base), mode_rw,
 							 mmu_linear_psize));
-			if ((base + size) > (dart_tablebase + 16*MB))
+			if ((base + size) > dart_table_end)
 				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
 							 base + size,
-							 dart_tablebase+16*MB,
+							 __pa(dart_table_end),
 							 mode_rw,
 							 mmu_linear_psize));
 			continue;
 		}
 #endif /* CONFIG_U3_DART */
-		BUG_ON(htab_bolt_mapping(base, base + size, base,
+		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
 				mode_rw, mmu_linear_psize));
 	}
 
 	/*
@@ -505,8 +505,8 @@ void __init htab_initialize(void)
 		if (base + size >= tce_alloc_start)
 			tce_alloc_start = base + size + 1;
 
 		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
-					 tce_alloc_start, mode_rw,
+					 __pa(tce_alloc_start), mode_rw,
 					 mmu_linear_psize));
 	}
 
@@ -517,7 +517,7 @@ void __init htab_initialize(void)
 
 void htab_initialize_secondary(void)
 {
-	if (!platform_is_lpar())
+	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		mtspr(SPRN_SDR1, _SDR1);
 }
 
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index bacb71c89811..babebd15bdc4 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -84,54 +84,6 @@
 /* max amount of RAM to use */
 unsigned long __max_memory;
 
-/* info on what we think the IO hole is */
-unsigned long 	io_hole_start;
-unsigned long	io_hole_size;
-
-/*
- * Do very early mm setup.
- */
-void __init mm_init_ppc64(void)
-{
-#ifndef CONFIG_PPC_ISERIES
-	unsigned long i;
-#endif
-
-	ppc64_boot_msg(0x100, "MM Init");
-
-	/* This is the story of the IO hole... please, keep seated,
-	 * unfortunately, we are out of oxygen masks at the moment.
-	 * So we need some rough way to tell where your big IO hole
-	 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
-	 * that area as well, on POWER4 we don't have one, etc...
-	 * We need that as a "hint" when sizing the TCE table on POWER3
-	 * So far, the simplest way that seem work well enough for us it
-	 * to just assume that the first discontinuity in our physical
-	 * RAM layout is the IO hole. That may not be correct in the future
-	 * (and isn't on iSeries but then we don't care ;)
-	 */
-
-#ifndef CONFIG_PPC_ISERIES
-	for (i = 1; i < lmb.memory.cnt; i++) {
-		unsigned long base, prevbase, prevsize;
-
-		prevbase = lmb.memory.region[i-1].base;
-		prevsize = lmb.memory.region[i-1].size;
-		base = lmb.memory.region[i].base;
-		if (base > (prevbase + prevsize)) {
-			io_hole_start = prevbase + prevsize;
-			io_hole_size = base - (prevbase + prevsize);
-			break;
-		}
-	}
-#endif /* CONFIG_PPC_ISERIES */
-	if (io_hole_start)
-		printk("IO Hole assumed to be %lx -> %lx\n",
-		       io_hole_start, io_hole_start + io_hole_size - 1);
-
-	ppc64_boot_msg(0x100, "MM Init Done");
-}
-
 void free_initmem(void)
 {
 	unsigned long addr;
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index bbe3eac918e8..417d58518558 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -31,6 +31,8 @@
 #define DBG(fmt...)
 #endif
 
+#define LMB_ALLOC_ANYWHERE	0
+
 struct lmb lmb;
 
 void lmb_dump_all(void)
@@ -226,6 +228,20 @@ unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
 unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
 				    unsigned long max_addr)
 {
+	unsigned long alloc;
+
+	alloc = __lmb_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+				size, max_addr);
+
+	return alloc;
+}
+
+unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
+				    unsigned long max_addr)
+{
 	long i, j;
 	unsigned long base = 0;
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 454cac01d8cc..badac10d700c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -125,7 +125,7 @@ int __devinit add_memory(u64 start, u64 size)
 	nid = hot_add_scn_to_nid(start);
 	pgdata = NODE_DATA(nid);
 
-	start = __va(start);
+	start = (unsigned long)__va(start);
 	create_section_mapping(start, start + size);
 
 	/* this should work for most non-highmem platforms */
@@ -249,7 +249,6 @@ void __init do_init_bootmem(void)
 	bootmap_pages = bootmem_bootmap_pages(total_pages);
 
 	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-	BUG_ON(!start);
 
 	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
 
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index fe65f522aff3..972a8e884b9a 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -1,6 +1,4 @@
 /*
- * linux/arch/ppc64/mm/mmap.c
- *
  * flexible mmap layout support
  *
  * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 2863a912bcd0..e89b22aa539e 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -129,10 +129,12 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,
 		*start_pfn = 0;
 }
 
-static inline void map_cpu_to_node(int cpu, int node)
+static void __cpuinit map_cpu_to_node(int cpu, int node)
 {
 	numa_cpu_lookup_table[cpu] = node;
 
+	dbg("adding cpu %d to node %d\n", cpu, node);
+
 	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
 		cpu_set(cpu, numa_cpumask_lookup_table[node]);
 }
@@ -153,7 +155,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static struct device_node *find_cpu_node(unsigned int cpu)
+static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
 {
 	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
 	struct device_node *cpu_node = NULL;
@@ -189,23 +191,29 @@ static int *of_get_associativity(struct device_node *dev)
 	return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
 }
 
-static int of_node_numa_domain(struct device_node *device)
+/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
+ * info is found.
+ */
+static int of_node_to_nid(struct device_node *device)
 {
-	int numa_domain;
+	int nid = -1;
 	unsigned int *tmp;
 
 	if (min_common_depth == -1)
-		return 0;
+		goto out;
 
 	tmp = of_get_associativity(device);
-	if (tmp && (tmp[0] >= min_common_depth)) {
-		numa_domain = tmp[min_common_depth];
-	} else {
-		dbg("WARNING: no NUMA information for %s\n",
-		    device->full_name);
-		numa_domain = 0;
-	}
-	return numa_domain;
+	if (!tmp)
+		goto out;
+
+	if (tmp[0] >= min_common_depth)
+		nid = tmp[min_common_depth];
+
+	/* POWER4 LPAR uses 0xffff as invalid node */
+	if (nid == 0xffff || nid >= MAX_NUMNODES)
+		nid = -1;
+out:
+	return nid;
 }
 
 /*
@@ -246,8 +254,7 @@ static int __init find_min_common_depth(void)
 	if ((len >= 1) && ref_points) {
 		depth = ref_points[1];
 	} else {
-		dbg("WARNING: could not find NUMA "
-		    "associativity reference point\n");
+		dbg("NUMA: ibm,associativity-reference-points not found.\n");
 		depth = -1;
 	}
 	of_node_put(rtas_root);
@@ -283,9 +290,9 @@ static unsigned long __devinit read_n_cells(int n, unsigned int **buf)
  * Figure out to which domain a cpu belongs and stick it there.
  * Return the id of the domain used.
  */
-static int numa_setup_cpu(unsigned long lcpu)
+static int __cpuinit numa_setup_cpu(unsigned long lcpu)
 {
-	int numa_domain = 0;
+	int nid = 0;
 	struct device_node *cpu = find_cpu_node(lcpu);
 
 	if (!cpu) {
@@ -293,27 +300,16 @@ static int numa_setup_cpu(unsigned long lcpu)
 		goto out;
 	}
 
-	numa_domain = of_node_numa_domain(cpu);
+	nid = of_node_to_nid(cpu);
 
-	if (numa_domain >= num_online_nodes()) {
-		/*
-		 * POWER4 LPAR uses 0xffff as invalid node,
-		 * dont warn in this case.
-		 */
-		if (numa_domain != 0xffff)
-			printk(KERN_ERR "WARNING: cpu %ld "
-			       "maps to invalid NUMA node %d\n",
-			       lcpu, numa_domain);
-		numa_domain = 0;
-	}
+	if (nid < 0 || !node_online(nid))
+		nid = any_online_node(NODE_MASK_ALL);
 out:
-	node_set_online(numa_domain);
-
-	map_cpu_to_node(lcpu, numa_domain);
+	map_cpu_to_node(lcpu, nid);
 
 	of_node_put(cpu);
 
-	return numa_domain;
+	return nid;
 }
 
 static int cpu_numa_callback(struct notifier_block *nfb,
@@ -325,10 +321,7 @@ static int cpu_numa_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
-		if (min_common_depth == -1 || !numa_enabled)
-			map_cpu_to_node(lcpu, 0);
-		else
-			numa_setup_cpu(lcpu);
+		numa_setup_cpu(lcpu);
 		ret = NOTIFY_OK;
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
@@ -375,7 +368,7 @@ static int __init parse_numa_properties(void)
 {
 	struct device_node *cpu = NULL;
 	struct device_node *memory = NULL;
-	int max_domain;
+	int default_nid = 0;
 	unsigned long i;
 
 	if (numa_enabled == 0) {
@@ -385,32 +378,32 @@ static int __init parse_numa_properties(void)
 
 	min_common_depth = find_min_common_depth();
 
-	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
 	if (min_common_depth < 0)
 		return min_common_depth;
 
-	max_domain = numa_setup_cpu(boot_cpuid);
+	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
 
 	/*
-	 * Even though we connect cpus to numa domains later in SMP init,
-	 * we need to know the maximum node id now. This is because each
-	 * node id must have NODE_DATA etc backing it.
-	 * As a result of hotplug we could still have cpus appear later on
-	 * with larger node ids. In that case we force the cpu into node 0.
+	 * Even though we connect cpus to numa domains later in SMP
+	 * init, we need to know the node ids now. This is because
+	 * each node to be onlined must have NODE_DATA etc backing it.
 	 */
-	for_each_cpu(i) {
-		int numa_domain;
+	for_each_present_cpu(i) {
+		int nid;
 
 		cpu = find_cpu_node(i);
+		BUG_ON(!cpu);
+		nid = of_node_to_nid(cpu);
+		of_node_put(cpu);
 
-		if (cpu) {
-			numa_domain = of_node_numa_domain(cpu);
-			of_node_put(cpu);
-
-			if (numa_domain < MAX_NUMNODES &&
-			    max_domain < numa_domain)
-				max_domain = numa_domain;
-		}
+		/*
+		 * Don't fall back to default_nid yet -- we will plug
+		 * cpus into nodes once the memory scan has discovered
+		 * the topology.
+		 */
+		if (nid < 0)
+			continue;
+		node_set_online(nid);
 	}
 
 	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
@@ -418,7 +411,7 @@ static int __init parse_numa_properties(void)
 	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
 		unsigned long start;
 		unsigned long size;
-		int numa_domain;
+		int nid;
 		int ranges;
 		unsigned int *memcell_buf;
 		unsigned int len;
@@ -439,18 +432,15 @@ new_range:
 		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
 		size = read_n_cells(n_mem_size_cells, &memcell_buf);
 
-		numa_domain = of_node_numa_domain(memory);
-
-		if (numa_domain >= MAX_NUMNODES) {
-			if (numa_domain != 0xffff)
-				printk(KERN_ERR "WARNING: memory at %lx maps "
-				       "to invalid NUMA node %d\n", start,
-				       numa_domain);
-			numa_domain = 0;
-		}
-
-		if (max_domain < numa_domain)
-			max_domain = numa_domain;
+		/*
+		 * Assumption: either all memory nodes or none will
+		 * have associativity properties.  If none, then
+		 * everything goes to default_nid.
+		 */
+		nid = of_node_to_nid(memory);
+		if (nid < 0)
+			nid = default_nid;
+		node_set_online(nid);
 
 		if (!(size = numa_enforce_memory_limit(start, size))) {
 			if (--ranges)
@@ -459,16 +449,13 @@ new_range:
 				continue;
 		}
 
-		add_region(numa_domain, start >> PAGE_SHIFT,
+		add_region(nid, start >> PAGE_SHIFT,
 			   size >> PAGE_SHIFT);
 
 		if (--ranges)
 			goto new_range;
 	}
 
-	for (i = 0; i <= max_domain; i++)
-		node_set_online(i);
-
 	return 0;
 }
 
@@ -483,7 +470,6 @@ static void __init setup_nonnuma(void)
 	printk(KERN_INFO "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	map_cpu_to_node(boot_cpuid, 0);
 	for (i = 0; i < lmb.memory.cnt; ++i)
 		add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
 			   lmb_size_pages(&lmb.memory, i));
@@ -570,11 +556,11 @@ static void __init *careful_allocation(int nid, unsigned long size,
 					unsigned long end_pfn)
 {
 	int new_nid;
-	unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret)
-		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());
+		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
 
 	if (!ret)
 		panic("numa.c: cannot allocate %lu bytes on node %d",
@@ -620,6 +606,8 @@ void __init do_init_bootmem(void)
 		dump_numa_memory_topology();
 
 	register_cpu_notifier(&ppc64_numa_nb);
+	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
+			  (void *)(unsigned long)boot_cpuid);
 
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn, pages_present;
@@ -767,10 +755,10 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 {
 	struct device_node *memory = NULL;
 	nodemask_t nodes;
-	int numa_domain = 0;
+	int default_nid = any_online_node(NODE_MASK_ALL);
 
 	if (!numa_enabled || (min_common_depth < 0))
-		return numa_domain;
+		return default_nid;
 
 	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
 		unsigned long start, size;
@@ -787,15 +775,15 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 ha_new_range:
 		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
 		size = read_n_cells(n_mem_size_cells, &memcell_buf);
-		numa_domain = of_node_numa_domain(memory);
+		nid = of_node_to_nid(memory);
 
 		/* Domains not present at boot default to 0 */
-		if (!node_online(numa_domain))
-			numa_domain = any_online_node(NODE_MASK_ALL);
+		if (nid < 0 || !node_online(nid))
+			nid = default_nid;
 
 		if ((scn_addr >= start) && (scn_addr < (start + size))) {
 			of_node_put(memory);
-			goto got_numa_domain;
+			goto got_nid;
 		}
 
 		if (--ranges)		/* process all ranges in cell */
804 BUG(); /* section address should be found above */ 792 BUG(); /* section address should be found above */
805 793
806 /* Temporary code to ensure that returned node is not empty */ 794 /* Temporary code to ensure that returned node is not empty */
807got_numa_domain: 795got_nid:
808 nodes_setall(nodes); 796 nodes_setall(nodes);
809 while (NODE_DATA(numa_domain)->node_spanned_pages == 0) { 797 while (NODE_DATA(nid)->node_spanned_pages == 0) {
810 node_clear(numa_domain, nodes); 798 node_clear(nid, nodes);
811 numa_domain = any_online_node(nodes); 799 nid = any_online_node(nodes);
812 } 800 }
813 return numa_domain; 801 return nid;
814} 802}
815#endif /* CONFIG_MEMORY_HOTPLUG */ 803#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index d1acee38f163..abfaabf667bf 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -1,6 +1,4 @@
 /*
- * arch/ppc64/mm/slb_low.S
- *
  * Low-level SLB routines
  *
  * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 82e4951826bc..91d25fb27f89 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -247,10 +247,6 @@ void stabs_alloc(void)
 
 		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
 					 1<<SID_SHIFT);
-		if (! newstab)
-			panic("Unable to allocate segment table for CPU %d.\n",
-			      cpu);
-
 		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index bb3afb6e6317..f734b11566c2 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -36,7 +36,7 @@
 DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
 /* This is declared as we are using the more or less generic
- * include/asm-ppc64/tlb.h file -- tgall
+ * include/asm-powerpc/tlb.h file -- tgall
  */
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);