-rw-r--r--  arch/powerpc/kernel/Makefile                                       |   4
-rw-r--r--  arch/powerpc/kernel/setup-common.c                                 | 119
-rw-r--r--  arch/powerpc/kernel/setup_32.c                                     |   2
-rw-r--r--  arch/powerpc/kernel/setup_64.c                                     | 120
-rw-r--r--  arch/powerpc/kernel/smp.c (renamed from arch/ppc64/kernel/smp.c)   |  61
-rw-r--r--  arch/powerpc/platforms/chrp/smp.c                                  |  39
-rw-r--r--  arch/ppc/kernel/Makefile                                           |   1
-rw-r--r--  arch/ppc/kernel/irq.c                                              |   2
-rw-r--r--  arch/ppc64/kernel/Makefile                                         |   1
-rw-r--r--  include/asm-powerpc/smp.h (renamed from include/asm-ppc64/smp.h)   |  37
10 files changed, 189 insertions, 197 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 631149ea93db..b3ae2993efb8 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_PPC_OF) += prom_init.o
 obj-$(CONFIG_MODULES) += ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT) += btext.o
 obj-$(CONFIG_6xx) += idle_6xx.o
+obj-$(CONFIG_SMP) += smp.o
 
 ifeq ($(CONFIG_PPC_ISERIES),y)
 $(obj)/head_64.o: $(obj)/lparmap.s
@@ -49,8 +50,9 @@ endif
 
 else
 # stuff used from here for ARCH=ppc or ARCH=ppc64
+smpobj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
-		   setup-common.o
+		   setup-common.o $(smpobj-y)
 
 
 endif
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 14ebe3bc48c3..d43fa8c0e5ac 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -170,12 +170,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	}
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PPC64 /* XXX for now */
 	pvr = per_cpu(pvr, cpu_id);
 #else
-	pvr = cpu_data[cpu_id].pvr;
-#endif
-#else
 	pvr = mfspr(SPRN_PVR);
 #endif
 	maj = (pvr >> 8) & 0xFF;
@@ -408,3 +404,118 @@ static int __init set_preferred_console(void)
 }
 console_initcall(set_preferred_console);
 #endif /* CONFIG_PPC_MULTIPLATFORM */
+
+#ifdef CONFIG_SMP
+
+/**
+ * setup_cpu_maps - initialize the following cpu maps:
+ *	cpu_possible_map
+ *	cpu_present_map
+ *	cpu_sibling_map
+ *
+ * Having the possible map set up early allows us to restrict allocations
+ * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
+ *
+ * We do not initialize the online map here; cpus set their own bits in
+ * cpu_online_map as they come up.
+ *
+ * This function is valid only for Open Firmware systems. finish_device_tree
+ * must be called before using this.
+ *
+ * While we're here, we may as well set the "physical" cpu ids in the paca.
+ */
+void __init smp_setup_cpu_maps(void)
+{
+	struct device_node *dn = NULL;
+	int cpu = 0;
+	int swap_cpuid = 0;
+
+	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
+		int *intserv;
+		int j, len = sizeof(u32), nthreads = 1;
+
+		intserv = (int *)get_property(dn, "ibm,ppc-interrupt-server#s",
+					      &len);
+		if (intserv)
+			nthreads = len / sizeof(int);
+		else {
+			intserv = (int *) get_property(dn, "reg", NULL);
+			if (!intserv)
+				intserv = &cpu;	/* assume logical == phys */
+		}
+
+		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+			cpu_set(cpu, cpu_present_map);
+			set_hard_smp_processor_id(cpu, intserv[j]);
+
+			if (intserv[j] == boot_cpuid_phys)
+				swap_cpuid = cpu;
+			cpu_set(cpu, cpu_possible_map);
+			cpu++;
+		}
+	}
+
+	/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
+	 * boot cpu is logical 0.
+	 */
+	if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
+		u32 tmp;
+		tmp = get_hard_smp_processor_id(0);
+		set_hard_smp_processor_id(0, boot_cpuid_phys);
+		set_hard_smp_processor_id(swap_cpuid, tmp);
+	}
+
+#ifdef CONFIG_PPC64
+	/*
+	 * On pSeries LPAR, we need to know how many cpus
+	 * could possibly be added to this partition.
+	 */
+	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
+	    (dn = of_find_node_by_path("/rtas"))) {
+		int num_addr_cell, num_size_cell, maxcpus;
+		unsigned int *ireg;
+
+		num_addr_cell = prom_n_addr_cells(dn);
+		num_size_cell = prom_n_size_cells(dn);
+
+		ireg = (unsigned int *)
+			get_property(dn, "ibm,lrdr-capacity", NULL);
+
+		if (!ireg)
+			goto out;
+
+		maxcpus = ireg[num_addr_cell + num_size_cell];
+
+		/* Double maxcpus for processors which have SMT capability */
+		if (cpu_has_feature(CPU_FTR_SMT))
+			maxcpus *= 2;
+
+		if (maxcpus > NR_CPUS) {
+			printk(KERN_WARNING
+			       "Partition configured for %d cpus, "
+			       "operating system maximum is %d.\n",
+			       maxcpus, NR_CPUS);
+			maxcpus = NR_CPUS;
+		} else
+			printk(KERN_INFO "Partition configured for %d cpus.\n",
+			       maxcpus);
+
+		for (cpu = 0; cpu < maxcpus; cpu++)
+			cpu_set(cpu, cpu_possible_map);
+	out:
+		of_node_put(dn);
+	}
+
+	/*
+	 * Do the sibling map; assume only two threads per processor.
+	 */
+	for_each_cpu(cpu) {
+		cpu_set(cpu, cpu_sibling_map[cpu]);
+		if (cpu_has_feature(CPU_FTR_SMT))
+			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
+	}
+
+	systemcfg->processorCount = num_present_cpus();
+#endif /* CONFIG_PPC64 */
+}
+#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9680ae99b084..b45eedbb4b3a 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -288,6 +288,8 @@ void __init setup_arch(char **cmdline_p)
 	unflatten_device_tree();
 	finish_device_tree();
 
+	smp_setup_cpu_maps();
+
 #ifdef CONFIG_BOOTX_TEXT
 	init_boot_display();
 #endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 275d86ddd612..6b52cce872be 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -181,114 +181,8 @@ static int __init early_smt_enabled(char *p)
 }
 early_param("smt-enabled", early_smt_enabled);
 
-/**
- * setup_cpu_maps - initialize the following cpu maps:
- *	cpu_possible_map
- *	cpu_present_map
- *	cpu_sibling_map
- *
- * Having the possible map set up early allows us to restrict allocations
- * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
- *
- * We do not initialize the online map here; cpus set their own bits in
- * cpu_online_map as they come up.
- *
- * This function is valid only for Open Firmware systems. finish_device_tree
- * must be called before using this.
- *
- * While we're here, we may as well set the "physical" cpu ids in the paca.
- */
-static void __init setup_cpu_maps(void)
-{
-	struct device_node *dn = NULL;
-	int cpu = 0;
-	int swap_cpuid = 0;
-
-	check_smt_enabled();
-
-	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
-		u32 *intserv;
-		int j, len = sizeof(u32), nthreads;
-
-		intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
-					      &len);
-		if (!intserv)
-			intserv = (u32 *)get_property(dn, "reg", NULL);
-
-		nthreads = len / sizeof(u32);
-
-		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
-			cpu_set(cpu, cpu_present_map);
-			set_hard_smp_processor_id(cpu, intserv[j]);
-
-			if (intserv[j] == boot_cpuid_phys)
-				swap_cpuid = cpu;
-			cpu_set(cpu, cpu_possible_map);
-			cpu++;
-		}
-	}
-
-	/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
-	 * boot cpu is logical 0.
-	 */
-	if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
-		u32 tmp;
-		tmp = get_hard_smp_processor_id(0);
-		set_hard_smp_processor_id(0, boot_cpuid_phys);
-		set_hard_smp_processor_id(swap_cpuid, tmp);
-	}
-
-	/*
-	 * On pSeries LPAR, we need to know how many cpus
-	 * could possibly be added to this partition.
-	 */
-	if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
-	    (dn = of_find_node_by_path("/rtas"))) {
-		int num_addr_cell, num_size_cell, maxcpus;
-		unsigned int *ireg;
-
-		num_addr_cell = prom_n_addr_cells(dn);
-		num_size_cell = prom_n_size_cells(dn);
-
-		ireg = (unsigned int *)
-			get_property(dn, "ibm,lrdr-capacity", NULL);
-
-		if (!ireg)
-			goto out;
-
-		maxcpus = ireg[num_addr_cell + num_size_cell];
-
-		/* Double maxcpus for processors which have SMT capability */
-		if (cpu_has_feature(CPU_FTR_SMT))
-			maxcpus *= 2;
-
-		if (maxcpus > NR_CPUS) {
-			printk(KERN_WARNING
-			       "Partition configured for %d cpus, "
-			       "operating system maximum is %d.\n",
-			       maxcpus, NR_CPUS);
-			maxcpus = NR_CPUS;
-		} else
-			printk(KERN_INFO "Partition configured for %d cpus.\n",
-			       maxcpus);
-
-		for (cpu = 0; cpu < maxcpus; cpu++)
-			cpu_set(cpu, cpu_possible_map);
-	out:
-		of_node_put(dn);
-	}
-
-	/*
-	 * Do the sibling map; assume only two threads per processor.
-	 */
-	for_each_cpu(cpu) {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
-		if (cpu_has_feature(CPU_FTR_SMT))
-			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
-	}
-
-	systemcfg->processorCount = num_present_cpus();
-}
+#else
+#define check_smt_enabled()
 #endif /* CONFIG_SMP */
 
 extern struct machdep_calls pSeries_md;
@@ -417,6 +311,8 @@ void smp_release_cpus(void)
 
 	DBG(" <- smp_release_cpus()\n");
 }
+#else
+#define smp_release_cpus()
 #endif /* CONFIG_SMP || CONFIG_KEXEC */
 
 /*
@@ -608,17 +504,13 @@ void __init setup_system(void)
 
 	parse_early_param();
 
-#ifdef CONFIG_SMP
-	/*
-	 * iSeries has already initialized the cpu maps at this point.
-	 */
-	setup_cpu_maps();
+	check_smt_enabled();
+	smp_setup_cpu_maps();
 
 	/* Release secondary cpus out of their spinloops at 0x60 now that
 	 * we can map physical -> logical CPU ids
 	 */
 	smp_release_cpus();
-#endif
 
 	printk("Starting Linux PPC64 %s\n", system_utsname.version);
 
diff --git a/arch/ppc64/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 017c12919832..1794a694a928 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -39,13 +39,18 @@
 #include <asm/pgtable.h>
 #include <asm/prom.h>
 #include <asm/smp.h>
-#include <asm/paca.h>
 #include <asm/time.h>
+#include <asm/xmon.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
 #include <asm/system.h>
-#include <asm/abs_addr.h>
 #include <asm/mpic.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+
+int smp_hw_index[NR_CPUS];
+struct thread_info *secondary_ti;
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -60,6 +65,7 @@ cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
 
+/* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
 
 static volatile unsigned int cpu_callin_map[NR_CPUS];
@@ -89,7 +95,9 @@ void __devinit smp_mpic_setup_cpu(int cpu)
 {
 	mpic_setup_this_cpu();
 }
+#endif /* CONFIG_MPIC */
 
+#ifdef CONFIG_PPC64
 void __devinit smp_generic_kick_cpu(int nr)
 {
 	BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -102,8 +110,7 @@ void __devinit smp_generic_kick_cpu(int nr)
 	paca[nr].cpu_start = 1;
 	smp_mb();
 }
-
-#endif /* CONFIG_MPIC */
+#endif
 
 void smp_message_recv(int msg, struct pt_regs *regs)
 {
@@ -111,15 +118,10 @@ void smp_message_recv(int msg, struct pt_regs *regs)
 	case PPC_MSG_CALL_FUNCTION:
 		smp_call_function_interrupt();
 		break;
 	case PPC_MSG_RESCHEDULE:
 		/* XXX Do we have to do this? */
 		set_need_resched();
 		break;
-#if 0
-	case PPC_MSG_MIGRATE_TASK:
-		/* spare */
-		break;
-#endif
 #ifdef CONFIG_DEBUGGER
 	case PPC_MSG_DEBUGGER_BREAK:
 		debugger_ipi(regs);
@@ -171,8 +173,8 @@ static struct call_data_struct {
 	int wait;
 } *call_data;
 
-/* delay of at least 8 seconds on 1GHz cpu */
-#define SMP_CALL_TIMEOUT (1UL << (30 + 3))
+/* delay of at least 8 seconds */
+#define SMP_CALL_TIMEOUT	8
 
 /*
  * This function sends a 'generic call function' IPI to all other CPUs
@@ -194,7 +196,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 {
 	struct call_data_struct data;
 	int ret = -1, cpus;
-	unsigned long timeout;
+	u64 timeout;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -220,11 +222,12 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	/* Send a message to all other CPUs and wait for them to respond */
 	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
 
+	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
+
 	/* Wait for response */
-	timeout = SMP_CALL_TIMEOUT;
 	while (atomic_read(&data.started) != cpus) {
 		HMT_low();
-		if (--timeout == 0) {
+		if (get_tb() >= timeout) {
 			printk("smp_call_function on cpu %d: other cpus not "
 			       "responding (%d)\n", smp_processor_id(),
 			       atomic_read(&data.started));
@@ -234,10 +237,9 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	}
 
 	if (wait) {
-		timeout = SMP_CALL_TIMEOUT;
 		while (atomic_read(&data.finished) != cpus) {
 			HMT_low();
-			if (--timeout == 0) {
+			if (get_tb() >= timeout) {
 				printk("smp_call_function on cpu %d: other "
 				       "cpus not finishing (%d/%d)\n",
 				       smp_processor_id(),
@@ -251,7 +253,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 
 	ret = 0;
 
-out:
+ out:
 	call_data = NULL;
 	HMT_medium();
 	spin_unlock(&call_lock);
@@ -313,8 +315,11 @@ static void __init smp_create_idle(unsigned int cpu)
 	p = fork_idle(cpu);
 	if (IS_ERR(p))
 		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+#ifdef CONFIG_PPC64
 	paca[cpu].__current = p;
+#endif
 	current_set[cpu] = p->thread_info;
+	p->thread_info->cpu = cpu;
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -333,18 +338,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	smp_store_cpu_info(boot_cpuid);
 	cpu_callin_map[boot_cpuid] = 1;
 
-#ifndef CONFIG_PPC_ISERIES
-	paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();
-
-	/*
-	 * Should update do_gtod.stamp_xsec.
-	 * For now we leave it which means the time can be some
-	 * number of msecs off until someone does a settimeofday()
-	 */
-	do_gtod.varp->tb_orig_stamp = tb_last_stamp;
-	systemcfg->tb_orig_stamp = tb_last_stamp;
-#endif
-
 	max_cpus = smp_ops->probe();
 
 	smp_space_timers(max_cpus);
@@ -359,8 +352,9 @@ void __devinit smp_prepare_boot_cpu(void)
 	BUG_ON(smp_processor_id() != boot_cpuid);
 
 	cpu_set(boot_cpuid, cpu_online_map);
-
+#ifdef CONFIG_PPC64
 	paca[boot_cpuid].__current = current;
+#endif
 	current_set[boot_cpuid] = current->thread_info;
 }
 
@@ -444,13 +438,16 @@ int __devinit __cpu_up(unsigned int cpu)
 {
 	int c;
 
+	secondary_ti = current_set[cpu];
 	if (!cpu_enable(cpu))
 		return 0;
 
 	if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
 		return -EINVAL;
 
+#ifdef CONFIG_PPC64
 	paca[cpu].default_decr = tb_ticks_per_jiffy;
+#endif
 
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
@@ -513,7 +510,7 @@ int __devinit start_secondary(void *unused)
 	current->active_mm = &init_mm;
 
 	smp_store_cpu_info(cpu);
-	set_dec(paca[cpu].default_decr);
+	set_dec(tb_ticks_per_jiffy);
 	cpu_callin_map[cpu] = 1;
 
 	smp_ops->setup_cpu(cpu);
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index 31ee49c25014..bb2315997d45 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -35,43 +35,6 @@
 #include <asm/smp.h>
 #include <asm/mpic.h>
 
-extern unsigned long smp_chrp_cpu_nr;
-
-static int __init smp_chrp_probe(void)
-{
-	struct device_node *cpus = NULL;
-	unsigned int *reg;
-	int reglen;
-	int ncpus = 0;
-	int cpuid;
-	unsigned int phys;
-
-	/* Count CPUs in the device-tree */
-	cpuid = 1;	/* the boot cpu is logical cpu 0 */
-	while ((cpus = of_find_node_by_type(cpus, "cpu")) != NULL) {
-		phys = ncpus;
-		reg = (unsigned int *) get_property(cpus, "reg", &reglen);
-		if (reg && reglen >= sizeof(unsigned int))
-			/* hmmm, not having a reg property would be bad */
-			phys = *reg;
-		if (phys != boot_cpuid_phys) {
-			set_hard_smp_processor_id(cpuid, phys);
-			++cpuid;
-		}
-		++ncpus;
-	}
-
-	printk(KERN_INFO "CHRP SMP probe found %d cpus\n", ncpus);
-
-	/* Nothing more to do if less than 2 of them */
-	if (ncpus <= 1)
-		return 1;
-
-	mpic_request_ipis();
-
-	return ncpus;
-}
-
 static void __devinit smp_chrp_kick_cpu(int nr)
 {
 	*(unsigned long *)KERNELBASE = nr;
@@ -114,7 +77,7 @@ void __devinit smp_chrp_take_timebase(void)
 /* CHRP with openpic */
 struct smp_ops_t chrp_smp_ops = {
 	.message_pass = smp_mpic_message_pass,
-	.probe = smp_chrp_probe,
+	.probe = smp_mpic_probe,
 	.kick_cpu = smp_chrp_kick_cpu,
 	.setup_cpu = smp_chrp_setup_cpu,
 	.give_timebase = smp_chrp_give_timebase,
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index b35346df1e37..c610ca933a25 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -45,7 +45,6 @@ obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
 obj-$(CONFIG_PCI) += pci.o
 obj-$(CONFIG_KGDB) += ppc-stub.o
-obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
 obj-$(CONFIG_TAU) += temp.o
 ifndef CONFIG_E200
 obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
index 772e428aaa59..fbb2b9f8922c 100644
--- a/arch/ppc/kernel/irq.c
+++ b/arch/ppc/kernel/irq.c
@@ -126,7 +126,7 @@ skip:
 		seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
 	}
 #endif
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
 	/* should this be per processor send/receive? */
 	seq_printf(p, "IPI (recv/sent): %10u/%u\n",
 		   atomic_read(&ipi_recv), atomic_read(&ipi_sent));
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index f597c2954b71..c441aebe7648 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_PPC_PSERIES) += udbg_16550.o
 obj-$(CONFIG_KEXEC) += machine_kexec.o
 obj-$(CONFIG_EEH) += eeh.o
 obj-$(CONFIG_PROC_FS) += proc_ppc64.o
-obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_MODULES) += module.o
 ifneq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_MODULES) += ppc_ksyms.o
diff --git a/include/asm-ppc64/smp.h b/include/asm-powerpc/smp.h
index ba0f5c8bbb22..8bcdd0faefea 100644
--- a/include/asm-ppc64/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -1,5 +1,5 @@
 /*
- * smp.h: PPC64 specific SMP code.
+ * smp.h: PowerPC-specific SMP code.
  *
  * Original was a copy of sparc smp.h. Now heavily modified
  * for PPC.
@@ -13,9 +13,9 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#ifndef _ASM_POWERPC_SMP_H
+#define _ASM_POWERPC_SMP_H
 #ifdef __KERNEL__
-#ifndef _PPC64_SMP_H
-#define _PPC64_SMP_H
 
 #include <linux/config.h>
 #include <linux/threads.h>
@@ -24,7 +24,9 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_PPC64
 #include <asm/paca.h>
+#endif
 
 extern int boot_cpuid;
 extern int boot_cpuid_phys;
@@ -45,8 +47,19 @@ void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);
 #endif
 
+#ifdef CONFIG_PPC64
 #define raw_smp_processor_id()	(get_paca()->paca_index)
 #define hard_smp_processor_id()	(get_paca()->hw_cpu_id)
+#else
+/* 32-bit */
+extern int smp_hw_index[];
+
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
+#define hard_smp_processor_id()	(smp_hw_index[smp_processor_id()])
+#define get_hard_smp_processor_id(cpu)	(smp_hw_index[(cpu)])
+#define set_hard_smp_processor_id(cpu, phys)\
+	(smp_hw_index[(cpu)] = (phys))
+#endif
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
 
@@ -65,21 +78,35 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
 void smp_init_iSeries(void);
 void smp_init_pSeries(void);
 void smp_init_cell(void);
+void smp_setup_cpu_maps(void);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
+
+#else
+/* for UP */
+#define smp_setup_cpu_maps()
+#define smp_release_cpus()
+
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC64
 #define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id)
 #define set_hard_smp_processor_id(CPU, VAL) \
 	do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
+#else
+/* 32-bit */
+#ifndef CONFIG_SMP
+#define get_hard_smp_processor_id(cpu)	boot_cpuid_phys
+#define set_hard_smp_processor_id(cpu, phys)
+#endif
+#endif
 
 extern int smt_enabled_at_boot;
 
 extern int smp_mpic_probe(void);
 extern void smp_mpic_setup_cpu(int cpu);
 extern void smp_generic_kick_cpu(int nr);
-extern void smp_release_cpus(void);
 
 extern void smp_generic_give_timebase(void);
 extern void smp_generic_take_timebase(void);
@@ -88,5 +115,5 @@ extern struct smp_ops_t *smp_ops;
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* !(_PPC64_SMP_H) */
 #endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SMP_H) */