-rw-r--r--  arch/mips/Kconfig                                                          5
-rw-r--r--  arch/mips/fw/arc/init.c                                                    8
-rw-r--r--  arch/mips/kernel/mips-mt.c                                                 1
-rw-r--r--  arch/mips/kernel/setup.c                                                   3
-rw-r--r--  arch/mips/kernel/smp-mt.c                                                193
-rw-r--r--  arch/mips/kernel/smp.c                                                    23
-rw-r--r--  arch/mips/kernel/smtc-proc.c                                               1
-rw-r--r--  arch/mips/kernel/smtc.c                                                    1
-rw-r--r--  arch/mips/mips-boards/generic/init.c                                       8
-rw-r--r--  arch/mips/mips-boards/malta/malta_smtc.c                                  66
-rw-r--r--  arch/mips/mipssim/Makefile                                                 2
-rw-r--r--  arch/mips/mipssim/sim_setup.c                                             16
-rw-r--r--  arch/mips/mipssim/sim_smtc.c (renamed from arch/mips/mipssim/sim_smp.c)   92
-rw-r--r--  arch/mips/pmc-sierra/yosemite/prom.c                                       5
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c                                      149
-rw-r--r--  arch/mips/qemu/q-smp.c                                                    40
-rw-r--r--  arch/mips/sgi-ip27/ip27-init.c                                             1
-rw-r--r--  arch/mips/sgi-ip27/ip27-klnuma.c                                           1
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c                                            109
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c                                           101
-rw-r--r--  arch/mips/sibyte/cfe/Makefile                                              1
-rw-r--r--  arch/mips/sibyte/cfe/setup.c                                              11
-rw-r--r--  arch/mips/sibyte/cfe/smp.c                                               110
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c                                            100
-rw-r--r--  include/asm-mips/sibyte/sb1250.h                                           2
-rw-r--r--  include/asm-mips/smp-ops.h                                                56
-rw-r--r--  include/asm-mips/smp.h                                                    61
27 files changed, 681 insertions, 485 deletions
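
The diffs below convert every MIPS platform from the old prom_*()/plat_*() SMP hooks to a per-platform struct plat_smp_ops that is registered from prom_init() and dispatched through a single mp_ops pointer in arch/mips/kernel/smp.c. As a rough guide to the pattern, here is a minimal sketch for a hypothetical board "foo"; the field names come from the new include/asm-mips/smp-ops.h added at the end of this diff, while "foo" itself and the empty hook bodies are illustrative only, not part of the patch.

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/smp-ops.h>

static void foo_send_ipi_single(int cpu, unsigned int action)
{
	/* poke this board's IPI mechanism for one CPU */
}

static void foo_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, mask)
		foo_send_ipi_single(i, action);
}

static void __cpuinit foo_init_secondary(void) { }	/* per-CPU interrupt setup */
static void __cpuinit foo_smp_finish(void) { }		/* just before the CPU goes online */
static void foo_cpus_done(void) { }			/* after all CPUs are online */
static void __cpuinit foo_boot_secondary(int cpu, struct task_struct *idle) { }
static void __init foo_smp_setup(void) { }		/* populate phys_cpu_present_map */
static void __init foo_prepare_cpus(unsigned int max_cpus) { }

struct plat_smp_ops foo_smp_ops = {
	.send_ipi_single	= foo_send_ipi_single,
	.send_ipi_mask		= foo_send_ipi_mask,
	.init_secondary		= foo_init_secondary,
	.smp_finish		= foo_smp_finish,
	.cpus_done		= foo_cpus_done,
	.boot_secondary		= foo_boot_secondary,
	.smp_setup		= foo_smp_setup,
	.prepare_cpus		= foo_prepare_cpus,
};

void __init prom_init(void)
{
	/* ... firmware-specific setup ... */
	register_smp_ops(&foo_smp_ops);
}
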
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8cbdfd255670..b211e7961f28 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1441,6 +1441,7 @@ config MIPS_MT_SMP
1441 select SMP 1441 select SMP
1442 select SYS_SUPPORTS_SCHED_SMT if SMP 1442 select SYS_SUPPORTS_SCHED_SMT if SMP
1443 select SYS_SUPPORTS_SMP 1443 select SYS_SUPPORTS_SMP
1444 select SMP_UP
1444 help 1445 help
1445 This is a kernel model which is also known a VSMP or lately 1446 This is a kernel model which is also known a VSMP or lately
1446 has been marketesed into SMVP. 1447 has been marketesed into SMVP.
@@ -1457,6 +1458,7 @@ config MIPS_MT_SMTC
1457 select NR_CPUS_DEFAULT_8 1458 select NR_CPUS_DEFAULT_8
1458 select SMP 1459 select SMP
1459 select SYS_SUPPORTS_SMP 1460 select SYS_SUPPORTS_SMP
1461 select SMP_UP
1460 help 1462 help
1461 This is a kernel model which is known a SMTC or lately has been 1463 This is a kernel model which is known a SMTC or lately has been
1462 marketesed into SMVP. 1464 marketesed into SMVP.
@@ -1735,6 +1737,9 @@ config SMP
1735 1737
1736 If you don't know what to do here, say N. 1738 If you don't know what to do here, say N.
1737 1739
1740config SMP_UP
1741 bool
1742
1738config SYS_SUPPORTS_SMP 1743config SYS_SUPPORTS_SMP
1739 bool 1744 bool
1740 1745
diff --git a/arch/mips/fw/arc/init.c b/arch/mips/fw/arc/init.c
index e2f75b13312f..3ad8788b6eaa 100644
--- a/arch/mips/fw/arc/init.c
+++ b/arch/mips/fw/arc/init.c
@@ -12,6 +12,7 @@
12 12
13#include <asm/bootinfo.h> 13#include <asm/bootinfo.h>
14#include <asm/sgialib.h> 14#include <asm/sgialib.h>
15#include <asm/smp-ops.h>
15 16
16#undef DEBUG_PROM_INIT 17#undef DEBUG_PROM_INIT
17 18
@@ -48,4 +49,11 @@ void __init prom_init(void)
48 ArcRead(0, &c, 1, &cnt); 49 ArcRead(0, &c, 1, &cnt);
49 ArcEnterInteractiveMode(); 50 ArcEnterInteractiveMode();
50#endif 51#endif
52#ifdef CONFIG_SGI_IP27
53 {
54 extern struct plat_smp_ops ip27_smp_ops;
55
56 register_smp_ops(&ip27_smp_ops);
57 }
58#endif
51} 59}
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 3d6b1ec1f328..640fb0cc6e39 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -17,7 +17,6 @@
17#include <asm/system.h> 17#include <asm/system.h>
18#include <asm/hardirq.h> 18#include <asm/hardirq.h>
19#include <asm/mmu_context.h> 19#include <asm/mmu_context.h>
20#include <asm/smp.h>
21#include <asm/mipsmtregs.h> 20#include <asm/mipsmtregs.h>
22#include <asm/r4kcache.h> 21#include <asm/r4kcache.h>
23#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 7b4418dd5857..269c252d956f 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -29,6 +29,7 @@
29#include <asm/cpu.h> 29#include <asm/cpu.h>
30#include <asm/sections.h> 30#include <asm/sections.h>
31#include <asm/setup.h> 31#include <asm/setup.h>
32#include <asm/smp-ops.h>
32#include <asm/system.h> 33#include <asm/system.h>
33 34
34struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; 35struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -575,9 +576,7 @@ void __init setup_arch(char **cmdline_p)
575 arch_mem_init(cmdline_p); 576 arch_mem_init(cmdline_p);
576 577
577 resource_init(); 578 resource_init();
578#ifdef CONFIG_SMP
579 plat_smp_setup(); 579 plat_smp_setup();
580#endif
581} 580}
582 581
583static int __init fpu_disable(char *s) 582static int __init fpu_disable(char *s)
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 2ab0b7eeaa7e..89e6f6aa5166 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -215,72 +215,67 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
215 write_tc_c0_tchalt(TCHALT_H); 215 write_tc_c0_tchalt(TCHALT_H);
216} 216}
217 217
218/* 218static void vsmp_send_ipi_single(int cpu, unsigned int action)
219 * Common setup before any secondaries are started
220 * Make sure all CPU's are in a sensible state before we boot any of the
221 * secondarys
222 */
223void __init plat_smp_setup(void)
224{ 219{
225 unsigned int mvpconf0, ntc, tc, ncpu = 0; 220 int i;
226 unsigned int nvpe; 221 unsigned long flags;
222 int vpflags;
227 223
228#ifdef CONFIG_MIPS_MT_FPAFF 224 local_irq_save(flags);
229 /* If we have an FPU, enroll ourselves in the FPU-full mask */
230 if (cpu_has_fpu)
231 cpu_set(0, mt_fpu_cpumask);
232#endif /* CONFIG_MIPS_MT_FPAFF */
233 if (!cpu_has_mipsmt)
234 return;
235 225
236 /* disable MT so we can configure */ 226 vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */
237 dvpe();
238 dmt();
239 227
240 /* Put MVPE's into 'configuration state' */ 228 switch (action) {
241 set_c0_mvpcontrol(MVPCONTROL_VPC); 229 case SMP_CALL_FUNCTION:
230 i = C_SW1;
231 break;
242 232
243 mvpconf0 = read_c0_mvpconf0(); 233 case SMP_RESCHEDULE_YOURSELF:
244 ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT; 234 default:
235 i = C_SW0;
236 break;
237 }
245 238
246 nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; 239 /* 1:1 mapping of vpe and tc... */
247 smp_num_siblings = nvpe; 240 settc(cpu);
241 write_vpe_c0_cause(read_vpe_c0_cause() | i);
242 evpe(vpflags);
248 243
249 /* we'll always have more TC's than VPE's, so loop setting everything 244 local_irq_restore(flags);
250 to a sensible state */ 245}
251 for (tc = 0; tc <= ntc; tc++) {
252 settc(tc);
253 246
254 smp_tc_init(tc, mvpconf0); 247static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
255 ncpu = smp_vpe_init(tc, mvpconf0, ncpu); 248{
256 } 249 unsigned int i;
257 250
258 /* Release config state */ 251 for_each_cpu_mask(i, mask)
259 clear_c0_mvpcontrol(MVPCONTROL_VPC); 252 vsmp_send_ipi_single(i, action);
253}
260 254
261 /* We'll wait until starting the secondaries before starting MVPE */ 255static void __cpuinit vsmp_init_secondary(void)
256{
257 /* Enable per-cpu interrupts */
262 258
263 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu); 259 /* This is Malta specific: IPI,performance and timer inetrrupts */
260 write_c0_status((read_c0_status() & ~ST0_IM ) |
261 (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
264} 262}
265 263
266void __init plat_prepare_cpus(unsigned int max_cpus) 264static void __cpuinit vsmp_smp_finish(void)
267{ 265{
268 mips_mt_set_cpuoptions(); 266 write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
269
270 /* set up ipi interrupts */
271 if (cpu_has_vint) {
272 set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
273 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
274 }
275 267
276 cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ; 268#ifdef CONFIG_MIPS_MT_FPAFF
277 cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ; 269 /* If we have an FPU, enroll ourselves in the FPU-full mask */
270 if (cpu_has_fpu)
271 cpu_set(smp_processor_id(), mt_fpu_cpumask);
272#endif /* CONFIG_MIPS_MT_FPAFF */
278 273
279 setup_irq(cpu_ipi_resched_irq, &irq_resched); 274 local_irq_enable();
280 setup_irq(cpu_ipi_call_irq, &irq_call); 275}
281 276
282 set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq); 277static void vsmp_cpus_done(void)
283 set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq); 278{
284} 279}
285 280
286/* 281/*
@@ -291,7 +286,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
291 * (unsigned long)idle->thread_info the gp 286 * (unsigned long)idle->thread_info the gp
292 * assumes a 1:1 mapping of TC => VPE 287 * assumes a 1:1 mapping of TC => VPE
293 */ 288 */
294void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 289static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
295{ 290{
296 struct thread_info *gp = task_thread_info(idle); 291 struct thread_info *gp = task_thread_info(idle);
297 dvpe(); 292 dvpe();
@@ -325,57 +320,81 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
325 evpe(EVPE_ENABLE); 320 evpe(EVPE_ENABLE);
326} 321}
327 322
328void __cpuinit prom_init_secondary(void) 323/*
329{ 324 * Common setup before any secondaries are started
330 /* Enable per-cpu interrupts */ 325 * Make sure all CPU's are in a sensible state before we boot any of the
331 326 * secondarys
332 /* This is Malta specific: IPI,performance and timer inetrrupts */ 327 */
333 write_c0_status((read_c0_status() & ~ST0_IM ) | 328static void __init vsmp_smp_setup(void)
334 (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
335}
336
337void __cpuinit prom_smp_finish(void)
338{ 329{
339 write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); 330 unsigned int mvpconf0, ntc, tc, ncpu = 0;
331 unsigned int nvpe;
340 332
341#ifdef CONFIG_MIPS_MT_FPAFF 333#ifdef CONFIG_MIPS_MT_FPAFF
342 /* If we have an FPU, enroll ourselves in the FPU-full mask */ 334 /* If we have an FPU, enroll ourselves in the FPU-full mask */
343 if (cpu_has_fpu) 335 if (cpu_has_fpu)
344 cpu_set(smp_processor_id(), mt_fpu_cpumask); 336 cpu_set(0, mt_fpu_cpumask);
345#endif /* CONFIG_MIPS_MT_FPAFF */ 337#endif /* CONFIG_MIPS_MT_FPAFF */
338 if (!cpu_has_mipsmt)
339 return;
346 340
347 local_irq_enable(); 341 /* disable MT so we can configure */
348} 342 dvpe();
343 dmt();
349 344
350void prom_cpus_done(void) 345 /* Put MVPE's into 'configuration state' */
351{ 346 set_c0_mvpcontrol(MVPCONTROL_VPC);
352}
353 347
354void core_send_ipi(int cpu, unsigned int action) 348 mvpconf0 = read_c0_mvpconf0();
355{ 349 ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
356 int i;
357 unsigned long flags;
358 int vpflags;
359 350
360 local_irq_save(flags); 351 nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
352 smp_num_siblings = nvpe;
361 353
362 vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ 354 /* we'll always have more TC's than VPE's, so loop setting everything
355 to a sensible state */
356 for (tc = 0; tc <= ntc; tc++) {
357 settc(tc);
363 358
364 switch (action) { 359 smp_tc_init(tc, mvpconf0);
365 case SMP_CALL_FUNCTION: 360 ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
366 i = C_SW1; 361 }
367 break;
368 362
369 case SMP_RESCHEDULE_YOURSELF: 363 /* Release config state */
370 default: 364 clear_c0_mvpcontrol(MVPCONTROL_VPC);
371 i = C_SW0; 365
372 break; 366 /* We'll wait until starting the secondaries before starting MVPE */
367
368 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
369}
370
371static void __init vsmp_prepare_cpus(unsigned int max_cpus)
372{
373 mips_mt_set_cpuoptions();
374
375 /* set up ipi interrupts */
376 if (cpu_has_vint) {
377 set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
378 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
373 } 379 }
374 380
375 /* 1:1 mapping of vpe and tc... */ 381 cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
376 settc(cpu); 382 cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
377 write_vpe_c0_cause(read_vpe_c0_cause() | i);
378 evpe(vpflags);
379 383
380 local_irq_restore(flags); 384 setup_irq(cpu_ipi_resched_irq, &irq_resched);
385 setup_irq(cpu_ipi_call_irq, &irq_call);
386
387 set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
388 set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
381} 389}
390
391struct plat_smp_ops vsmp_smp_ops = {
392 .send_ipi_single = vsmp_send_ipi_single,
393 .send_ipi_mask = vsmp_send_ipi_mask,
394 .init_secondary = vsmp_init_secondary,
395 .smp_finish = vsmp_smp_finish,
396 .cpus_done = vsmp_cpus_done,
397 .boot_secondary = vsmp_boot_secondary,
398 .smp_setup = vsmp_smp_setup,
399 .prepare_cpus = vsmp_prepare_cpus,
400};
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 335be9bcf0dc..1e5dfc28294a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -37,7 +37,6 @@
37#include <asm/processor.h> 37#include <asm/processor.h>
38#include <asm/system.h> 38#include <asm/system.h>
39#include <asm/mmu_context.h> 39#include <asm/mmu_context.h>
40#include <asm/smp.h>
41#include <asm/time.h> 40#include <asm/time.h>
42 41
43#ifdef CONFIG_MIPS_MT_SMTC 42#ifdef CONFIG_MIPS_MT_SMTC
@@ -84,6 +83,16 @@ static inline void set_cpu_sibling_map(int cpu)
84 cpu_set(cpu, cpu_sibling_map[cpu]); 83 cpu_set(cpu, cpu_sibling_map[cpu]);
85} 84}
86 85
86struct plat_smp_ops *mp_ops;
87
88__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
89{
90 if (ops)
91 printk(KERN_WARNING "Overriding previous set SMP ops\n");
92
93 mp_ops = ops;
94}
95
87/* 96/*
88 * First C code run on the secondary CPUs after being started up by 97 * First C code run on the secondary CPUs after being started up by
89 * the master. 98 * the master.
@@ -100,7 +109,7 @@ asmlinkage __cpuinit void start_secondary(void)
100 cpu_report(); 109 cpu_report();
101 per_cpu_trap_init(); 110 per_cpu_trap_init();
102 mips_clockevent_init(); 111 mips_clockevent_init();
103 prom_init_secondary(); 112 mp_ops->init_secondary();
104 113
105 /* 114 /*
106 * XXX parity protection should be folded in here when it's converted 115 * XXX parity protection should be folded in here when it's converted
@@ -112,7 +121,7 @@ asmlinkage __cpuinit void start_secondary(void)
112 cpu = smp_processor_id(); 121 cpu = smp_processor_id();
113 cpu_data[cpu].udelay_val = loops_per_jiffy; 122 cpu_data[cpu].udelay_val = loops_per_jiffy;
114 123
115 prom_smp_finish(); 124 mp_ops->smp_finish();
116 set_cpu_sibling_map(cpu); 125 set_cpu_sibling_map(cpu);
117 126
118 cpu_set(cpu, cpu_callin_map); 127 cpu_set(cpu, cpu_callin_map);
@@ -184,7 +193,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
184 smp_mb(); 193 smp_mb();
185 194
186 /* Send a message to all other CPUs and wait for them to respond */ 195 /* Send a message to all other CPUs and wait for them to respond */
187 core_send_ipi_mask(mask, SMP_CALL_FUNCTION); 196 mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
188 197
189 /* Wait for response */ 198 /* Wait for response */
190 /* FIXME: lock-up detection, backtrace on lock-up */ 199 /* FIXME: lock-up detection, backtrace on lock-up */
@@ -278,7 +287,7 @@ void smp_send_stop(void)
278 287
279void __init smp_cpus_done(unsigned int max_cpus) 288void __init smp_cpus_done(unsigned int max_cpus)
280{ 289{
281 prom_cpus_done(); 290 mp_ops->cpus_done();
282} 291}
283 292
284/* called from main before smp_init() */ 293/* called from main before smp_init() */
@@ -286,7 +295,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
286{ 295{
287 init_new_context(current, &init_mm); 296 init_new_context(current, &init_mm);
288 current_thread_info()->cpu = 0; 297 current_thread_info()->cpu = 0;
289 plat_prepare_cpus(max_cpus); 298 mp_ops->prepare_cpus(max_cpus);
290 set_cpu_sibling_map(0); 299 set_cpu_sibling_map(0);
291#ifndef CONFIG_HOTPLUG_CPU 300#ifndef CONFIG_HOTPLUG_CPU
292 cpu_present_map = cpu_possible_map; 301 cpu_present_map = cpu_possible_map;
@@ -325,7 +334,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
325 if (IS_ERR(idle)) 334 if (IS_ERR(idle))
326 panic(KERN_ERR "Fork failed for CPU %d", cpu); 335 panic(KERN_ERR "Fork failed for CPU %d", cpu);
327 336
328 prom_boot_secondary(cpu, idle); 337 mp_ops->boot_secondary(cpu, idle);
329 338
330 /* 339 /*
331 * Trust is futile. We should really have timeouts ... 340 * Trust is futile. We should really have timeouts ...
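
Taken together, the smp.c hunks above mean the generic code now reaches the platform exclusively through the registered mp_ops pointer. Condensed into one non-buildable sketch of the bring-up sequence, with the old hook named in each comment; everything not shown in the hunks above is elided.

/* Sketch only: unrelated code from arch/mips/kernel/smp.c is elided. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	mp_ops->prepare_cpus(max_cpus);		/* was plat_prepare_cpus() */
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	/* idle task is forked earlier in this function */
	mp_ops->boot_secondary(cpu, idle);	/* was prom_boot_secondary() */
	/* ... wait for the secondary to call in ... */
	return 0;
}

asmlinkage __cpuinit void start_secondary(void)
{
	mp_ops->init_secondary();		/* was prom_init_secondary() */
	/* ... per-CPU trap/clockevent init, delay calibration ... */
	mp_ops->smp_finish();			/* was prom_smp_finish() */
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();			/* was prom_cpus_done() */
}
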
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
index 6f3709996172..fe256559c997 100644
--- a/arch/mips/kernel/smtc-proc.c
+++ b/arch/mips/kernel/smtc-proc.c
@@ -14,7 +14,6 @@
14#include <asm/system.h> 14#include <asm/system.h>
15#include <asm/hardirq.h> 15#include <asm/hardirq.h>
16#include <asm/mmu_context.h> 16#include <asm/mmu_context.h>
17#include <asm/smp.h>
18#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
19#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
20#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 9c92d42996cb..85f700e58131 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -16,7 +16,6 @@
16#include <asm/hazards.h> 16#include <asm/hazards.h>
17#include <asm/irq.h> 17#include <asm/irq.h>
18#include <asm/mmu_context.h> 18#include <asm/mmu_context.h>
19#include <asm/smp.h>
20#include <asm/mipsregs.h> 19#include <asm/mipsregs.h>
21#include <asm/cacheflush.h> 20#include <asm/cacheflush.h>
22#include <asm/time.h> 21#include <asm/time.h>
diff --git a/arch/mips/mips-boards/generic/init.c b/arch/mips/mips-boards/generic/init.c
index 30f1f54cb68b..1695dca5506b 100644
--- a/arch/mips/mips-boards/generic/init.c
+++ b/arch/mips/mips-boards/generic/init.c
@@ -250,6 +250,8 @@ void __init mips_ejtag_setup(void)
250 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); 250 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
251} 251}
252 252
253extern struct plat_smp_ops msmtc_smp_ops;
254
253void __init prom_init(void) 255void __init prom_init(void)
254{ 256{
255 prom_argc = fw_arg0; 257 prom_argc = fw_arg0;
@@ -416,4 +418,10 @@ void __init prom_init(void)
416#ifdef CONFIG_SERIAL_8250_CONSOLE 418#ifdef CONFIG_SERIAL_8250_CONSOLE
417 console_config(); 419 console_config();
418#endif 420#endif
421#ifdef CONFIG_MIPS_MT_SMP
422 register_smp_ops(&vsmp_smp_ops);
423#endif
424#ifdef CONFIG_MIPS_MT_SMTC
425 register_smp_ops(&msmtc_smp_ops);
426#endif
419} 427}
diff --git a/arch/mips/mips-boards/malta/malta_smtc.c b/arch/mips/mips-boards/malta/malta_smtc.c
index 5c980f4a48fe..6f051ca243fa 100644
--- a/arch/mips/mips-boards/malta/malta_smtc.c
+++ b/arch/mips/mips-boards/malta/malta_smtc.c
@@ -15,26 +15,24 @@
15 * Cause the specified action to be performed on a targeted "CPU" 15 * Cause the specified action to be performed on a targeted "CPU"
16 */ 16 */
17 17
18void core_send_ipi(int cpu, unsigned int action) 18static void msmtc_send_ipi_single(int cpu, unsigned int action)
19{ 19{
20 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 20 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
21 smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 21 smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
22} 22}
23 23
24/* 24static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
25 * Platform "CPU" startup hook
26 */
27
28void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
29{ 25{
30 smtc_boot_secondary(cpu, idle); 26 unsigned int i;
27
28 for_each_cpu_mask(i, mask)
29 msmtc_send_ipi_single(i, action);
31} 30}
32 31
33/* 32/*
34 * Post-config but pre-boot cleanup entry point 33 * Post-config but pre-boot cleanup entry point
35 */ 34 */
36 35static void __cpuinit msmtc_init_secondary(void)
37void __cpuinit prom_init_secondary(void)
38{ 36{
39 void smtc_init_secondary(void); 37 void smtc_init_secondary(void);
40 int myvpe; 38 int myvpe;
@@ -50,45 +48,61 @@ void __cpuinit prom_init_secondary(void)
50 set_c0_status(0x100 << cp0_perfcount_irq); 48 set_c0_status(0x100 << cp0_perfcount_irq);
51 } 49 }
52 50
53 smtc_init_secondary(); 51 smtc_init_secondary();
54} 52}
55 53
56/* 54/*
57 * Platform SMP pre-initialization 55 * Platform "CPU" startup hook
58 *
59 * As noted above, we can assume a single CPU for now
60 * but it may be multithreaded.
61 */ 56 */
62 57static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
63void __cpuinit plat_smp_setup(void)
64{ 58{
65 if (read_c0_config3() & (1<<2)) 59 smtc_boot_secondary(cpu, idle);
66 mipsmt_build_cpu_map(0);
67} 60}
68 61
69void __init plat_prepare_cpus(unsigned int max_cpus) 62/*
63 * SMP initialization finalization entry point
64 */
65static void __cpuinit msmtc_smp_finish(void)
70{ 66{
71 if (read_c0_config3() & (1<<2)) 67 smtc_smp_finish();
72 mipsmt_prepare_cpus();
73} 68}
74 69
75/* 70/*
76 * SMP initialization finalization entry point 71 * Hook for after all CPUs are online
77 */ 72 */
78 73
79void __cpuinit prom_smp_finish(void) 74static void msmtc_cpus_done(void)
80{ 75{
81 smtc_smp_finish();
82} 76}
83 77
84/* 78/*
85 * Hook for after all CPUs are online 79 * Platform SMP pre-initialization
80 *
81 * As noted above, we can assume a single CPU for now
82 * but it may be multithreaded.
86 */ 83 */
87 84
88void prom_cpus_done(void) 85static void __init msmtc_smp_setup(void)
89{ 86{
87 mipsmt_build_cpu_map(0);
90} 88}
91 89
90static void __init msmtc_prepare_cpus(unsigned int max_cpus)
91{
92 mipsmt_prepare_cpus();
93}
94
95struct plat_smp_ops msmtc_smp_ops = {
96 .send_ipi_single = msmtc_send_ipi_single,
97 .send_ipi_mask = msmtc_send_ipi_mask,
98 .init_secondary = msmtc_init_secondary,
99 .smp_finish = msmtc_smp_finish,
100 .cpus_done = msmtc_cpus_done,
101 .boot_secondary = msmtc_boot_secondary,
102 .smp_setup = msmtc_smp_setup,
103 .prepare_cpus = msmtc_prepare_cpus,
104};
105
92#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF 106#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
93/* 107/*
94 * IRQ affinity hook 108 * IRQ affinity hook
diff --git a/arch/mips/mipssim/Makefile b/arch/mips/mipssim/Makefile
index 75568b584df4..57f43c1c7882 100644
--- a/arch/mips/mipssim/Makefile
+++ b/arch/mips/mipssim/Makefile
@@ -21,6 +21,6 @@ obj-y := sim_platform.o sim_setup.o sim_mem.o sim_time.o sim_int.o \
21 sim_cmdline.o 21 sim_cmdline.o
22 22
23obj-$(CONFIG_EARLY_PRINTK) += sim_console.o 23obj-$(CONFIG_EARLY_PRINTK) += sim_console.o
24obj-$(CONFIG_SMP) += sim_smp.o 24obj-$(CONFIG_MIPS_MT_SMTC) += sim_smtc.o
25 25
26EXTRA_CFLAGS += -Werror 26EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mipssim/sim_setup.c b/arch/mips/mipssim/sim_setup.c
index 452c129d02c1..d49fe73426b7 100644
--- a/arch/mips/mipssim/sim_setup.c
+++ b/arch/mips/mipssim/sim_setup.c
@@ -60,6 +60,8 @@ void __init plat_mem_setup(void)
60#endif 60#endif
61} 61}
62 62
63extern struct plat_smp_ops ssmtc_smp_ops;
64
63void __init prom_init(void) 65void __init prom_init(void)
64{ 66{
65 set_io_port_base(0xbfd00000); 67 set_io_port_base(0xbfd00000);
@@ -67,8 +69,20 @@ void __init prom_init(void)
67 pr_info("\nLINUX started...\n"); 69 pr_info("\nLINUX started...\n");
68 prom_init_cmdline(); 70 prom_init_cmdline();
69 prom_meminit(); 71 prom_meminit();
70}
71 72
73#ifdef CONFIG_MIPS_MT_SMP
74 if (cpu_has_mipsmt)
75 register_smp_ops(&vsmp_smp_ops);
76 else
77 register_smp_ops(&up_smp_ops);
78#endif
79#ifdef CONFIG_MIPS_MT_SMTC
80 if (cpu_has_mipsmt)
81 register_smp_ops(&ssmtc_smp_ops);
82 else
83 register_smp_ops(&up_smp_ops);
84#endif
85}
72 86
73static void __init serial_init(void) 87static void __init serial_init(void)
74{ 88{
diff --git a/arch/mips/mipssim/sim_smp.c b/arch/mips/mipssim/sim_smtc.c
index ccbbccac23ef..d6e4f656ad14 100644
--- a/arch/mips/mipssim/sim_smp.c
+++ b/arch/mips/mipssim/sim_smtc.c
@@ -16,7 +16,7 @@
16 * 16 *
17 */ 17 */
18/* 18/*
19 * Simulator Platform-specific hooks for SMP operation 19 * Simulator Platform-specific hooks for SMTC operation
20 */ 20 */
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/sched.h> 22#include <linux/sched.h>
@@ -29,65 +29,72 @@
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/mmu_context.h> 31#include <asm/mmu_context.h>
32#ifdef CONFIG_MIPS_MT_SMTC
33#include <asm/smtc_ipi.h> 32#include <asm/smtc_ipi.h>
34#endif /* CONFIG_MIPS_MT_SMTC */
35 33
36/* VPE/SMP Prototype implements platform interfaces directly */ 34/* VPE/SMP Prototype implements platform interfaces directly */
37#if !defined(CONFIG_MIPS_MT_SMP)
38 35
39/* 36/*
40 * Cause the specified action to be performed on a targeted "CPU" 37 * Cause the specified action to be performed on a targeted "CPU"
41 */ 38 */
42 39
43void core_send_ipi(int cpu, unsigned int action) 40static void ssmtc_send_ipi_single(int cpu, unsigned int action)
44{ 41{
45#ifdef CONFIG_MIPS_MT_SMTC
46 smtc_send_ipi(cpu, LINUX_SMP_IPI, action); 42 smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
47#endif /* CONFIG_MIPS_MT_SMTC */ 43 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
48/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ 44}
45
46static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
47{
48 unsigned int i;
49 49
50 for_each_cpu_mask(i, mask)
51 ssmtc_send_ipi_single(i, action);
50} 52}
51 53
52/* 54/*
53 * Platform "CPU" startup hook 55 * Post-config but pre-boot cleanup entry point
54 */ 56 */
55 57static void __cpuinit ssmtc_init_secondary(void)
56void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
57{ 58{
58#ifdef CONFIG_MIPS_MT_SMTC 59 void smtc_init_secondary(void);
59 smtc_boot_secondary(cpu, idle); 60
60#endif /* CONFIG_MIPS_MT_SMTC */ 61 smtc_init_secondary();
61} 62}
62 63
63/* 64/*
64 * Post-config but pre-boot cleanup entry point 65 * SMP initialization finalization entry point
65 */ 66 */
67static void __cpuinit ssmtc_smp_finish(void)
68{
69 smtc_smp_finish();
70}
66 71
67void __cpuinit prom_init_secondary(void) 72/*
73 * Hook for after all CPUs are online
74 */
75static void ssmtc_cpus_done(void)
68{ 76{
69#ifdef CONFIG_MIPS_MT_SMTC 77}
70 void smtc_init_secondary(void);
71 78
72 smtc_init_secondary(); 79/*
73#endif /* CONFIG_MIPS_MT_SMTC */ 80 * Platform "CPU" startup hook
81 */
82static void __cpuinit ssmtc_boot_secondary(int cpu, struct task_struct *idle)
83{
84 smtc_boot_secondary(cpu, idle);
74} 85}
75 86
76void plat_smp_setup(void) 87static void __init ssmtc_smp_setup(void)
77{ 88{
78#ifdef CONFIG_MIPS_MT_SMTC
79 if (read_c0_config3() & (1 << 2)) 89 if (read_c0_config3() & (1 << 2))
80 mipsmt_build_cpu_map(0); 90 mipsmt_build_cpu_map(0);
81#endif /* CONFIG_MIPS_MT_SMTC */
82} 91}
83 92
84/* 93/*
85 * Platform SMP pre-initialization 94 * Platform SMP pre-initialization
86 */ 95 */
87 96static void ssmtc_prepare_cpus(unsigned int max_cpus)
88void plat_prepare_cpus(unsigned int max_cpus)
89{ 97{
90#ifdef CONFIG_MIPS_MT_SMTC
91 /* 98 /*
92 * As noted above, we can assume a single CPU for now 99 * As noted above, we can assume a single CPU for now
93 * but it may be multithreaded. 100 * but it may be multithreaded.
@@ -96,28 +103,15 @@ void plat_prepare_cpus(unsigned int max_cpus)
96 if (read_c0_config3() & (1 << 2)) { 103 if (read_c0_config3() & (1 << 2)) {
97 mipsmt_prepare_cpus(); 104 mipsmt_prepare_cpus();
98 } 105 }
99#endif /* CONFIG_MIPS_MT_SMTC */
100} 106}
101 107
102/* 108struct plat_smp_ops ssmtc_smp_ops = {
103 * SMP initialization finalization entry point 109 .send_ipi_single = ssmtc_send_ipi_single,
104 */ 110 .send_ipi_mask = ssmtc_send_ipi_mask,
105 111 .init_secondary = ssmtc_init_secondary,
106void __cpuinit prom_smp_finish(void) 112 .smp_finish = ssmtc_smp_finish,
107{ 113 .cpus_done = ssmtc_cpus_done,
108#ifdef CONFIG_MIPS_MT_SMTC 114 .boot_secondary = ssmtc_boot_secondary,
109 smtc_smp_finish(); 115 .smp_setup = ssmtc_smp_setup,
110#endif /* CONFIG_MIPS_MT_SMTC */ 116 .prepare_cpus = ssmtc_prepare_cpus,
111} 117};
112
113/*
114 * Hook for after all CPUs are online
115 */
116
117void prom_cpus_done(void)
118{
119#ifdef CONFIG_MIPS_MT_SMTC
120
121#endif /* CONFIG_MIPS_MT_SMTC */
122}
123#endif /* CONFIG_MIPS32R2_MT_SMP */
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 96d3ff051d3d..35dc435846a6 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -19,6 +19,7 @@
19#include <asm/pgtable.h> 19#include <asm/pgtable.h>
20#include <asm/processor.h> 20#include <asm/processor.h>
21#include <asm/reboot.h> 21#include <asm/reboot.h>
22#include <asm/smp-ops.h>
22#include <asm/system.h> 23#include <asm/system.h>
23#include <asm/bootinfo.h> 24#include <asm/bootinfo.h>
24#include <asm/pmon.h> 25#include <asm/pmon.h>
@@ -78,6 +79,8 @@ static void prom_halt(void)
78 __asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0"); 79 __asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0");
79} 80}
80 81
82extern struct plat_smp_ops yos_smp_ops;
83
81/* 84/*
82 * Init routine which accepts the variables from PMON 85 * Init routine which accepts the variables from PMON
83 */ 86 */
@@ -127,6 +130,8 @@ void __init prom_init(void)
127 } 130 }
128 131
129 prom_grab_secondary(); 132 prom_grab_secondary();
133
134 register_smp_ops(&yos_smp_ops);
130} 135}
131 136
132void __init prom_free_prom_memory(void) 137void __init prom_free_prom_memory(void)
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index b0f12cd2968a..653f3ec61cab 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -42,70 +42,6 @@ void __init prom_grab_secondary(void)
42 launchstack + LAUNCHSTACK_SIZE, 0); 42 launchstack + LAUNCHSTACK_SIZE, 0);
43} 43}
44 44
45/*
46 * Detect available CPUs, populate phys_cpu_present_map before smp_init
47 *
48 * We don't want to start the secondary CPU yet nor do we have a nice probing
49 * feature in PMON so we just assume presence of the secondary core.
50 */
51void __init plat_smp_setup(void)
52{
53 int i;
54
55 cpus_clear(phys_cpu_present_map);
56
57 for (i = 0; i < 2; i++) {
58 cpu_set(i, phys_cpu_present_map);
59 __cpu_number_map[i] = i;
60 __cpu_logical_map[i] = i;
61 }
62}
63
64void __init plat_prepare_cpus(unsigned int max_cpus)
65{
66 /*
67 * Be paranoid. Enable the IPI only if we're really about to go SMP.
68 */
69 if (cpus_weight(cpu_possible_map))
70 set_c0_status(STATUSF_IP5);
71}
72
73/*
74 * Firmware CPU startup hook
75 * Complicated by PMON's weird interface which tries to minimic the UNIX fork.
76 * It launches the next * available CPU and copies some information on the
77 * stack so the first thing we do is throw away that stuff and load useful
78 * values into the registers ...
79 */
80void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
81{
82 unsigned long gp = (unsigned long) task_thread_info(idle);
83 unsigned long sp = __KSTK_TOS(idle);
84
85 secondary_sp = sp;
86 secondary_gp = gp;
87
88 spin_unlock(&launch_lock);
89}
90
91/* Hook for after all CPUs are online */
92void prom_cpus_done(void)
93{
94}
95
96/*
97 * After we've done initial boot, this function is called to allow the
98 * board code to clean up state, if needed
99 */
100void __cpuinit prom_init_secondary(void)
101{
102 set_c0_status(ST0_CO | ST0_IE | ST0_IM);
103}
104
105void __cpuinit prom_smp_finish(void)
106{
107}
108
109void titan_mailbox_irq(void) 45void titan_mailbox_irq(void)
110{ 46{
111 int cpu = smp_processor_id(); 47 int cpu = smp_processor_id();
@@ -133,7 +69,7 @@ void titan_mailbox_irq(void)
133/* 69/*
134 * Send inter-processor interrupt 70 * Send inter-processor interrupt
135 */ 71 */
136void core_send_ipi(int cpu, unsigned int action) 72static void yos_send_ipi_single(int cpu, unsigned int action)
137{ 73{
138 /* 74 /*
139 * Generate an INTMSG so that it can be sent over to the 75 * Generate an INTMSG so that it can be sent over to the
@@ -159,3 +95,86 @@ void core_send_ipi(int cpu, unsigned int action)
159 break; 95 break;
160 } 96 }
161} 97}
98
99static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
100{
101 unsigned int i;
102
103 for_each_cpu_mask(i, mask)
104 yos_send_ipi_single(i, action);
105}
106
107/*
108 * After we've done initial boot, this function is called to allow the
109 * board code to clean up state, if needed
110 */
111static void __cpuinit yos_init_secondary(void)
112{
113 set_c0_status(ST0_CO | ST0_IE | ST0_IM);
114}
115
116static void __cpuinit yos_smp_finish(void)
117{
118}
119
120/* Hook for after all CPUs are online */
121static void yos_cpus_done(void)
122{
123}
124
125/*
126 * Firmware CPU startup hook
127 * Complicated by PMON's weird interface which tries to minimic the UNIX fork.
128 * It launches the next * available CPU and copies some information on the
129 * stack so the first thing we do is throw away that stuff and load useful
130 * values into the registers ...
131 */
132static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
133{
134 unsigned long gp = (unsigned long) task_thread_info(idle);
135 unsigned long sp = __KSTK_TOS(idle);
136
137 secondary_sp = sp;
138 secondary_gp = gp;
139
140 spin_unlock(&launch_lock);
141}
142
143/*
144 * Detect available CPUs, populate phys_cpu_present_map before smp_init
145 *
146 * We don't want to start the secondary CPU yet nor do we have a nice probing
147 * feature in PMON so we just assume presence of the secondary core.
148 */
149static void __init yos_smp_setup(void)
150{
151 int i;
152
153 cpus_clear(phys_cpu_present_map);
154
155 for (i = 0; i < 2; i++) {
156 cpu_set(i, phys_cpu_present_map);
157 __cpu_number_map[i] = i;
158 __cpu_logical_map[i] = i;
159 }
160}
161
162static void __init yos_prepare_cpus(unsigned int max_cpus)
163{
164 /*
165 * Be paranoid. Enable the IPI only if we're really about to go SMP.
166 */
167 if (cpus_weight(cpu_possible_map))
168 set_c0_status(STATUSF_IP5);
169}
170
171struct plat_smp_ops yos_smp_ops = {
172 .send_ipi_single = yos_send_ipi_single,
173 .send_ipi_mask = yos_send_ipi_mask,
174 .init_secondary = yos_init_secondary,
175 .smp_finish = yos_smp_finish,
176 .cpus_done = yos_cpus_done,
177 .boot_secondary = yos_boot_secondary,
178 .smp_setup = yos_smp_setup,
179 .prepare_cpus = yos_prepare_cpus,
180};
diff --git a/arch/mips/qemu/q-smp.c b/arch/mips/qemu/q-smp.c
index 4b0178d0df0b..ead6c30eeb14 100644
--- a/arch/mips/qemu/q-smp.c
+++ b/arch/mips/qemu/q-smp.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) 6 * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org)
7 * 7 *
8 * Symmetric Uniprocessor (TM) Support 8 * Symmetric Uniprocessor (TM) Support
9 */ 9 */
@@ -13,43 +13,55 @@
13/* 13/*
14 * Send inter-processor interrupt 14 * Send inter-processor interrupt
15 */ 15 */
16void core_send_ipi(int cpu, unsigned int action) 16void up_send_ipi_single(int cpu, unsigned int action)
17{ 17{
18 panic(KERN_ERR "%s called", __FUNCTION__); 18 panic(KERN_ERR "%s called", __func__);
19}
20
21static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
22{
23 panic(KERN_ERR "%s called", __func__);
19} 24}
20 25
21/* 26/*
22 * After we've done initial boot, this function is called to allow the 27 * After we've done initial boot, this function is called to allow the
23 * board code to clean up state, if needed 28 * board code to clean up state, if needed
24 */ 29 */
25void __cpuinit prom_init_secondary(void) 30void __cpuinit up_init_secondary(void)
26{ 31{
27} 32}
28 33
29void __cpuinit prom_smp_finish(void) 34void __cpuinit up_smp_finish(void)
30{ 35{
31} 36}
32 37
33/* Hook for after all CPUs are online */ 38/* Hook for after all CPUs are online */
34void prom_cpus_done(void) 39void up_cpus_done(void)
35{
36}
37
38void __init prom_prepare_cpus(unsigned int max_cpus)
39{ 40{
40 cpus_clear(phys_cpu_present_map);
41} 41}
42 42
43/* 43/*
44 * Firmware CPU startup hook 44 * Firmware CPU startup hook
45 */ 45 */
46void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 46void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle)
47{ 47{
48} 48}
49 49
50void __init plat_smp_setup(void) 50void __init up_smp_setup(void)
51{ 51{
52} 52}
53void __init plat_prepare_cpus(unsigned int max_cpus) 53
54void __init up_prepare_cpus(unsigned int max_cpus)
54{ 55{
55} 56}
57
58struct plat_smp_ops up_smp_ops = {
59 .send_ipi_single = up_send_ipi_single,
60 .send_ipi_mask = up_send_ipi_mask,
61 .init_secondary = up_init_secondary,
62 .smp_finish = up_smp_finish,
63 .cpus_done = up_cpus_done,
64 .boot_secondary = up_boot_secondary,
65 .smp_setup = up_smp_setup,
66 .prepare_cpus = up_prepare_cpus,
67};
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 3305fa9ae66d..a49e7c85f724 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -27,7 +27,6 @@
27#include <asm/sn/hub.h> 27#include <asm/sn/hub.h>
28#include <asm/sn/intr.h> 28#include <asm/sn/intr.h>
29#include <asm/current.h> 29#include <asm/current.h>
30#include <asm/smp.h>
31#include <asm/processor.h> 30#include <asm/processor.h>
32#include <asm/mmu_context.h> 31#include <asm/mmu_context.h>
33#include <asm/thread_info.h> 32#include <asm/thread_info.h>
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index f10d9839006d..48932ce1d730 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -11,7 +11,6 @@
11 11
12#include <asm/page.h> 12#include <asm/page.h>
13#include <asm/sections.h> 13#include <asm/sections.h>
14#include <asm/smp.h>
15#include <asm/sn/types.h> 14#include <asm/sn/types.h>
16#include <asm/sn/arch.h> 15#include <asm/sn/arch.h>
17#include <asm/sn/gda.h> 16#include <asm/sn/gda.h>
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index a70656d42191..f15fc93d6b35 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -140,30 +140,51 @@ static __init void intr_clear_all(nasid_t nasid)
140 REMOTE_HUB_CLR_INTR(nasid, i); 140 REMOTE_HUB_CLR_INTR(nasid, i);
141} 141}
142 142
143void __init plat_smp_setup(void) 143static void ip27_send_ipi_single(int destid, unsigned int action)
144{ 144{
145 cnodeid_t cnode; 145 int irq;
146 146
147 for_each_online_node(cnode) { 147 switch (action) {
148 if (cnode == 0) 148 case SMP_RESCHEDULE_YOURSELF:
149 continue; 149 irq = CPU_RESCHED_A_IRQ;
150 intr_clear_all(COMPACT_TO_NASID_NODEID(cnode)); 150 break;
151 case SMP_CALL_FUNCTION:
152 irq = CPU_CALL_A_IRQ;
153 break;
154 default:
155 panic("sendintr");
151 } 156 }
152 157
153 replicate_kernel_text(); 158 irq += cputoslice(destid);
154 159
155 /* 160 /*
156 * Assumption to be fixed: we're always booted on logical / physical 161 * Convert the compact hub number to the NASID to get the correct
157 * processor 0. While we're always running on logical processor 0 162 * part of the address space. Then set the interrupt bit associated
158 * this still means this is physical processor zero; it might for 163 * with the CPU we want to send the interrupt to.
159 * example be disabled in the firwware.
160 */ 164 */
161 alloc_cpupda(0, 0); 165 REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
162} 166}
163 167
164void __init plat_prepare_cpus(unsigned int max_cpus) 168static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
169{
170 unsigned int i;
171
172 for_each_cpu_mask(i, mask)
173 ip27_send_ipi_single(i, action);
174}
175
176static void __cpuinit ip27_init_secondary(void)
177{
178 per_cpu_init();
179 local_irq_enable();
180}
181
182static void __cpuinit ip27_smp_finish(void)
183{
184}
185
186static void __init ip27_cpus_done(void)
165{ 187{
166 /* We already did everything necessary earlier */
167} 188}
168 189
169/* 190/*
@@ -171,7 +192,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
171 * set sp to the kernel stack of the newly created idle process, gp to the proc 192 * set sp to the kernel stack of the newly created idle process, gp to the proc
172 * struct so that current_thread_info() will work. 193 * struct so that current_thread_info() will work.
173 */ 194 */
174void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 195static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle)
175{ 196{
176 unsigned long gp = (unsigned long)task_thread_info(idle); 197 unsigned long gp = (unsigned long)task_thread_info(idle);
177 unsigned long sp = __KSTK_TOS(idle); 198 unsigned long sp = __KSTK_TOS(idle);
@@ -181,41 +202,39 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
181 0, (void *) sp, (void *) gp); 202 0, (void *) sp, (void *) gp);
182} 203}
183 204
184void __cpuinit prom_init_secondary(void) 205static void __init ip27_smp_setup(void)
185{ 206{
186 per_cpu_init(); 207 cnodeid_t cnode;
187 local_irq_enable();
188}
189
190void __init prom_cpus_done(void)
191{
192}
193
194void __cpuinit prom_smp_finish(void)
195{
196}
197
198void core_send_ipi(int destid, unsigned int action)
199{
200 int irq;
201 208
202 switch (action) { 209 for_each_online_node(cnode) {
203 case SMP_RESCHEDULE_YOURSELF: 210 if (cnode == 0)
204 irq = CPU_RESCHED_A_IRQ; 211 continue;
205 break; 212 intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
206 case SMP_CALL_FUNCTION:
207 irq = CPU_CALL_A_IRQ;
208 break;
209 default:
210 panic("sendintr");
211 } 213 }
212 214
213 irq += cputoslice(destid); 215 replicate_kernel_text();
214 216
215 /* 217 /*
216 * Convert the compact hub number to the NASID to get the correct 218 * Assumption to be fixed: we're always booted on logical / physical
217 * part of the address space. Then set the interrupt bit associated 219 * processor 0. While we're always running on logical processor 0
218 * with the CPU we want to send the interrupt to. 220 * this still means this is physical processor zero; it might for
221 * example be disabled in the firwware.
219 */ 222 */
220 REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq); 223 alloc_cpupda(0, 0);
221} 224}
225
226static void __init ip27_prepare_cpus(unsigned int max_cpus)
227{
228 /* We already did everything necessary earlier */
229}
230
231struct plat_smp_ops ip27_smp_ops = {
232 .send_ipi_single = ip27_send_ipi_single,
233 .send_ipi_mask = ip27_send_ipi_mask,
234 .init_secondary = ip27_init_secondary,
235 .smp_finish = ip27_smp_finish,
236 .cpus_done = ip27_cpus_done,
237 .boot_secondary = ip27_boot_secondary,
238 .smp_setup = ip27_smp_setup,
239 .prepare_cpus = ip27_prepare_cpus,
240};
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 436ba78359ab..183c460b9ca1 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -23,6 +23,7 @@
23 23
24#include <asm/mmu_context.h> 24#include <asm/mmu_context.h>
25#include <asm/io.h> 25#include <asm/io.h>
26#include <asm/fw/cfe/cfe_api.h>
26#include <asm/sibyte/sb1250.h> 27#include <asm/sibyte/sb1250.h>
27#include <asm/sibyte/bcm1480_regs.h> 28#include <asm/sibyte/bcm1480_regs.h>
28#include <asm/sibyte/bcm1480_int.h> 29#include <asm/sibyte/bcm1480_int.h>
@@ -67,28 +68,114 @@ void __cpuinit bcm1480_smp_init(void)
67 change_c0_status(ST0_IM, imask); 68 change_c0_status(ST0_IM, imask);
68} 69}
69 70
70void __cpuinit bcm1480_smp_finish(void) 71/*
72 * These are routines for dealing with the sb1250 smp capabilities
73 * independent of board/firmware
74 */
75
76/*
77 * Simple enough; everything is set up, so just poke the appropriate mailbox
78 * register, and we should be set
79 */
80static void bcm1480_send_ipi_single(int cpu, unsigned int action)
81{
82 __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
83}
84
85static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
86{
87 unsigned int i;
88
89 for_each_cpu_mask(i, mask)
90 bcm1480_send_ipi_single(i, action);
91}
92
93/*
94 * Code to run on secondary just after probing the CPU
95 */
96static void __cpuinit bcm1480_init_secondary(void)
97{
98 extern void bcm1480_smp_init(void);
99
100 bcm1480_smp_init();
101}
102
103/*
104 * Do any tidying up before marking online and running the idle
105 * loop
106 */
107static void __cpuinit bcm1480_smp_finish(void)
71{ 108{
72 extern void sb1480_clockevent_init(void); 109 extern void sb1480_clockevent_init(void);
73 110
74 sb1480_clockevent_init(); 111 sb1480_clockevent_init();
75 local_irq_enable(); 112 local_irq_enable();
113 bcm1480_smp_finish();
76} 114}
77 115
78/* 116/*
79 * These are routines for dealing with the sb1250 smp capabilities 117 * Final cleanup after all secondaries booted
80 * independent of board/firmware
81 */ 118 */
119static void bcm1480_cpus_done(void)
120{
121}
82 122
83/* 123/*
84 * Simple enough; everything is set up, so just poke the appropriate mailbox 124 * Setup the PC, SP, and GP of a secondary processor and start it
85 * register, and we should be set 125 * running!
86 */ 126 */
87void core_send_ipi(int cpu, unsigned int action) 127static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
88{ 128{
89 __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]); 129 int retval;
130
131 retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
132 __KSTK_TOS(idle),
133 (unsigned long)task_thread_info(idle), 0);
134 if (retval != 0)
135 printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
90} 136}
91 137
138/*
139 * Use CFE to find out how many CPUs are available, setting up
140 * phys_cpu_present_map and the logical/physical mappings.
141 * XXXKW will the boot CPU ever not be physical 0?
142 *
143 * Common setup before any secondaries are started
144 */
145static void __init bcm1480_smp_setup(void)
146{
147 int i, num;
148
149 cpus_clear(phys_cpu_present_map);
150 cpu_set(0, phys_cpu_present_map);
151 __cpu_number_map[0] = 0;
152 __cpu_logical_map[0] = 0;
153
154 for (i = 1, num = 0; i < NR_CPUS; i++) {
155 if (cfe_cpu_stop(i) == 0) {
156 cpu_set(i, phys_cpu_present_map);
157 __cpu_number_map[i] = ++num;
158 __cpu_logical_map[num] = i;
159 }
160 }
161 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
162}
163
164static void __init bcm1480_prepare_cpus(unsigned int max_cpus)
165{
166}
167
168struct plat_smp_ops bcm1480_smp_ops = {
169 .send_ipi_single = bcm1480_send_ipi_single,
170 .send_ipi_mask = bcm1480_send_ipi_mask,
171 .init_secondary = bcm1480_init_secondary,
172 .smp_finish = bcm1480_smp_finish,
173 .cpus_done = bcm1480_cpus_done,
174 .boot_secondary = bcm1480_boot_secondary,
175 .smp_setup = bcm1480_smp_setup,
176 .prepare_cpus = bcm1480_prepare_cpus,
177};
178
92void bcm1480_mailbox_interrupt(void) 179void bcm1480_mailbox_interrupt(void)
93{ 180{
94 int cpu = smp_processor_id(); 181 int cpu = smp_processor_id();
diff --git a/arch/mips/sibyte/cfe/Makefile b/arch/mips/sibyte/cfe/Makefile
index a1214937b705..02b32e142adf 100644
--- a/arch/mips/sibyte/cfe/Makefile
+++ b/arch/mips/sibyte/cfe/Makefile
@@ -1,3 +1,2 @@
1lib-y = setup.o 1lib-y = setup.o
2lib-$(CONFIG_SMP) += smp.o
3lib-$(CONFIG_SIBYTE_CFE_CONSOLE) += console.o 2lib-$(CONFIG_SIBYTE_CFE_CONSOLE) += console.o
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index dbd6e6fdd3f9..50d7c05e15b8 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -28,6 +28,7 @@
28#include <asm/bootinfo.h> 28#include <asm/bootinfo.h>
29#include <asm/reboot.h> 29#include <asm/reboot.h>
30#include <asm/sibyte/board.h> 30#include <asm/sibyte/board.h>
31#include <asm/smp-ops.h>
31 32
32#include <asm/fw/cfe/cfe_api.h> 33#include <asm/fw/cfe/cfe_api.h>
33#include <asm/fw/cfe/cfe_error.h> 34#include <asm/fw/cfe/cfe_error.h>
@@ -232,6 +233,9 @@ static int __init initrd_setup(char *str)
232 233
233#endif 234#endif
234 235
236extern struct plat_smp_ops sb_smp_ops;
237extern struct plat_smp_ops bcm1480_smp_ops;
238
235/* 239/*
236 * prom_init is called just after the cpu type is determined, from setup_arch() 240 * prom_init is called just after the cpu type is determined, from setup_arch()
237 */ 241 */
@@ -340,6 +344,13 @@ void __init prom_init(void)
340 arcs_cmdline[CL_SIZE-1] = 0; 344 arcs_cmdline[CL_SIZE-1] = 0;
341 345
342 prom_meminit(); 346 prom_meminit();
347
348#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
349 register_smp_ops(&sb_smp_ops);
350#endif
351#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
352 register_smp_ops(&bcm1480_smp_ops);
353#endif
343} 354}
344 355
345void __init prom_free_prom_memory(void) 356void __init prom_free_prom_memory(void)
diff --git a/arch/mips/sibyte/cfe/smp.c b/arch/mips/sibyte/cfe/smp.c
deleted file mode 100644
index 534a62912f21..000000000000
--- a/arch/mips/sibyte/cfe/smp.c
+++ /dev/null
@@ -1,110 +0,0 @@
1/*
2 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/init.h>
20#include <linux/sched.h>
21#include <linux/smp.h>
22#include <asm/processor.h>
23
24#include <asm/fw/cfe/cfe_api.h>
25#include <asm/fw/cfe/cfe_error.h>
26
27/*
28 * Use CFE to find out how many CPUs are available, setting up
29 * phys_cpu_present_map and the logical/physical mappings.
30 * XXXKW will the boot CPU ever not be physical 0?
31 *
32 * Common setup before any secondaries are started
33 */
34void __init plat_smp_setup(void)
35{
36 int i, num;
37
38 cpus_clear(phys_cpu_present_map);
39 cpu_set(0, phys_cpu_present_map);
40 __cpu_number_map[0] = 0;
41 __cpu_logical_map[0] = 0;
42
43 for (i = 1, num = 0; i < NR_CPUS; i++) {
44 if (cfe_cpu_stop(i) == 0) {
45 cpu_set(i, phys_cpu_present_map);
46 __cpu_number_map[i] = ++num;
47 __cpu_logical_map[num] = i;
48 }
49 }
50 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
51}
52
53void __init plat_prepare_cpus(unsigned int max_cpus)
54{
55}
56
57/*
58 * Setup the PC, SP, and GP of a secondary processor and start it
59 * running!
60 */
61void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
62{
63 int retval;
64
65 retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
66 __KSTK_TOS(idle),
67 (unsigned long)task_thread_info(idle), 0);
68 if (retval != 0)
69 printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
70}
71
72/*
73 * Code to run on secondary just after probing the CPU
74 */
75void __cpuinit prom_init_secondary(void)
76{
77#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
78 extern void bcm1480_smp_init(void);
79 bcm1480_smp_init();
80#elif defined(CONFIG_SIBYTE_SB1250)
81 extern void sb1250_smp_init(void);
82 sb1250_smp_init();
83#else
84#error invalid SMP configuration
85#endif
86}
87
88/*
89 * Do any tidying up before marking online and running the idle
90 * loop
91 */
92void __cpuinit prom_smp_finish(void)
93{
94#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
95 extern void bcm1480_smp_finish(void);
96 bcm1480_smp_finish();
97#elif defined(CONFIG_SIBYTE_SB1250)
98 extern void sb1250_smp_finish(void);
99 sb1250_smp_finish();
100#else
101#error invalid SMP configuration
102#endif
103}
104
105/*
106 * Final cleanup after all secondaries booted
107 */
108void prom_cpus_done(void)
109{
110}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 3f52c95a4eb8..0734b933e969 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -24,6 +24,7 @@
24 24
25#include <asm/mmu_context.h> 25#include <asm/mmu_context.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/fw/cfe/cfe_api.h>
27#include <asm/sibyte/sb1250.h> 28#include <asm/sibyte/sb1250.h>
28#include <asm/sibyte/sb1250_regs.h> 29#include <asm/sibyte/sb1250_regs.h>
29#include <asm/sibyte/sb1250_int.h> 30#include <asm/sibyte/sb1250_int.h>
@@ -55,7 +56,43 @@ void __cpuinit sb1250_smp_init(void)
55 change_c0_status(ST0_IM, imask); 56 change_c0_status(ST0_IM, imask);
56} 57}
57 58
58void __cpuinit sb1250_smp_finish(void) 59/*
60 * These are routines for dealing with the sb1250 smp capabilities
61 * independent of board/firmware
62 */
63
64/*
65 * Simple enough; everything is set up, so just poke the appropriate mailbox
66 * register, and we should be set
67 */
68static void sb1250_send_ipi_single(int cpu, unsigned int action)
69{
70 __raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
71}
72
73static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
74{
75 unsigned int i;
76
77 for_each_cpu_mask(i, mask)
78 sb1250_send_ipi_single(i, action);
79}
80
81/*
82 * Code to run on secondary just after probing the CPU
83 */
84static void __cpuinit sb1250_init_secondary(void)
85{
86 extern void sb1250_smp_init(void);
87
88 sb1250_smp_init();
89}
90
91/*
92 * Do any tidying up before marking online and running the idle
93 * loop
94 */
95static void __cpuinit sb1250_smp_finish(void)
59{ 96{
60 extern void sb1250_clockevent_init(void); 97 extern void sb1250_clockevent_init(void);
61 98
@@ -64,19 +101,68 @@ void __cpuinit sb1250_smp_finish(void)
64} 101}
65 102
66/* 103/*
67 * These are routines for dealing with the sb1250 smp capabilities 104 * Final cleanup after all secondaries booted
68 * independent of board/firmware
69 */ 105 */
106static void sb1250_cpus_done(void)
107{
108}
70 109
71/* 110/*
72 * Simple enough; everything is set up, so just poke the appropriate mailbox 111 * Setup the PC, SP, and GP of a secondary processor and start it
73 * register, and we should be set 112 * running!
74 */ 113 */
75void core_send_ipi(int cpu, unsigned int action) 114static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
76{ 115{
77 __raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]); 116 int retval;
117
118 retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
119 __KSTK_TOS(idle),
120 (unsigned long)task_thread_info(idle), 0);
121 if (retval != 0)
122 printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
78} 123}
79 124
125/*
126 * Use CFE to find out how many CPUs are available, setting up
127 * phys_cpu_present_map and the logical/physical mappings.
128 * XXXKW will the boot CPU ever not be physical 0?
129 *
130 * Common setup before any secondaries are started
131 */
132static void __init sb1250_smp_setup(void)
133{
134 int i, num;
135
136 cpus_clear(phys_cpu_present_map);
137 cpu_set(0, phys_cpu_present_map);
138 __cpu_number_map[0] = 0;
139 __cpu_logical_map[0] = 0;
140
141 for (i = 1, num = 0; i < NR_CPUS; i++) {
142 if (cfe_cpu_stop(i) == 0) {
143 cpu_set(i, phys_cpu_present_map);
144 __cpu_number_map[i] = ++num;
145 __cpu_logical_map[num] = i;
146 }
147 }
148 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
149}
150
151static void __init sb1250_prepare_cpus(unsigned int max_cpus)
152{
153}
154
155struct plat_smp_ops sb_smp_ops = {
156 .send_ipi_single = sb1250_send_ipi_single,
157 .send_ipi_mask = sb1250_send_ipi_mask,
158 .init_secondary = sb1250_init_secondary,
159 .smp_finish = sb1250_smp_finish,
160 .cpus_done = sb1250_cpus_done,
161 .boot_secondary = sb1250_boot_secondary,
162 .smp_setup = sb1250_smp_setup,
163 .prepare_cpus = sb1250_prepare_cpus,
164};
165
80void sb1250_mailbox_interrupt(void) 166void sb1250_mailbox_interrupt(void)
81{ 167{
82 int cpu = smp_processor_id(); 168 int cpu = smp_processor_id();
diff --git a/include/asm-mips/sibyte/sb1250.h b/include/asm-mips/sibyte/sb1250.h
index 0dad844a3b5b..80c1a052662a 100644
--- a/include/asm-mips/sibyte/sb1250.h
+++ b/include/asm-mips/sibyte/sb1250.h
@@ -48,12 +48,10 @@ extern unsigned int zbbus_mhz;
48extern void sb1250_time_init(void); 48extern void sb1250_time_init(void);
49extern void sb1250_mask_irq(int cpu, int irq); 49extern void sb1250_mask_irq(int cpu, int irq);
50extern void sb1250_unmask_irq(int cpu, int irq); 50extern void sb1250_unmask_irq(int cpu, int irq);
51extern void sb1250_smp_finish(void);
52 51
53extern void bcm1480_time_init(void); 52extern void bcm1480_time_init(void);
54extern void bcm1480_mask_irq(int cpu, int irq); 53extern void bcm1480_mask_irq(int cpu, int irq);
55extern void bcm1480_unmask_irq(int cpu, int irq); 54extern void bcm1480_unmask_irq(int cpu, int irq);
56extern void bcm1480_smp_finish(void);
57 55
58#define AT_spin \ 56#define AT_spin \
59 __asm__ __volatile__ ( \ 57 __asm__ __volatile__ ( \
diff --git a/include/asm-mips/smp-ops.h b/include/asm-mips/smp-ops.h
new file mode 100644
index 000000000000..b17fdfb5d818
--- /dev/null
+++ b/include/asm-mips/smp-ops.h
@@ -0,0 +1,56 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General
3 * Public License. See the file "COPYING" in the main directory of this
4 * archive for more details.
5 *
6 * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
7 * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
8 * Copyright (C) 2000, 2001, 2002 Ralf Baechle
9 * Copyright (C) 2000, 2001 Broadcom Corporation
10 */
11#ifndef __ASM_SMP_OPS_H
12#define __ASM_SMP_OPS_H
13
14#ifdef CONFIG_SMP
15
16#include <linux/cpumask.h>
17
18struct plat_smp_ops {
19 void (*send_ipi_single)(int cpu, unsigned int action);
20 void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
21 void (*init_secondary)(void);
22 void (*smp_finish)(void);
23 void (*cpus_done)(void);
24 void (*boot_secondary)(int cpu, struct task_struct *idle);
25 void (*smp_setup)(void);
26 void (*prepare_cpus)(unsigned int max_cpus);
27};
28
29extern void register_smp_ops(struct plat_smp_ops *ops);
30
31static inline void plat_smp_setup(void)
32{
33 extern struct plat_smp_ops *mp_ops; /* private */
34
35 mp_ops->smp_setup();
36}
37
38#else /* !CONFIG_SMP */
39
40struct plat_smp_ops;
41
42static inline void plat_smp_setup(void)
43{
44 /* UP, nothing to do ... */
45}
46
47static inline void register_smp_ops(struct plat_smp_ops *ops)
48{
49}
50
51#endif /* !CONFIG_SMP */
52
53extern struct plat_smp_ops up_smp_ops;
54extern struct plat_smp_ops vsmp_smp_ops;
55
56#endif /* __ASM_SMP_OPS_H */
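
Two details of this header are worth calling out. On !CONFIG_SMP kernels, plat_smp_setup() and register_smp_ops() collapse to empty inlines, which is what lets the setup.c hunk earlier drop its #ifdef CONFIG_SMP around plat_smp_setup(). And only up_smp_ops and vsmp_smp_ops are declared here, since more than one board registers them; every other ops table stays a local extern in the board's prom_init(), as the hunks above show. A short usage sketch, modelled on the sim_setup.c hunk, for a board whose MT capability is only known at run time:

#include <linux/init.h>
#include <asm/cpu-features.h>
#include <asm/smp-ops.h>

void __init prom_init(void)
{
#ifdef CONFIG_MIPS_MT_SMP
	if (cpu_has_mipsmt)
		register_smp_ops(&vsmp_smp_ops);	/* MT hardware present: run VSMP */
	else
		register_smp_ops(&up_smp_ops);		/* fall back to the UP ops */
#endif
}
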
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 23265879cee9..84fef1aeec0c 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -11,14 +11,13 @@
11#ifndef __ASM_SMP_H 11#ifndef __ASM_SMP_H
12#define __ASM_SMP_H 12#define __ASM_SMP_H
13 13
14
15#ifdef CONFIG_SMP
16
17#include <linux/bitops.h> 14#include <linux/bitops.h>
18#include <linux/linkage.h> 15#include <linux/linkage.h>
19#include <linux/threads.h> 16#include <linux/threads.h>
20#include <linux/cpumask.h> 17#include <linux/cpumask.h>
18
21#include <asm/atomic.h> 19#include <asm/atomic.h>
20#include <asm/smp-ops.h>
22 21
23extern int smp_num_siblings; 22extern int smp_num_siblings;
24extern cpumask_t cpu_sibling_map[]; 23extern cpumask_t cpu_sibling_map[];
@@ -52,56 +51,6 @@ extern struct call_data_struct *call_data;
52extern cpumask_t phys_cpu_present_map; 51extern cpumask_t phys_cpu_present_map;
53#define cpu_possible_map phys_cpu_present_map 52#define cpu_possible_map phys_cpu_present_map
54 53
55/*
56 * These are defined by the board-specific code.
57 */
58
59/*
60 * Cause the function described by call_data to be executed on the passed
61 * cpu. When the function has finished, increment the finished field of
62 * call_data.
63 */
64extern void core_send_ipi(int cpu, unsigned int action);
65
66static inline void core_send_ipi_mask(cpumask_t mask, unsigned int action)
67{
68 unsigned int i;
69
70 for_each_cpu_mask(i, mask)
71 core_send_ipi(i, action);
72}
73
74
75/*
76 * Firmware CPU startup hook
77 */
78extern void prom_boot_secondary(int cpu, struct task_struct *idle);
79
80/*
81 * After we've done initial boot, this function is called to allow the
82 * board code to clean up state, if needed
83 */
84extern void prom_init_secondary(void);
85
86/*
87 * Populate cpu_possible_map before smp_init, called from setup_arch.
88 */
89extern void plat_smp_setup(void);
90
91/*
92 * Called in smp_prepare_cpus.
93 */
94extern void plat_prepare_cpus(unsigned int max_cpus);
95
96/*
97 * Last chance for the board code to finish SMP initialization before
98 * the CPU is "online".
99 */
100extern void prom_smp_finish(void);
101
102/* Hook for after all CPUs are online */
103extern void prom_cpus_done(void);
104
105extern void asmlinkage smp_bootstrap(void); 54extern void asmlinkage smp_bootstrap(void);
106 55
107/* 56/*
@@ -111,11 +60,11 @@ extern void asmlinkage smp_bootstrap(void);
111 */ 60 */
112static inline void smp_send_reschedule(int cpu) 61static inline void smp_send_reschedule(int cpu)
113{ 62{
114 core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF); 63 extern struct plat_smp_ops *mp_ops; /* private */
64
65 mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
115} 66}
116 67
117extern asmlinkage void smp_call_function_interrupt(void); 68extern asmlinkage void smp_call_function_interrupt(void);
118 69
119#endif /* CONFIG_SMP */
120
121#endif /* __ASM_SMP_H */ 70#endif /* __ASM_SMP_H */