Diffstat (limited to 'arch/powerpc/platforms/powermac/smp.c')
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c  716
1 file changed, 716 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
new file mode 100644
index 000000000000..fb996336c58b
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -0,0 +1,716 @@
/*
 * SMP support for Power Macintosh.
 *
 * We support both the old "powersurge" SMP architecture
 * and the current Core99 (G4 PowerMac) machines.
 *
 * Note that we don't support the very first revision of the
 * Apple/DayStar dual-CPU board, the one with the funky
 * watchdog. Hopefully, none of these should be out there except
 * maybe internally at Apple. I should probably still add some
 * code to detect this card and disable SMP, though. --BenH.
 *
 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
 *
 * Support for DayStar quad CPU cards
 * Copyright (C) XLR8, Inc. 1994-2000
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/residual.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/keylargo.h>

/*
 * Powersurge (old powermac SMP) support.
 */

extern void __secondary_start_pmac_0(void);

/* Addresses for powersurge registers */
#define HAMMERHEAD_BASE 0xf8000000
#define HHEAD_CONFIG 0x90
#define HHEAD_SEC_INTR 0xc0

/* register for interrupting the primary processor on the powersurge */
/* N.B. this is actually the ethernet ROM! */
#define PSURGE_PRI_INTR 0xf3019000

/* register for storing the start address for the secondary processor */
/* N.B. this is the PCI config space address register for the 1st bridge */
#define PSURGE_START 0xf2800000

/* Daystar/XLR8 4-CPU card */
#define PSURGE_QUAD_REG_ADDR 0xf8800000

#define PSURGE_QUAD_IRQ_SET 0
#define PSURGE_QUAD_IRQ_CLR 1
#define PSURGE_QUAD_IRQ_PRIMARY 2
#define PSURGE_QUAD_CKSTOP_CTL 3
#define PSURGE_QUAD_PRIMARY_ARB 4
#define PSURGE_QUAD_BOARD_ID 6
#define PSURGE_QUAD_WHICH_CPU 7
#define PSURGE_QUAD_CKSTOP_RDBK 8
#define PSURGE_QUAD_RESET_CTL 11

#define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
#define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
#define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
#define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
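/*
 * Worked example of the accessors above: quad register r is a single byte
 * at offset (r << 4) + 4 from quad_base, with only the low nibble valid on
 * reads.  So PSURGE_QUAD_IRQ_CLR (register 1) lives at quad_base + 0x14
 * and PSURGE_QUAD_RESET_CTL (register 11) at quad_base + 0xb4.
 */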

/* virtual addresses for the above */
static volatile u8 __iomem *hhead_base;
static volatile u8 __iomem *quad_base;
static volatile u32 __iomem *psurge_pri_intr;
static volatile u8 __iomem *psurge_sec_intr;
static volatile u32 __iomem *psurge_start;

/* values for psurge_type */
#define PSURGE_NONE -1
#define PSURGE_DUAL 0
#define PSURGE_QUAD_OKEE 1
#define PSURGE_QUAD_COTTON 2
#define PSURGE_QUAD_ICEGRASS 3

/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;

/* L2 and L3 cache settings to pass from CPU0 to CPU1 */
static volatile long int core99_l2_cache;
static volatile long int core99_l3_cache;

/* Timebase freeze GPIO */
static unsigned int core99_tb_gpio;

/* Sync flag for HW tb sync */
static volatile int sec_tb_reset = 0;
static unsigned int pri_tb_hi, pri_tb_lo;
static unsigned int pri_tb_stamp;

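/*
 * core99_l2_cache/core99_l3_cache above are filled in by the boot CPU so
 * that a secondary can be brought up with an identical cache setup: the
 * secondary first writes 0 to disable its cache, then loads the value
 * saved by CPU0 (see the routine below).
 */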
static void __devinit core99_init_caches(int cpu)
{
	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;

	if (cpu == 0) {
		core99_l2_cache = _get_L2CR();
		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
	} else {
		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
		_set_L2CR(0);
		_set_L2CR(core99_l2_cache);
		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
	}

	if (!cpu_has_feature(CPU_FTR_L3CR))
		return;

	if (cpu == 0) {
		core99_l3_cache = _get_L3CR();
		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
	} else {
		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
		_set_L3CR(0);
		_set_L3CR(core99_l3_cache);
		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
	}
}

/*
 * Set and clear IPIs for powersurge.
 */
static inline void psurge_set_ipi(int cpu)
{
	if (psurge_type == PSURGE_NONE)
		return;
	if (cpu == 0)
		in_be32(psurge_pri_intr);
	else if (psurge_type == PSURGE_DUAL)
		out_8(psurge_sec_intr, 0);
	else
		PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
}

static inline void psurge_clr_ipi(int cpu)
{
	if (cpu > 0) {
		switch(psurge_type) {
		case PSURGE_DUAL:
			out_8(psurge_sec_intr, ~0);
			/* fall through */
		case PSURGE_NONE:
			break;
		default:
			PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
		}
	}
}

/*
 * On powersurge (old SMP powermac architecture) we don't have
 * separate IPIs for separate messages like openpic does.  Instead
 * we have a bitmap for each processor, where a 1 bit means that
 * the corresponding message is pending for that processor.
 * Ideally each cpu's entry would be in a different cache line.
 *  -- paulus.
 */
static unsigned long psurge_smp_message[NR_CPUS];

void psurge_smp_message_recv(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int msg;

	/* clear interrupt */
	psurge_clr_ipi(cpu);

	if (num_online_cpus() < 2)
		return;

	/* make sure there is a message there */
	for (msg = 0; msg < 4; msg++)
		if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
			smp_message_recv(msg, regs);
}

irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
{
	psurge_smp_message_recv(regs);
	return IRQ_HANDLED;
}

static void smp_psurge_message_pass(int target, int msg, unsigned long data,
				    int wait)
{
	int i;

	if (num_online_cpus() < 2)
		return;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		if (target == MSG_ALL
		    || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
		    || target == i) {
			set_bit(msg, &psurge_smp_message[i]);
			psurge_set_ipi(i);
		}
	}
}
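/*
 * Example of the scheme described above: a MSG_ALL_BUT_SELF call from CPU 0
 * on a quad board sets bit `msg' in psurge_smp_message[1..3] and raises each
 * target's IPI; the receiver clears its IPI in psurge_smp_message_recv() and
 * drains its bitmap, one smp_message_recv() call per pending message.
 */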

/*
 * Detect the presence of a quad card.  We read the board ID register,
 * force the data bus to change to something else, and read it again.
 * If it's stable, then the register probably exists (ugh!)
 */
static int __init psurge_quad_probe(void)
{
	int type;
	unsigned int i;

	type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
	if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
	    || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
		return PSURGE_DUAL;

	/* looks OK, try a slightly more rigorous test */
	/* bogus is not necessarily cacheline-aligned,
	   though I don't suppose that really matters.  -- paulus */
	for (i = 0; i < 100; i++) {
		volatile u32 bogus[8];
		bogus[(0+i)%8] = 0x00000000;
		bogus[(1+i)%8] = 0x55555555;
		bogus[(2+i)%8] = 0xFFFFFFFF;
		bogus[(3+i)%8] = 0xAAAAAAAA;
		bogus[(4+i)%8] = 0x33333333;
		bogus[(5+i)%8] = 0xCCCCCCCC;
		bogus[(6+i)%8] = 0xCCCCCCCC;
		bogus[(7+i)%8] = 0x33333333;
		wmb();
		asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
		mb();
		if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
			return PSURGE_DUAL;
	}
	return type;
}

static void __init psurge_quad_init(void)
{
	int procbits;

	if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
	procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
	if (psurge_type == PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	else
		PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
	mdelay(33);
	out_8(psurge_sec_intr, ~0);
	PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
	PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	if (psurge_type != PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
	PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
	PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
	mdelay(33);
	PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
}

static int __init smp_psurge_probe(void)
{
	int i, ncpus;

	/* We don't do SMP on the PPC601 -- paulus */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		return 1;

	/*
	 * The powersurge cpu board can be used in the generation
	 * of powermacs that have a socket for an upgradeable cpu card,
	 * including the 7500, 8500, 9500, 9600.
	 * The device tree doesn't tell you if you have 2 cpus because
	 * OF doesn't know anything about the 2nd processor.
	 * Instead we look for magic bits in magic registers,
	 * in the hammerhead memory controller in the case of the
	 * dual-cpu powersurge board.  -- paulus.
	 */
	if (find_devices("hammerhead") == NULL)
		return 1;

	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

	psurge_type = psurge_quad_probe();
	if (psurge_type != PSURGE_DUAL) {
		psurge_quad_init();
		/* All released cards using this HW design have 4 CPUs */
		ncpus = 4;
	} else {
		iounmap(quad_base);
		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
			/* not a dual-cpu card */
			iounmap(hhead_base);
			psurge_type = PSURGE_NONE;
			return 1;
		}
		ncpus = 2;
	}

	psurge_start = ioremap(PSURGE_START, 4);
	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

	/* this is not actually strictly necessary -- paulus. */
	for (i = 1; i < ncpus; ++i)
		smp_hw_index[i] = i;

	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);

	return ncpus;
}

static void __init smp_psurge_kick_cpu(int nr)
{
	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
	unsigned long a;

	/* may need to flush here if secondary bats aren't set up */
	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
	asm volatile("sync");

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

	out_be32(psurge_start, start);
	mb();

	psurge_set_ipi(nr);
	udelay(10);
	psurge_clr_ipi(nr);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}

/*
 * With the dual-cpu powersurge board, the decrementers and timebases
 * of both cpus are frozen after the secondary cpu is started up,
 * until we give the secondary cpu another interrupt.  This routine
 * uses this to get the timebases synchronized.
 *  -- paulus.
 */
static void __init psurge_dual_sync_tb(int cpu_nr)
{
	int t;

	set_dec(tb_ticks_per_jiffy);
	set_tb(0, 0);
	last_jiffy_stamp(cpu_nr) = 0;

	if (cpu_nr > 0) {
		mb();
		sec_tb_reset = 1;
		return;
	}

	/* wait for the secondary to have reset its TB before proceeding */
	for (t = 10000000; t > 0 && !sec_tb_reset; --t)
		;

	/* now interrupt the secondary, starting both TBs */
	psurge_set_ipi(1);

	smp_tb_synchronized = 1;
}

static struct irqaction psurge_irqaction = {
	.handler = psurge_primary_intr,
	.flags = SA_INTERRUPT,
	.mask = CPU_MASK_NONE,
	.name = "primary IPI",
};

static void __init smp_psurge_setup_cpu(int cpu_nr)
{
	if (cpu_nr == 0) {
		/* If we failed to start the second CPU, we should still
		 * send it an IPI to start the timebase & DEC or we might
		 * have them stuck.
		 */
		if (num_online_cpus() < 2) {
			if (psurge_type == PSURGE_DUAL)
				psurge_set_ipi(1);
			return;
		}
		/* reset the entry point so if we get another intr we won't
		 * try to start up again */
		out_be32(psurge_start, 0x100);
		if (setup_irq(30, &psurge_irqaction))
			printk(KERN_ERR "Couldn't get primary IPI interrupt\n");
	}

	if (psurge_type == PSURGE_DUAL)
		psurge_dual_sync_tb(cpu_nr);
}

void __init smp_psurge_take_timebase(void)
{
	/* Dummy implementation */
}

void __init smp_psurge_give_timebase(void)
{
	/* Dummy implementation */
}

static int __init smp_core99_probe(void)
{
#ifdef CONFIG_6xx
	extern int powersave_nap;
#endif
	struct device_node *cpus, *firstcpu;
	int i, ncpus = 0, boot_cpu = -1;
	u32 *tbprop = NULL;

	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
	cpus = firstcpu = find_type_devices("cpu");
	while (cpus != NULL) {
		u32 *regprop = (u32 *)get_property(cpus, "reg", NULL);
		char *stateprop = (char *)get_property(cpus, "state", NULL);
		if (regprop != NULL && stateprop != NULL &&
		    !strncmp(stateprop, "running", 7))
			boot_cpu = *regprop;
		++ncpus;
		cpus = cpus->next;
	}
	if (boot_cpu == -1)
		printk(KERN_WARNING "Couldn't detect boot CPU !\n");
	if (boot_cpu != 0)
		printk(KERN_WARNING "Boot CPU is %d, unsupported setup !\n", boot_cpu);

	if (machine_is_compatible("MacRISC4")) {
		extern struct smp_ops_t core99_smp_ops;

		core99_smp_ops.take_timebase = smp_generic_take_timebase;
		core99_smp_ops.give_timebase = smp_generic_give_timebase;
	} else {
		if (firstcpu != NULL)
			tbprop = (u32 *)get_property(firstcpu, "timebase-enable", NULL);
		if (tbprop)
			core99_tb_gpio = *tbprop;
		else
			core99_tb_gpio = KL_GPIO_TB_ENABLE;
	}

	if (ncpus > 1) {
		mpic_request_ipis();
		for (i = 1; i < ncpus; ++i)
			smp_hw_index[i] = i;
#ifdef CONFIG_6xx
		powersave_nap = 0;
#endif
		core99_init_caches(0);
	}

	return ncpus;
}

static void __devinit smp_core99_kick_cpu(int nr)
{
	unsigned long save_vector, new_vector;
	unsigned long flags;

	volatile unsigned long *vector
		 = ((volatile unsigned long *)(KERNELBASE+0x100));
	if (nr < 0 || nr > 3)
		return;
	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);

	local_irq_save(flags);
	local_irq_disable();

	/* Save reset vector */
	save_vector = *vector;

	/* Setup fake reset vector that does
	 *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
	 */
	new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
	*vector = 0x48000002 + new_vector - KERNELBASE;
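	/*
	 * The constant above hand-assembles the branch: 0x48000000 is the
	 * PowerPC "b" opcode, the AA bit (0x2) makes it an absolute branch,
	 * and the remaining bits carry the physical target address
	 * (new_vector - KERNELBASE), i.e. a "ba" into the secondary entry
	 * stub for CPU nr.
	 */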

	/* flush data cache and inval instruction cache */
	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);

	/* Put some life in our friend */
	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);

	/* FIXME: We wait a bit for the CPU to take the exception; I should
	 * instead wait for the entry code to set something for me.  Well,
	 * ideally, all that crap will be done in prom.c and the CPU left
	 * in a RAM-based wait loop like CHRP.
	 */
	mdelay(1);

	/* Restore our exception vector */
	*vector = save_vector;
	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);

	local_irq_restore(flags);
	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
}

static void __devinit smp_core99_setup_cpu(int cpu_nr)
{
	/* Setup L2/L3 */
	if (cpu_nr != 0)
		core99_init_caches(cpu_nr);

	/* Setup openpic */
	mpic_setup_this_cpu();

	if (cpu_nr == 0) {
#ifdef CONFIG_POWER4
		extern void g5_phy_disable_cpu1(void);

		/* If we didn't start the second CPU, we must take
		 * it off the bus
		 */
		if (machine_is_compatible("MacRISC4") &&
		    num_online_cpus() < 2)
			g5_phy_disable_cpu1();
#endif /* CONFIG_POWER4 */
		if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
	}
}

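/*
 * The two routines below hand the timebase from the boot CPU to a waking
 * secondary using sec_tb_reset as a small handshake: the secondary sets it
 * to 1, the primary freezes the timebase via the GPIO, samples it into
 * pri_tb_hi/lo and sets the flag to 2, and the secondary loads that value
 * and drops the flag back to 0 so the primary can unfreeze the timebase.
 */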
/* not __init, called in sleep/wakeup code */
void smp_core99_take_timebase(void)
{
	unsigned long flags;

	/* tell the primary we're here */
	sec_tb_reset = 1;
	mb();

	/* wait for the primary to set pri_tb_hi/lo */
	while (sec_tb_reset < 2)
		mb();

	/* set our stuff the same as the primary */
	local_irq_save(flags);
	set_dec(1);
	set_tb(pri_tb_hi, pri_tb_lo);
	last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
	mb();

	/* tell the primary we're done */
	sec_tb_reset = 0;
	mb();
	local_irq_restore(flags);
}

/* not __init, called in sleep/wakeup code */
void smp_core99_give_timebase(void)
{
	unsigned long flags;
	unsigned int t;

	/* wait for the secondary to be in take_timebase */
	for (t = 100000; t > 0 && !sec_tb_reset; --t)
		udelay(10);
	if (!sec_tb_reset) {
		printk(KERN_WARNING "Timeout waiting for sync on second CPU\n");
		return;
	}

	/* freeze the timebase and read it */
	/* disable interrupts so the timebase is disabled for the
	   shortest possible time */
	local_irq_save(flags);
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	mb();
	pri_tb_hi = get_tbu();
	pri_tb_lo = get_tbl();
	pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
	mb();

	/* tell the secondary we're ready */
	sec_tb_reset = 2;
	mb();

	/* wait for the secondary to have taken it */
	for (t = 100000; t > 0 && sec_tb_reset; --t)
		udelay(10);
	if (sec_tb_reset)
		printk(KERN_WARNING "Timeout waiting for sync(2) on second CPU\n");
	else
		smp_tb_synchronized = 1;

	/* Now, restart the timebase by leaving the GPIO as an open collector */
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	local_irq_restore(flags);
}

void smp_core99_message_pass(int target, int msg, unsigned long data, int wait)
{
	cpumask_t mask = CPU_MASK_ALL;
	/* make sure we're sending something that translates to an IPI */
	if (msg > 0x3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}
	switch (target) {
	case MSG_ALL:
		mpic_send_ipi(msg, cpus_addr(mask)[0]);
		break;
	case MSG_ALL_BUT_SELF:
		cpu_clear(smp_processor_id(), mask);
		mpic_send_ipi(msg, cpus_addr(mask)[0]);
		break;
	default:
		mpic_send_ipi(msg, 1 << target);
		break;
	}
}


/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
	.message_pass = smp_psurge_message_pass,
	.probe = smp_psurge_probe,
	.kick_cpu = smp_psurge_kick_cpu,
	.setup_cpu = smp_psurge_setup_cpu,
	.give_timebase = smp_psurge_give_timebase,
	.take_timebase = smp_psurge_take_timebase,
};

/* Core99 Macs (dual G4s) */
struct smp_ops_t core99_smp_ops = {
	.message_pass = smp_core99_message_pass,
	.probe = smp_core99_probe,
	.kick_cpu = smp_core99_kick_cpu,
	.setup_cpu = smp_core99_setup_cpu,
	.give_timebase = smp_core99_give_timebase,
	.take_timebase = smp_core99_take_timebase,
};
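/*
 * These ops tables are consumed by the powermac platform setup code,
 * which selects one according to the machine it is running on.  A rough
 * sketch of that selection (the helper name here is made up, and the real
 * call site and hook live elsewhere in the platform code):
 *
 *	if (is_core99_machine())
 *		smp_ops = &core99_smp_ops;
 *	else
 *		smp_ops = &psurge_smp_ops;
 */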

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	cpu_clear(smp_processor_id(), cpu_online_map);

	/* XXX reset cpu affinity here */
	mpic_cpu_set_priority(0xf);
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	mb();
	udelay(20);
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	return 0;
}

extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */
static int cpu_dead[NR_CPUS];

void cpu_die(void)
{
	local_irq_disable();
	cpu_dead[smp_processor_id()] = 1;
	mb();
	low_cpu_die();
}

void __cpu_die(unsigned int cpu)
{
	int timeout;

	timeout = 1000;
	while (!cpu_dead[cpu]) {
		if (--timeout == 0) {
			printk("CPU %u refused to die!\n", cpu);
			break;
		}
		msleep(1);
	}
	cpu_callin_map[cpu] = 0;
	cpu_dead[cpu] = 0;
}

#endif