author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sparc/kernel/sun4d_smp.c
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/sparc/kernel/sun4d_smp.c')
-rw-r--r--   arch/sparc/kernel/sun4d_smp.c   486
1 file changed, 486 insertions, 0 deletions
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
new file mode 100644
index 00000000000..cc1fc898495
--- /dev/null
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -0,0 +1,486 @@
/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support.
 *
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *
 * Based on sun4m's smp.c, which is:
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/sbus.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpudata.h>

#define IRQ_CROSS_CALL 15

extern ctxd_t *srmmu_ctx_table_phys;

extern void calibrate_delay(void);

extern volatile int smp_processors_ready;
extern int smp_num_cpus;
static int smp_highest_cpu;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern struct cpuinfo_sparc cpu_data[NR_CPUS];
extern unsigned char boot_cpu_id;
extern int smp_activated;
extern volatile int __cpu_number_map[NR_CPUS];
extern volatile int __cpu_logical_map[NR_CPUS];
extern volatile unsigned long ipi_count;
extern volatile int smp_process_available;
extern volatile int smp_commenced;
extern int __smp4d_processor_id(void);

/* #define SMP_DEBUG */

#ifdef SMP_DEBUG
#define SMP_PRINTK(x) printk x
#else
#define SMP_PRINTK(x)
#endif

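/*
 * Atomically exchange *ptr and val using the SPARC "swap" instruction;
 * used below so the cpu_callin_map handshake store happens in one
 * indivisible step visible to the master CPU.
 */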
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}

static void smp_setup_percpu_timer(void);
extern void cpu_probe(void);
extern void sun4d_distribute_irqs(void);

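/*
 * Entry point for a freshly started secondary CPU: set up its per-CPU
 * timer, calibrate the delay loop, report in via cpu_callin_map so the
 * master can continue, then spin until smp_commenced before allowing
 * the level-14 timer interrupt.
 */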
void __init smp4d_callin(void)
{
	int cpuid = hard_smp4d_processor_id();
	extern spinlock_t sun4d_imsk_lock;
	unsigned long flags;

	/* Show we are alive */
	cpu_leds[cpuid] = 0x6;
	show_leds(cpuid);

	/* Enable level15 interrupt, disable level14 interrupt for now */
	cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);

	local_flush_cache_all();
	local_flush_tlb_all();

	/*
	 * Unblock the master CPU _only_ when the scheduler state
	 * of all secondary CPUs will be up-to-date, so after
	 * the SMP initialization the master will be just allowed
	 * to call the scheduler code.
	 */
	/* Get our local ticker going. */
	smp_setup_percpu_timer();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	local_flush_cache_all();
	local_flush_tlb_all();

	/* Allow master to continue. */
	swap((unsigned long *)&cpu_callin_map[cpuid], 1);
	local_flush_cache_all();
	local_flush_tlb_all();

	cpu_probe();

	while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
		barrier();

	while(current_set[cpuid]->cpu != cpuid)
		barrier();

	/* Fix idle thread fields. */
	__asm__ __volatile__("ld [%0], %%g6\n\t"
			     : : "r" (&current_set[cpuid])
			     : "memory" /* paranoid */);

	cpu_leds[cpuid] = 0x9;
	show_leds(cpuid);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	local_flush_cache_all();
	local_flush_tlb_all();

	local_irq_enable();	/* We don't allow PIL 14 yet */

	while(!smp_commenced)
		barrier();

	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
}

extern void init_IRQ(void);
extern void cpu_panic(void);

/*
 * Cycle through the processors asking the PROM to start each one.
 */

extern struct linux_prom_registers smp_penguin_ctable;
extern unsigned long trapbase_cpu1[];
extern unsigned long trapbase_cpu2[];
extern unsigned long trapbase_cpu3[];

void __init smp4d_boot_cpus(void)
{
	int cpucount = 0;
	int i, mid;

	printk("Entering SMP Mode...\n");

	if (boot_cpu_id)
		current_set[0] = NULL;

	local_irq_enable();
	cpus_clear(cpu_present_map);

	/* XXX This whole thing has to go.  See sparc64. */
	for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
		cpu_set(mid, cpu_present_map);
	SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0]));
	for(i=0; i < NR_CPUS; i++)
		__cpu_number_map[i] = -1;
	for(i=0; i < NR_CPUS; i++)
		__cpu_logical_map[i] = -1;
	__cpu_number_map[boot_cpu_id] = 0;
	__cpu_logical_map[0] = boot_cpu_id;
	current_thread_info()->cpu = boot_cpu_id;
	smp_store_cpu_info(boot_cpu_id);
	smp_setup_percpu_timer();
	local_flush_cache_all();
	if (cpu_find_by_instance(1, NULL, NULL))
		return;  /* Not an MP box. */
	SMP_PRINTK(("Iterating over CPUs\n"));
	for(i = 0; i < NR_CPUS; i++) {
		if(i == boot_cpu_id)
			continue;

		if (cpu_isset(i, cpu_present_map)) {
			extern unsigned long sun4d_cpu_startup;
			unsigned long *entry = &sun4d_cpu_startup;
			struct task_struct *p;
			int timeout;
			int no;

			/* Cook up an idler for this guy. */
			p = fork_idle(i);
			cpucount++;
			current_set[i] = p->thread_info;
			for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
				     && mid != i; no++) ;

			/*
			 * Initialize the contexts table
			 * Since the call to prom_startcpu() trashes the structure,
			 * we need to re-initialize it for each cpu
			 */
			smp_penguin_ctable.which_io = 0;
			smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
			smp_penguin_ctable.reg_size = 0;

			/* whirrr, whirrr, whirrrrrrrrr... */
			SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, cpu_data(no).prom_node));
			local_flush_cache_all();
			prom_startcpu(cpu_data(no).prom_node,
				      &smp_penguin_ctable, 0, (char *)entry);

			SMP_PRINTK(("prom_startcpu returned :)\n"));

			/* wheee... it's going... */
			for(timeout = 0; timeout < 10000; timeout++) {
				if(cpu_callin_map[i])
					break;
				udelay(200);
			}

			if(cpu_callin_map[i]) {
				/* Another "Red Snapper". */
				__cpu_number_map[i] = cpucount;
				__cpu_logical_map[cpucount] = i;
			} else {
				cpucount--;
				printk("Processor %d is stuck.\n", i);
			}
		}
		if(!(cpu_callin_map[i])) {
			cpu_clear(i, cpu_present_map);
			__cpu_number_map[i] = -1;
		}
	}
	local_flush_cache_all();
	if(cpucount == 0) {
		printk("Error: only one Processor found.\n");
		cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id());
	} else {
		unsigned long bogosum = 0;

		for(i = 0; i < NR_CPUS; i++) {
			if (cpu_isset(i, cpu_present_map)) {
				bogosum += cpu_data(i).udelay_val;
				smp_highest_cpu = i;
			}
		}
		SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
		       cpucount + 1,
		       bogosum/(500000/HZ),
		       (bogosum/(5000/HZ))%100);
		smp_activated = 1;
		smp_num_cpus = cpucount + 1;
	}

	/* Free unneeded trap tables */
	ClearPageReserved(virt_to_page(trapbase_cpu1));
	set_page_count(virt_to_page(trapbase_cpu1), 1);
	free_page((unsigned long)trapbase_cpu1);
	totalram_pages++;
	num_physpages++;

	ClearPageReserved(virt_to_page(trapbase_cpu2));
	set_page_count(virt_to_page(trapbase_cpu2), 1);
	free_page((unsigned long)trapbase_cpu2);
	totalram_pages++;
	num_physpages++;

	ClearPageReserved(virt_to_page(trapbase_cpu3));
	set_page_count(virt_to_page(trapbase_cpu3), 1);
	free_page((unsigned long)trapbase_cpu3);
	totalram_pages++;
	num_physpages++;

	/* Ok, they are spinning and ready to go. */
	smp_processors_ready = 1;
	sun4d_distribute_irqs();
}

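/*
 * Cross-call mailbox: the sender fills in func/arg1..arg5, fires
 * IRQ_CROSS_CALL at every other present CPU, then spins until each
 * target has set processors_in (entered the handler) and
 * processors_out (finished running func).
 */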
static struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned char processors_in[NR_CPUS];  /* Set when ipi entered. */
	unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info __attribute__((aligned(8)));

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		      unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
	if(smp_processors_ready) {
		register int high = smp_highest_cpu;
		unsigned long flags;

		spin_lock_irqsave(&cross_call_lock, flags);

		{
			/* If you make changes here, make sure gcc generates proper code... */
			register smpfunc_t f asm("i0") = func;
			register unsigned long a1 asm("i1") = arg1;
			register unsigned long a2 asm("i2") = arg2;
			register unsigned long a3 asm("i3") = arg3;
			register unsigned long a4 asm("i4") = arg4;
			register unsigned long a5 asm("i5") = arg5;

			__asm__ __volatile__(
				"std %0, [%6]\n\t"
				"std %2, [%6 + 8]\n\t"
				"std %4, [%6 + 16]\n\t" : :
				"r"(f), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
				"r" (&ccall_info.func));
		}

		/* Init receive/complete mapping, plus fire the IPI's off. */
		{
			cpumask_t mask;
			register int i;

			mask = cpumask_of_cpu(hard_smp4d_processor_id());
			cpus_andnot(mask, cpu_present_map, mask);
			for(i = 0; i <= high; i++) {
				if (cpu_isset(i, mask)) {
					ccall_info.processors_in[i] = 0;
					ccall_info.processors_out[i] = 0;
					sun4d_send_ipi(i, IRQ_CROSS_CALL);
				}
			}
		}

		{
			register int i;

			i = 0;
			do {
				while(!ccall_info.processors_in[i])
					barrier();
			} while(++i <= high);

			i = 0;
			do {
				while(!ccall_info.processors_out[i])
					barrier();
			} while(++i <= high);
		}

		spin_unlock_irqrestore(&cross_call_lock, flags);
	}
}

/* Running cross calls. */
void smp4d_cross_call_irq(void)
{
	int i = hard_smp4d_processor_id();

	ccall_info.processors_in[i] = 1;
	ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
			ccall_info.arg4, ccall_info.arg5);
	ccall_info.processors_out[i] = 1;
}

static int smp4d_stop_cpu_sender;

static void smp4d_stop_cpu(void)
{
	int me = hard_smp4d_processor_id();

	if (me != smp4d_stop_cpu_sender)
		while(1) barrier();
}

/* Cross calls, in order to work efficiently and atomically do all
 * the message passing work themselves, only stopcpu and reschedule
 * messages come through here.
 */
void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
{
	int me = hard_smp4d_processor_id();

	SMP_PRINTK(("smp4d_message_pass %d %d %08lx %d\n", target, msg, data, wait));
	if (msg == MSG_STOP_CPU && target == MSG_ALL_BUT_SELF) {
		unsigned long flags;
		static DEFINE_SPINLOCK(stop_cpu_lock);
		spin_lock_irqsave(&stop_cpu_lock, flags);
		smp4d_stop_cpu_sender = me;
		smp4d_cross_call((smpfunc_t)smp4d_stop_cpu, 0, 0, 0, 0, 0);
		spin_unlock_irqrestore(&stop_cpu_lock, flags);
	}
	printk("Yeeee, trying to send SMP msg(%d) to %d on cpu %d\n", msg, target, me);
	panic("Bogon SMP message pass.");
}

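/*
 * Per-CPU level-14 profile timer tick: ack the profile interrupt,
 * cycle this CPU's front-panel LED pattern every 16 ticks, and drive
 * the profiling and process-time accounting hooks.
 */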
void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = hard_smp4d_processor_id();
	static int cpu_tick[NR_CPUS];
	static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };

	bw_get_prof_limit(cpu);
	bw_clear_intr_mask(0, 1);	/* INTR_TABLE[0] & 1 is Profile IRQ */

	cpu_tick[cpu]++;
	if (!(cpu_tick[cpu] & 15)) {
		if (cpu_tick[cpu] == 0x60)
			cpu_tick[cpu] = 0;
		cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
		show_leds(cpu);
	}

	profile_tick(CPU_PROFILING, regs);

	if(!--prof_counter(cpu)) {
		int user = user_mode(regs);

		irq_enter();
		update_process_times(user);
		irq_exit();

		prof_counter(cpu) = prof_multiplier(cpu);
	}
}

extern unsigned int lvl14_resolution;

static void __init smp_setup_percpu_timer(void)
{
	int cpu = hard_smp4d_processor_id();

	prof_counter(cpu) = prof_multiplier(cpu) = 1;
	load_profile_irq(cpu, lvl14_resolution);
}

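/*
 * btfixup "blackboxes": instead of making a real function call, the
 * generic hard_smp_processor_id()/load_current() sites are patched with
 * the short inline SPARC sequences built below, which read the value
 * stashed in ASI_M_VIKING_TMP1 (see the opcode comments).
 */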
void __init smp4d_blackbox_id(unsigned *addr)
{
	int rd = *addr & 0x3e000000;

	addr[0] = 0xc0800800 | rd;		/* lda [%g0] ASI_M_VIKING_TMP1, reg */
	addr[1] = 0x01000000;			/* nop */
	addr[2] = 0x01000000;			/* nop */
}

void __init smp4d_blackbox_current(unsigned *addr)
{
	int rd = *addr & 0x3e000000;

	addr[0] = 0xc0800800 | rd;		/* lda [%g0] ASI_M_VIKING_TMP1, reg */
	addr[2] = 0x81282002 | rd | (rd >> 11);	/* sll reg, 2, reg */
	addr[4] = 0x01000000;			/* nop */
}

void __init sun4d_init_smp(void)
{
	int i;
	extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[];

	/* Patch ipi15 trap table */
	t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);

	/* And set btfixup... */
	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
	BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
	BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_message_pass, smp4d_message_pass, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);

	for (i = 0; i < NR_CPUS; i++) {
		ccall_info.processors_in[i] = 1;
		ccall_info.processors_out[i] = 1;
	}
}