Diffstat (limited to 'arch/x86/mach-voyager/voyager_smp.c')
-rw-r--r-- | arch/x86/mach-voyager/voyager_smp.c | 1952 |
1 files changed, 1952 insertions, 0 deletions
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
new file mode 100644
index 000000000000..b87f8548e75a
--- /dev/null
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -0,0 +1,1952 @@
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ | ||
2 | |||
3 | /* Copyright (C) 1999,2001 | ||
4 | * | ||
5 | * Author: J.E.J.Bottomley@HansenPartnership.com | ||
6 | * | ||
7 | * linux/arch/i386/kernel/voyager_smp.c | ||
8 | * | ||
9 | * This file provides all the same external entries as smp.c but uses | ||
10 | * the voyager hal to provide the functionality | ||
11 | */ | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/kernel_stat.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/mc146818rtc.h> | ||
17 | #include <linux/cache.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | #include <linux/completion.h> | ||
23 | #include <asm/desc.h> | ||
24 | #include <asm/voyager.h> | ||
25 | #include <asm/vic.h> | ||
26 | #include <asm/mtrr.h> | ||
27 | #include <asm/pgalloc.h> | ||
28 | #include <asm/tlbflush.h> | ||
29 | #include <asm/arch_hooks.h> | ||
30 | |||
31 | /* TLB state -- visible externally, indexed physically */ | ||
32 | DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 }; | ||
33 | |||
34 | /* CPU IRQ affinity -- set to all ones initially */ | ||
35 | static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL }; | ||
36 | |||
37 | /* per CPU data structure (for /proc/cpuinfo et al), visible externally | ||
38 | * indexed physically */ | ||
39 | struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; | ||
40 | EXPORT_SYMBOL(cpu_data); | ||
41 | |||
42 | /* physical ID of the CPU used to boot the system */ | ||
43 | unsigned char boot_cpu_id; | ||
44 | |||
45 | /* The memory line addresses for the Quad CPIs */ | ||
46 | struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned; | ||
47 | |||
48 | /* The masks for the Extended VIC processors, filled in by cat_init */ | ||
49 | __u32 voyager_extended_vic_processors = 0; | ||
50 | |||
51 | /* Masks for the extended Quad processors which cannot be VIC booted */ | ||
52 | __u32 voyager_allowed_boot_processors = 0; | ||
53 | |||
54 | /* The mask for the Quad Processors (both extended and non-extended) */ | ||
55 | __u32 voyager_quad_processors = 0; | ||
56 | |||
57 | /* Total count of live CPUs, used in process.c to display | ||
58 | * the CPU information and in irq.c for the per CPU irq | ||
59 | * activity count. Finally exported by i386_ksyms.c */ | ||
60 | static int voyager_extended_cpus = 1; | ||
61 | |||
62 | /* Have we found an SMP box - used by time.c to do the profiling | ||
63 | interrupt for timeslicing; do not set to 1 until the per CPU timer | ||
64 | interrupt is active */ | ||
65 | int smp_found_config = 0; | ||
66 | |||
67 | /* Used for the invalidate map that's also checked in the spinlock */ | ||
68 | static volatile unsigned long smp_invalidate_needed; | ||
69 | |||
70 | /* Bitmask of currently online CPUs - used by setup.c for | ||
71 | /proc/cpuinfo, visible externally but still physical */ | ||
72 | cpumask_t cpu_online_map = CPU_MASK_NONE; | ||
73 | EXPORT_SYMBOL(cpu_online_map); | ||
74 | |||
75 | /* Bitmask of CPUs present in the system - exported by i386_ksyms.c, used | ||
76 | * by scheduler but indexed physically */ | ||
77 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; | ||
78 | |||
79 | |||
80 | /* The internal functions */ | ||
81 | static void send_CPI(__u32 cpuset, __u8 cpi); | ||
82 | static void ack_CPI(__u8 cpi); | ||
83 | static int ack_QIC_CPI(__u8 cpi); | ||
84 | static void ack_special_QIC_CPI(__u8 cpi); | ||
85 | static void ack_VIC_CPI(__u8 cpi); | ||
86 | static void send_CPI_allbutself(__u8 cpi); | ||
87 | static void mask_vic_irq(unsigned int irq); | ||
88 | static void unmask_vic_irq(unsigned int irq); | ||
89 | static unsigned int startup_vic_irq(unsigned int irq); | ||
90 | static void enable_local_vic_irq(unsigned int irq); | ||
91 | static void disable_local_vic_irq(unsigned int irq); | ||
92 | static void before_handle_vic_irq(unsigned int irq); | ||
93 | static void after_handle_vic_irq(unsigned int irq); | ||
94 | static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask); | ||
95 | static void ack_vic_irq(unsigned int irq); | ||
96 | static void vic_enable_cpi(void); | ||
97 | static void do_boot_cpu(__u8 cpuid); | ||
98 | static void do_quad_bootstrap(void); | ||
99 | |||
100 | int hard_smp_processor_id(void); | ||
101 | int safe_smp_processor_id(void); | ||
102 | |||
103 | /* Inline functions */ | ||
104 | static inline void | ||
105 | send_one_QIC_CPI(__u8 cpu, __u8 cpi) | ||
106 | { | ||
107 | voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi = | ||
108 | (smp_processor_id() << 16) + cpi; | ||
109 | } | ||
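/* For reference: the word written above packs the sender's physical CPU
 * id into the high 16 bits and the CPI number into the low bits, so a
 * receiver can unpack it as
 *
 *	sender = word >> 16;	/* smp_processor_id() of the sender */
 *	cpi    = word & 0xffff;	/* the CPI that was posted */
 */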
110 | |||
111 | static inline void | ||
112 | send_QIC_CPI(__u32 cpuset, __u8 cpi) | ||
113 | { | ||
114 | int cpu; | ||
115 | |||
116 | for_each_online_cpu(cpu) { | ||
117 | if(cpuset & (1<<cpu)) { | ||
118 | #ifdef VOYAGER_DEBUG | ||
119 | if(!cpu_isset(cpu, cpu_online_map)) | ||
120 | VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu)); | ||
121 | #endif | ||
122 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); | ||
123 | } | ||
124 | } | ||
125 | } | ||
126 | |||
127 | static inline void | ||
128 | wrapper_smp_local_timer_interrupt(void) | ||
129 | { | ||
130 | irq_enter(); | ||
131 | smp_local_timer_interrupt(); | ||
132 | irq_exit(); | ||
133 | } | ||
134 | |||
135 | static inline void | ||
136 | send_one_CPI(__u8 cpu, __u8 cpi) | ||
137 | { | ||
138 | if(voyager_quad_processors & (1<<cpu)) | ||
139 | send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); | ||
140 | else | ||
141 | send_CPI(1<<cpu, cpi); | ||
142 | } | ||
143 | |||
144 | static inline void | ||
145 | send_CPI_allbutself(__u8 cpi) | ||
146 | { | ||
147 | __u8 cpu = smp_processor_id(); | ||
148 | __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu); | ||
149 | send_CPI(mask, cpi); | ||
150 | } | ||
151 | |||
152 | static inline int | ||
153 | is_cpu_quad(void) | ||
154 | { | ||
155 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); | ||
156 | return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER); | ||
157 | } | ||
158 | |||
159 | static inline int | ||
160 | is_cpu_extended(void) | ||
161 | { | ||
162 | __u8 cpu = hard_smp_processor_id(); | ||
163 | |||
164 | return(voyager_extended_vic_processors & (1<<cpu)); | ||
165 | } | ||
166 | |||
167 | static inline int | ||
168 | is_cpu_vic_boot(void) | ||
169 | { | ||
170 | __u8 cpu = hard_smp_processor_id(); | ||
171 | |||
172 | return(voyager_extended_vic_processors | ||
173 | & voyager_allowed_boot_processors & (1<<cpu)); | ||
174 | } | ||
175 | |||
176 | |||
177 | static inline void | ||
178 | ack_CPI(__u8 cpi) | ||
179 | { | ||
180 | switch(cpi) { | ||
181 | case VIC_CPU_BOOT_CPI: | ||
182 | if(is_cpu_quad() && !is_cpu_vic_boot()) | ||
183 | ack_QIC_CPI(cpi); | ||
184 | else | ||
185 | ack_VIC_CPI(cpi); | ||
186 | break; | ||
187 | case VIC_SYS_INT: | ||
188 | case VIC_CMN_INT: | ||
189 | /* These are slightly strange. Even on the Quad card, | ||
190 | * they are vectored as VIC CPIs */ | ||
191 | if(is_cpu_quad()) | ||
192 | ack_special_QIC_CPI(cpi); | ||
193 | else | ||
194 | ack_VIC_CPI(cpi); | ||
195 | break; | ||
196 | default: | ||
197 | printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi); | ||
198 | break; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | /* local variables */ | ||
203 | |||
204 | /* The VIC IRQ descriptors -- these look almost identical to the | ||
205 | * 8259 IRQs except that masks and things must be kept per processor | ||
206 | */ | ||
207 | static struct irq_chip vic_chip = { | ||
208 | .name = "VIC", | ||
209 | .startup = startup_vic_irq, | ||
210 | .mask = mask_vic_irq, | ||
211 | .unmask = unmask_vic_irq, | ||
212 | .set_affinity = set_vic_irq_affinity, | ||
213 | }; | ||
214 | |||
215 | /* used to count up as CPUs are brought on line (starts at 0) */ | ||
216 | static int cpucount = 0; | ||
217 | |||
218 | /* steal a page from the bottom of memory for the trampoline and | ||
219 | * squirrel its address away here. This will be in kernel virtual | ||
220 | * space */ | ||
221 | static __u32 trampoline_base; | ||
222 | |||
223 | /* The per cpu profile stuff - used in smp_local_timer_interrupt */ | ||
224 | static DEFINE_PER_CPU(int, prof_multiplier) = 1; | ||
225 | static DEFINE_PER_CPU(int, prof_old_multiplier) = 1; | ||
226 | static DEFINE_PER_CPU(int, prof_counter) = 1; | ||
227 | |||
228 | /* the map used to check if a CPU has booted */ | ||
229 | static __u32 cpu_booted_map; | ||
230 | |||
231 | /* the synchronize flag used to hold all secondary CPUs spinning in | ||
232 | * a tight loop until the boot sequence is ready for them */ | ||
233 | static cpumask_t smp_commenced_mask = CPU_MASK_NONE; | ||
234 | |||
235 | /* This is for the new dynamic CPU boot code */ | ||
236 | cpumask_t cpu_callin_map = CPU_MASK_NONE; | ||
237 | cpumask_t cpu_callout_map = CPU_MASK_NONE; | ||
238 | EXPORT_SYMBOL(cpu_callout_map); | ||
239 | cpumask_t cpu_possible_map = CPU_MASK_NONE; | ||
240 | EXPORT_SYMBOL(cpu_possible_map); | ||
241 | |||
242 | /* The per processor IRQ masks (these are usually kept in sync) */ | ||
243 | static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; | ||
244 | |||
245 | /* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */ | ||
246 | static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 }; | ||
247 | |||
248 | /* Lock for enable/disable of VIC interrupts */ | ||
249 | static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock); | ||
250 | |||
251 | /* The boot processor is correctly set up in PC mode when it | ||
252 | * comes up, but the secondaries need their master/slave 8259 | ||
253 | * pairs initializing correctly */ | ||
254 | |||
255 | /* Interrupt counters (per cpu) and total - used to try to | ||
256 | * even up the interrupt handling routines */ | ||
257 | static long vic_intr_total = 0; | ||
258 | static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 }; | ||
259 | static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 }; | ||
260 | |||
261 | /* Since we can only use CPI0, we fake all the other CPIs */ | ||
262 | static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned; | ||
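/* The fake-CPI protocol in outline (the real code is send_CPI() and
 * smp_vic_cpi_interrupt() later in this file): the sender posts the
 * logical CPI as a mailbox bit and then raises the one real CPI,
 *
 *	set_bit(cpi, &vic_cpi_mailbox[cpu]);
 *	outb(1 << cpu, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
 *
 * and the receiver's level-0 handler test-and-clears each mailbox bit. */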
263 | |||
264 | /* debugging routine to read the isr of the cpu's pic */ | ||
265 | static inline __u16 | ||
266 | vic_read_isr(void) | ||
267 | { | ||
268 | __u16 isr; | ||
269 | |||
270 | outb(0x0b, 0xa0); | ||
271 | isr = inb(0xa0) << 8; | ||
272 | outb(0x0b, 0x20); | ||
273 | isr |= inb(0x20); | ||
274 | |||
275 | return isr; | ||
276 | } | ||
277 | |||
278 | static __init void | ||
279 | qic_setup(void) | ||
280 | { | ||
281 | if(!is_cpu_quad()) { | ||
282 | /* not a quad, no setup */ | ||
283 | return; | ||
284 | } | ||
285 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); | ||
286 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); | ||
287 | |||
288 | if(is_cpu_extended()) { | ||
289 | /* the QIC duplicate of the VIC base register */ | ||
290 | outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER); | ||
291 | outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER); | ||
292 | |||
293 | /* FIXME: should set up the QIC timer and memory parity | ||
294 | * error vectors here */ | ||
295 | } | ||
296 | } | ||
297 | |||
298 | static __init void | ||
299 | vic_setup_pic(void) | ||
300 | { | ||
301 | outb(1, VIC_REDIRECT_REGISTER_1); | ||
302 | /* clear the claim registers for dynamic routing */ | ||
303 | outb(0, VIC_CLAIM_REGISTER_0); | ||
304 | outb(0, VIC_CLAIM_REGISTER_1); | ||
305 | |||
306 | outb(0, VIC_PRIORITY_REGISTER); | ||
307 | /* Set the Primary and Secondary Microchannel vector | ||
308 | * bases to be the same as the ordinary interrupts | ||
309 | * | ||
310 | * FIXME: This would be more efficient using separate | ||
311 | * vectors. */ | ||
312 | outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE); | ||
313 | outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE); | ||
314 | /* Now initialise the master PIC belonging to this CPU by | ||
315 | * sending the four ICWs */ | ||
316 | |||
317 | /* ICW1: level triggered, ICW4 needed */ | ||
318 | outb(0x19, 0x20); | ||
319 | |||
320 | /* ICW2: vector base */ | ||
321 | outb(FIRST_EXTERNAL_VECTOR, 0x21); | ||
322 | |||
323 | /* ICW3: slave at line 2 */ | ||
324 | outb(0x04, 0x21); | ||
325 | |||
326 | /* ICW4: 8086 mode */ | ||
327 | outb(0x01, 0x21); | ||
328 | |||
329 | /* now the same for the slave PIC */ | ||
330 | |||
331 | /* ICW1: level trigger, ICW4 needed */ | ||
332 | outb(0x19, 0xA0); | ||
333 | |||
334 | /* ICW2: slave vector base */ | ||
335 | outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1); | ||
336 | |||
337 | /* ICW3: slave ID */ | ||
338 | outb(0x02, 0xA1); | ||
339 | |||
340 | /* ICW4: 8086 mode */ | ||
341 | outb(0x01, 0xA1); | ||
342 | } | ||
343 | |||
344 | static void | ||
345 | do_quad_bootstrap(void) | ||
346 | { | ||
347 | if(is_cpu_quad() && is_cpu_vic_boot()) { | ||
348 | int i; | ||
349 | unsigned long flags; | ||
350 | __u8 cpuid = hard_smp_processor_id(); | ||
351 | |||
352 | local_irq_save(flags); | ||
353 | |||
354 | for(i = 0; i<4; i++) { | ||
355 | /* FIXME: this would be >>3 &0x7 on the 32 way */ | ||
356 | if(((cpuid >> 2) & 0x03) == i) | ||
357 | /* don't lower our own mask! */ | ||
358 | continue; | ||
359 | |||
360 | /* masquerade as local Quad CPU */ | ||
361 | outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID); | ||
362 | /* enable the startup CPI */ | ||
363 | outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1); | ||
364 | /* restore cpu id */ | ||
365 | outb(0, QIC_PROCESSOR_ID); | ||
366 | } | ||
367 | local_irq_restore(flags); | ||
368 | } | ||
369 | } | ||
370 | |||
371 | |||
372 | /* Set up all the basic stuff: read the SMP config and make all the | ||
373 | * SMP information reflect only the boot cpu. All others will be | ||
374 | * brought on-line later. */ | ||
375 | void __init | ||
376 | find_smp_config(void) | ||
377 | { | ||
378 | int i; | ||
379 | |||
380 | boot_cpu_id = hard_smp_processor_id(); | ||
381 | |||
382 | printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); | ||
383 | |||
384 | /* initialize the CPU structures (moved from smp_boot_cpus) */ | ||
385 | for(i=0; i<NR_CPUS; i++) { | ||
386 | cpu_irq_affinity[i] = ~0; | ||
387 | } | ||
388 | cpu_online_map = cpumask_of_cpu(boot_cpu_id); | ||
389 | |||
390 | /* The boot CPU must be extended */ | ||
391 | voyager_extended_vic_processors = 1<<boot_cpu_id; | ||
392 | /* initially, all of the first 8 CPUs can boot */ | ||
393 | voyager_allowed_boot_processors = 0xff; | ||
394 | /* set up everything for just this CPU, we can alter | ||
395 | * this as we start the other CPUs later */ | ||
396 | /* now get the CPU disposition from the extended CMOS */ | ||
397 | cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK); | ||
398 | cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8; | ||
399 | cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16; | ||
400 | cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24; | ||
401 | cpu_possible_map = phys_cpu_present_map; | ||
402 | printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]); | ||
403 | /* Here we set up the VIC to enable SMP */ | ||
404 | /* enable the CPIs by writing the base vector to their register */ | ||
405 | outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER); | ||
406 | outb(1, VIC_REDIRECT_REGISTER_1); | ||
407 | /* set the claim registers for static routing --- Boot CPU gets | ||
408 | * all interrupts until all the other CPUs are started */ | ||
409 | outb(0xff, VIC_CLAIM_REGISTER_0); | ||
410 | outb(0xff, VIC_CLAIM_REGISTER_1); | ||
411 | /* Set the Primary and Secondary Microchannel vector | ||
412 | * bases to be the same as the ordinary interrupts | ||
413 | * | ||
414 | * FIXME: This would be more efficient using separate | ||
415 | * vectors. */ | ||
416 | outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE); | ||
417 | outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE); | ||
418 | |||
419 | /* Finally tell the firmware that we're driving */ | ||
420 | outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG, | ||
421 | VOYAGER_SUS_IN_CONTROL_PORT); | ||
422 | |||
423 | current_thread_info()->cpu = boot_cpu_id; | ||
424 | x86_write_percpu(cpu_number, boot_cpu_id); | ||
425 | } | ||
426 | |||
427 | /* | ||
428 | * The bootstrap kernel entry code has set these up. Save them | ||
429 | * for a given CPU, id is physical */ | ||
430 | void __init | ||
431 | smp_store_cpu_info(int id) | ||
432 | { | ||
433 | struct cpuinfo_x86 *c=&cpu_data[id]; | ||
434 | |||
435 | *c = boot_cpu_data; | ||
436 | |||
437 | identify_secondary_cpu(c); | ||
438 | } | ||
439 | |||
440 | /* set up the trampoline and return the physical address of the code */ | ||
441 | static __u32 __init | ||
442 | setup_trampoline(void) | ||
443 | { | ||
444 | /* these two are global symbols in trampoline.S */ | ||
445 | extern __u8 trampoline_end[]; | ||
446 | extern __u8 trampoline_data[]; | ||
447 | |||
448 | memcpy((__u8 *)trampoline_base, trampoline_data, | ||
449 | trampoline_end - trampoline_data); | ||
450 | return virt_to_phys((__u8 *)trampoline_base); | ||
451 | } | ||
452 | |||
453 | /* Routine initially called when a non-boot CPU is brought online */ | ||
454 | static void __init | ||
455 | start_secondary(void *unused) | ||
456 | { | ||
457 | __u8 cpuid = hard_smp_processor_id(); | ||
458 | /* external functions not defined in the headers */ | ||
459 | extern void calibrate_delay(void); | ||
460 | |||
461 | cpu_init(); | ||
462 | |||
463 | /* OK, we're in the routine */ | ||
464 | ack_CPI(VIC_CPU_BOOT_CPI); | ||
465 | |||
466 | /* setup the 8259 master slave pair belonging to this CPU --- | ||
467 | * we won't actually receive any until the boot CPU | ||
468 | * relinquishes its static routing mask */ | ||
469 | vic_setup_pic(); | ||
470 | |||
471 | qic_setup(); | ||
472 | |||
473 | if(is_cpu_quad() && !is_cpu_vic_boot()) { | ||
474 | /* clear the boot CPI */ | ||
475 | __u8 dummy; | ||
476 | |||
477 | dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi; | ||
478 | printk("read dummy %d\n", dummy); | ||
479 | } | ||
480 | |||
481 | /* lower the mask to receive CPIs */ | ||
482 | vic_enable_cpi(); | ||
483 | |||
484 | VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid)); | ||
485 | |||
486 | /* enable interrupts */ | ||
487 | local_irq_enable(); | ||
488 | |||
489 | /* get our bogomips */ | ||
490 | calibrate_delay(); | ||
491 | |||
492 | /* save our processor parameters */ | ||
493 | smp_store_cpu_info(cpuid); | ||
494 | |||
495 | /* if we're a quad, we may need to bootstrap other CPUs */ | ||
496 | do_quad_bootstrap(); | ||
497 | |||
498 | /* FIXME: this is rather a poor hack to prevent the CPU | ||
499 | * activating softirqs while it's supposed to be waiting for | ||
500 | * permission to proceed. Without this, the new per CPU stuff | ||
501 | * in the softirqs will fail */ | ||
502 | local_irq_disable(); | ||
503 | cpu_set(cpuid, cpu_callin_map); | ||
504 | |||
505 | /* signal that we're done */ | ||
506 | cpu_booted_map = 1; | ||
507 | |||
508 | while (!cpu_isset(cpuid, smp_commenced_mask)) | ||
509 | rep_nop(); | ||
510 | local_irq_enable(); | ||
511 | |||
512 | local_flush_tlb(); | ||
513 | |||
514 | cpu_set(cpuid, cpu_online_map); | ||
515 | wmb(); | ||
516 | cpu_idle(); | ||
517 | } | ||
518 | |||
519 | |||
520 | /* Routine to kick start the given CPU and wait for it to report ready | ||
521 | * (or timeout in startup). When this routine returns, the requested | ||
522 | * CPU is either fully running and configured or known to be dead. | ||
523 | * | ||
524 | * We call this routine sequentially 1 CPU at a time, so no need for | ||
525 | * locking */ | ||
526 | |||
527 | static void __init | ||
528 | do_boot_cpu(__u8 cpu) | ||
529 | { | ||
530 | struct task_struct *idle; | ||
531 | int timeout; | ||
532 | unsigned long flags; | ||
533 | int quad_boot = (1<<cpu) & voyager_quad_processors | ||
534 | & ~( voyager_extended_vic_processors | ||
535 | & voyager_allowed_boot_processors); | ||
536 | |||
537 | /* This is an area in head.S which was used to set up the | ||
538 | * initial kernel stack. We need to alter this to give the | ||
539 | * booting CPU a new stack (taken from its idle process) */ | ||
540 | extern struct { | ||
541 | __u8 *esp; | ||
542 | unsigned short ss; | ||
543 | } stack_start; | ||
544 | /* This is the format of the CPI IDT gate (in real mode) which | ||
545 | * we're hijacking to boot the CPU */ | ||
546 | union IDTFormat { | ||
547 | struct seg { | ||
548 | __u16 Offset; | ||
549 | __u16 Segment; | ||
550 | } idt; | ||
551 | __u32 val; | ||
552 | } hijack_source; | ||
553 | |||
554 | __u32 *hijack_vector; | ||
555 | __u32 start_phys_address = setup_trampoline(); | ||
556 | |||
557 | /* There's a clever trick to this: The linux trampoline is | ||
558 | * compiled to begin at absolute location zero, so make the | ||
559 | * address zero but have the data segment selector compensate | ||
560 | * for the actual address */ | ||
561 | hijack_source.idt.Offset = start_phys_address & 0x000F; | ||
562 | hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF; | ||
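	/* Checking the arithmetic: real mode resolves Segment*16 + Offset,
	 * so for any sub-1MB trampoline this pair reconstructs
	 * start_phys_address exactly; e.g. 0x9e000 splits into Segment
	 * 0x9e00 and Offset 0x0, and 0x9e00 * 16 + 0x0 == 0x9e000. */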
563 | |||
564 | cpucount++; | ||
565 | alternatives_smp_switch(1); | ||
566 | |||
567 | idle = fork_idle(cpu); | ||
568 | if(IS_ERR(idle)) | ||
569 | panic("failed fork for CPU%d", cpu); | ||
570 | idle->thread.eip = (unsigned long) start_secondary; | ||
571 | /* init_tasks (in sched.c) is indexed logically */ | ||
572 | stack_start.esp = (void *) idle->thread.esp; | ||
573 | |||
574 | init_gdt(cpu); | ||
575 | per_cpu(current_task, cpu) = idle; | ||
576 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); | ||
577 | irq_ctx_init(cpu); | ||
578 | |||
579 | /* Note: Don't modify initial ss override */ | ||
580 | VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, | ||
581 | (unsigned long)hijack_source.val, hijack_source.idt.Segment, | ||
582 | hijack_source.idt.Offset, stack_start.esp)); | ||
583 | |||
584 | /* init lowmem identity mapping */ | ||
585 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, | ||
586 | min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); | ||
587 | flush_tlb_all(); | ||
588 | |||
589 | if(quad_boot) { | ||
590 | printk("CPU %d: non extended Quad boot\n", cpu); | ||
591 | hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4); | ||
592 | *hijack_vector = hijack_source.val; | ||
593 | } else { | ||
594 | printk("CPU%d: extended VIC boot\n", cpu); | ||
595 | hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4); | ||
596 | *hijack_vector = hijack_source.val; | ||
597 | /* VIC errata, may also receive interrupt at this address */ | ||
598 | hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4); | ||
599 | *hijack_vector = hijack_source.val; | ||
600 | } | ||
601 | /* All non-boot CPUs start with interrupts fully masked. Need | ||
602 | * to lower the mask of the CPI we're about to send. We do | ||
603 | * this in the VIC by masquerading as the processor we're | ||
604 | * about to boot and lowering its interrupt mask */ | ||
605 | local_irq_save(flags); | ||
606 | if(quad_boot) { | ||
607 | send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI); | ||
608 | } else { | ||
609 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); | ||
610 | /* here we're altering registers belonging to `cpu' */ | ||
611 | |||
612 | outb(VIC_BOOT_INTERRUPT_MASK, 0x21); | ||
613 | /* now go back to our original identity */ | ||
614 | outb(boot_cpu_id, VIC_PROCESSOR_ID); | ||
615 | |||
616 | /* and boot the CPU */ | ||
617 | |||
618 | send_CPI((1<<cpu), VIC_CPU_BOOT_CPI); | ||
619 | } | ||
620 | cpu_booted_map = 0; | ||
621 | local_irq_restore(flags); | ||
622 | |||
623 | /* now wait for it to become ready (or timeout) */ | ||
624 | for(timeout = 0; timeout < 50000; timeout++) { | ||
625 | if(cpu_booted_map) | ||
626 | break; | ||
627 | udelay(100); | ||
628 | } | ||
629 | /* reset the page table */ | ||
630 | zap_low_mappings(); | ||
631 | |||
632 | if (cpu_booted_map) { | ||
633 | VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n", | ||
634 | cpu, smp_processor_id())); | ||
635 | |||
636 | printk("CPU%d: ", cpu); | ||
637 | print_cpu_info(&cpu_data[cpu]); | ||
638 | wmb(); | ||
639 | cpu_set(cpu, cpu_callout_map); | ||
640 | cpu_set(cpu, cpu_present_map); | ||
641 | } | ||
642 | else { | ||
643 | printk("CPU%d FAILED TO BOOT: ", cpu); | ||
644 | if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5) | ||
645 | printk("Stuck.\n"); | ||
646 | else | ||
647 | printk("Not responding.\n"); | ||
648 | |||
649 | cpucount--; | ||
650 | } | ||
651 | } | ||
652 | |||
653 | void __init | ||
654 | smp_boot_cpus(void) | ||
655 | { | ||
656 | int i; | ||
657 | |||
658 | /* CAT BUS initialisation must be done after the memory is set up */ | ||
659 | /* FIXME: The L4 has a catbus too, it just needs to be | ||
660 | * accessed in a totally different way */ | ||
661 | if(voyager_level == 5) { | ||
662 | voyager_cat_init(); | ||
663 | |||
664 | /* now that the cat has probed the Voyager System Bus, sanity | ||
665 | * check the cpu map */ | ||
666 | if( ((voyager_quad_processors | voyager_extended_vic_processors) | ||
667 | & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) { | ||
668 | /* should panic */ | ||
669 | printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n"); | ||
670 | } | ||
671 | } else if(voyager_level == 4) | ||
672 | voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0]; | ||
673 | |||
674 | /* this sets up the idle task to run on the current cpu */ | ||
675 | voyager_extended_cpus = 1; | ||
676 | /* Remove the global_irq_holder setting, it triggers a BUG() on | ||
677 | * schedule at the moment */ | ||
678 | //global_irq_holder = boot_cpu_id; | ||
679 | |||
680 | /* FIXME: Need to do something about this but currently only works | ||
681 | * on CPUs with a tsc which none of mine have. | ||
682 | smp_tune_scheduling(); | ||
683 | */ | ||
684 | smp_store_cpu_info(boot_cpu_id); | ||
685 | printk("CPU%d: ", boot_cpu_id); | ||
686 | print_cpu_info(&cpu_data[boot_cpu_id]); | ||
687 | |||
688 | if(is_cpu_quad()) { | ||
689 | /* booting on a Quad CPU */ | ||
690 | printk("VOYAGER SMP: Boot CPU is Quad\n"); | ||
691 | qic_setup(); | ||
692 | do_quad_bootstrap(); | ||
693 | } | ||
694 | |||
695 | /* enable our own CPIs */ | ||
696 | vic_enable_cpi(); | ||
697 | |||
698 | cpu_set(boot_cpu_id, cpu_online_map); | ||
699 | cpu_set(boot_cpu_id, cpu_callout_map); | ||
700 | |||
701 | /* loop over all the extended VIC CPUs and boot them. The | ||
702 | * Quad CPUs must be bootstrapped by their extended VIC cpu */ | ||
703 | for(i = 0; i < NR_CPUS; i++) { | ||
704 | if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) | ||
705 | continue; | ||
706 | do_boot_cpu(i); | ||
707 | /* This udelay seems to be needed for the Quad boots; | ||
708 | * don't remove unless you know what you're doing */ | ||
709 | udelay(1000); | ||
710 | } | ||
711 | /* we could compute the total bogomips here, but why bother? | ||
712 | * Code added from smpboot.c */ | ||
713 | { | ||
714 | unsigned long bogosum = 0; | ||
715 | for (i = 0; i < NR_CPUS; i++) | ||
716 | if (cpu_isset(i, cpu_online_map)) | ||
717 | bogosum += cpu_data[i].loops_per_jiffy; | ||
718 | printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | ||
719 | cpucount+1, | ||
720 | bogosum/(500000/HZ), | ||
721 | (bogosum/(5000/HZ))%100); | ||
722 | } | ||
723 | voyager_extended_cpus = hweight32(voyager_extended_vic_processors); | ||
724 | printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n", voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus); | ||
725 | /* that's it, switch to symmetric mode */ | ||
726 | outb(0, VIC_PRIORITY_REGISTER); | ||
727 | outb(0, VIC_CLAIM_REGISTER_0); | ||
728 | outb(0, VIC_CLAIM_REGISTER_1); | ||
729 | |||
730 | VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus())); | ||
731 | } | ||
732 | |||
733 | /* Reload the secondary CPU's task structure (this function does not | ||
734 | * return) */ | ||
735 | void __init | ||
736 | initialize_secondary(void) | ||
737 | { | ||
738 | #if 0 | ||
739 | // AC kernels only | ||
740 | set_current(hard_get_current()); | ||
741 | #endif | ||
742 | |||
743 | /* | ||
744 | * We don't actually need to load the full TSS, | ||
745 | * basically just the stack pointer and the eip. | ||
746 | */ | ||
747 | |||
748 | asm volatile( | ||
749 | "movl %0,%%esp\n\t" | ||
750 | "jmp *%1" | ||
751 | : | ||
752 | :"r" (current->thread.esp),"r" (current->thread.eip)); | ||
753 | } | ||
754 | |||
755 | /* handle a Voyager SYS_INT -- If we don't, the base board will | ||
756 | * panic the system. | ||
757 | * | ||
758 | * System interrupts occur because some problem was detected on the | ||
759 | * various busses. To find out what you have to probe all the | ||
760 | * hardware via the CAT bus. FIXME: At the moment we do nothing. */ | ||
761 | fastcall void | ||
762 | smp_vic_sys_interrupt(struct pt_regs *regs) | ||
763 | { | ||
764 | ack_CPI(VIC_SYS_INT); | ||
765 | printk("Voyager SYSTEM INTERRUPT\n"); | ||
766 | } | ||
767 | |||
768 | /* Handle a voyager CMN_INT; these interrupts occur either because of | ||
769 | * a system status change or because a single bit memory error | ||
770 | * occurred. FIXME: At the moment, ignore all this. */ | ||
771 | fastcall void | ||
772 | smp_vic_cmn_interrupt(struct pt_regs *regs) | ||
773 | { | ||
774 | static __u8 in_cmn_int = 0; | ||
775 | static DEFINE_SPINLOCK(cmn_int_lock); | ||
776 | |||
777 | /* common ints are broadcast, so make sure we only do this once */ | ||
778 | _raw_spin_lock(&cmn_int_lock); | ||
779 | if(in_cmn_int) | ||
780 | goto unlock_end; | ||
781 | |||
782 | in_cmn_int++; | ||
783 | _raw_spin_unlock(&cmn_int_lock); | ||
784 | |||
785 | VDEBUG(("Voyager COMMON INTERRUPT\n")); | ||
786 | |||
787 | if(voyager_level == 5) | ||
788 | voyager_cat_do_common_interrupt(); | ||
789 | |||
790 | _raw_spin_lock(&cmn_int_lock); | ||
791 | in_cmn_int = 0; | ||
792 | unlock_end: | ||
793 | _raw_spin_unlock(&cmn_int_lock); | ||
794 | ack_CPI(VIC_CMN_INT); | ||
795 | } | ||
796 | |||
797 | /* | ||
798 | * Reschedule call back. Nothing to do, all the work is done | ||
799 | * automatically when we return from the interrupt. */ | ||
800 | static void | ||
801 | smp_reschedule_interrupt(void) | ||
802 | { | ||
803 | /* do nothing */ | ||
804 | } | ||
805 | |||
806 | static struct mm_struct * flush_mm; | ||
807 | static unsigned long flush_va; | ||
808 | static DEFINE_SPINLOCK(tlbstate_lock); | ||
809 | #define FLUSH_ALL 0xffffffff | ||
810 | |||
811 | /* | ||
812 | * We cannot call mmdrop() because we are in interrupt context; | ||
813 | * instead update mm->cpu_vm_mask. | ||
814 | * | ||
815 | * We need to reload %cr3 since the page tables may be going | ||
816 | * away from under us.. | ||
817 | */ | ||
818 | static inline void | ||
819 | leave_mm (unsigned long cpu) | ||
820 | { | ||
821 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) | ||
822 | BUG(); | ||
823 | cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); | ||
824 | load_cr3(swapper_pg_dir); | ||
825 | } | ||
826 | |||
827 | |||
828 | /* | ||
829 | * Invalidate call-back | ||
830 | */ | ||
831 | static void | ||
832 | smp_invalidate_interrupt(void) | ||
833 | { | ||
834 | __u8 cpu = smp_processor_id(); | ||
835 | |||
836 | if (!test_bit(cpu, &smp_invalidate_needed)) | ||
837 | return; | ||
838 | /* This will flood messages. Don't uncomment unless you see | ||
839 | * problems with cross-cpu invalidation | ||
840 | VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n", | ||
841 | smp_processor_id())); | ||
842 | */ | ||
843 | |||
844 | if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { | ||
845 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { | ||
846 | if (flush_va == FLUSH_ALL) | ||
847 | local_flush_tlb(); | ||
848 | else | ||
849 | __flush_tlb_one(flush_va); | ||
850 | } else | ||
851 | leave_mm(cpu); | ||
852 | } | ||
853 | smp_mb__before_clear_bit(); | ||
854 | clear_bit(cpu, &smp_invalidate_needed); | ||
855 | smp_mb__after_clear_bit(); | ||
856 | } | ||
857 | |||
858 | /* All the new flush operations for 2.4 */ | ||
859 | |||
860 | |||
861 | /* This routine is called with a physical cpu mask */ | ||
862 | static void | ||
863 | voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, | ||
864 | unsigned long va) | ||
865 | { | ||
866 | int stuck = 50000; | ||
867 | |||
868 | if (!cpumask) | ||
869 | BUG(); | ||
870 | if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask) | ||
871 | BUG(); | ||
872 | if (cpumask & (1 << smp_processor_id())) | ||
873 | BUG(); | ||
874 | if (!mm) | ||
875 | BUG(); | ||
876 | |||
877 | spin_lock(&tlbstate_lock); | ||
878 | |||
879 | flush_mm = mm; | ||
880 | flush_va = va; | ||
881 | atomic_set_mask(cpumask, &smp_invalidate_needed); | ||
882 | /* | ||
883 | * We have to send the CPI only to | ||
884 | * CPUs affected. | ||
885 | */ | ||
886 | send_CPI(cpumask, VIC_INVALIDATE_CPI); | ||
887 | |||
888 | while (smp_invalidate_needed) { | ||
889 | mb(); | ||
890 | if(--stuck == 0) { | ||
891 | printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id()); | ||
892 | break; | ||
893 | } | ||
894 | } | ||
895 | |||
896 | /* Uncomment only to debug invalidation problems | ||
897 | VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu)); | ||
898 | */ | ||
899 | |||
900 | flush_mm = NULL; | ||
901 | flush_va = 0; | ||
902 | spin_unlock(&tlbstate_lock); | ||
903 | } | ||
904 | |||
905 | void | ||
906 | flush_tlb_current_task(void) | ||
907 | { | ||
908 | struct mm_struct *mm = current->mm; | ||
909 | unsigned long cpu_mask; | ||
910 | |||
911 | preempt_disable(); | ||
912 | |||
913 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | ||
914 | local_flush_tlb(); | ||
915 | if (cpu_mask) | ||
916 | voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL); | ||
917 | |||
918 | preempt_enable(); | ||
919 | } | ||
920 | |||
921 | |||
922 | void | ||
923 | flush_tlb_mm (struct mm_struct * mm) | ||
924 | { | ||
925 | unsigned long cpu_mask; | ||
926 | |||
927 | preempt_disable(); | ||
928 | |||
929 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | ||
930 | |||
931 | if (current->active_mm == mm) { | ||
932 | if (current->mm) | ||
933 | local_flush_tlb(); | ||
934 | else | ||
935 | leave_mm(smp_processor_id()); | ||
936 | } | ||
937 | if (cpu_mask) | ||
938 | voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL); | ||
939 | |||
940 | preempt_enable(); | ||
941 | } | ||
942 | |||
943 | void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) | ||
944 | { | ||
945 | struct mm_struct *mm = vma->vm_mm; | ||
946 | unsigned long cpu_mask; | ||
947 | |||
948 | preempt_disable(); | ||
949 | |||
950 | cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); | ||
951 | if (current->active_mm == mm) { | ||
952 | if(current->mm) | ||
953 | __flush_tlb_one(va); | ||
954 | else | ||
955 | leave_mm(smp_processor_id()); | ||
956 | } | ||
957 | |||
958 | if (cpu_mask) | ||
959 | voyager_flush_tlb_others(cpu_mask, mm, va); | ||
960 | |||
961 | preempt_enable(); | ||
962 | } | ||
963 | EXPORT_SYMBOL(flush_tlb_page); | ||
964 | |||
965 | /* enable the requested IRQs */ | ||
966 | static void | ||
967 | smp_enable_irq_interrupt(void) | ||
968 | { | ||
969 | __u8 irq; | ||
970 | __u8 cpu = get_cpu(); | ||
971 | |||
972 | VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu, | ||
973 | vic_irq_enable_mask[cpu])); | ||
974 | |||
975 | spin_lock(&vic_irq_lock); | ||
976 | for(irq = 0; irq < 16; irq++) { | ||
977 | if(vic_irq_enable_mask[cpu] & (1<<irq)) | ||
978 | enable_local_vic_irq(irq); | ||
979 | } | ||
980 | vic_irq_enable_mask[cpu] = 0; | ||
981 | spin_unlock(&vic_irq_lock); | ||
982 | |||
983 | put_cpu_no_resched(); | ||
984 | } | ||
985 | |||
986 | /* | ||
987 | * CPU halt call-back | ||
988 | */ | ||
989 | static void | ||
990 | smp_stop_cpu_function(void *dummy) | ||
991 | { | ||
992 | VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id())); | ||
993 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
994 | local_irq_disable(); | ||
995 | for(;;) | ||
996 | halt(); | ||
997 | } | ||
998 | |||
999 | static DEFINE_SPINLOCK(call_lock); | ||
1000 | |||
1001 | struct call_data_struct { | ||
1002 | void (*func) (void *info); | ||
1003 | void *info; | ||
1004 | volatile unsigned long started; | ||
1005 | volatile unsigned long finished; | ||
1006 | int wait; | ||
1007 | }; | ||
1008 | |||
1009 | static struct call_data_struct * call_data; | ||
1010 | |||
1011 | /* execute a thread on a new CPU. The function to be called must be | ||
1012 | * previously set up. This is used to schedule a function for | ||
1013 | * execution on all CPUs - set up the function then broadcast a | ||
1014 | * function_interrupt CPI to come here on each CPU */ | ||
1015 | static void | ||
1016 | smp_call_function_interrupt(void) | ||
1017 | { | ||
1018 | void (*func) (void *info) = call_data->func; | ||
1019 | void *info = call_data->info; | ||
1020 | /* must take copy of wait because call_data may be replaced | ||
1021 | * unless the function is waiting for us to finish */ | ||
1022 | int wait = call_data->wait; | ||
1023 | __u8 cpu = smp_processor_id(); | ||
1024 | |||
1025 | /* | ||
1026 | * Notify initiating CPU that I've grabbed the data and am | ||
1027 | * about to execute the function | ||
1028 | */ | ||
1029 | mb(); | ||
1030 | if(!test_and_clear_bit(cpu, &call_data->started)) { | ||
1031 | /* If the bit wasn't set, this could be a replay */ | ||
1032 | printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function with no call pending\n", cpu); | ||
1033 | return; | ||
1034 | } | ||
1035 | /* | ||
1036 | * At this point the info structure may be out of scope unless wait==1 | ||
1037 | */ | ||
1038 | irq_enter(); | ||
1039 | (*func)(info); | ||
1040 | irq_exit(); | ||
1041 | if (wait) { | ||
1042 | mb(); | ||
1043 | clear_bit(cpu, &call_data->finished); | ||
1044 | } | ||
1045 | } | ||
1046 | |||
1047 | static int | ||
1048 | voyager_smp_call_function_mask (cpumask_t cpumask, | ||
1049 | void (*func) (void *info), void *info, | ||
1050 | int wait) | ||
1051 | { | ||
1052 | struct call_data_struct data; | ||
1053 | u32 mask = cpus_addr(cpumask)[0]; | ||
1054 | |||
1055 | mask &= ~(1<<smp_processor_id()); | ||
1056 | |||
1057 | if (!mask) | ||
1058 | return 0; | ||
1059 | |||
1060 | /* Can deadlock when called with interrupts disabled */ | ||
1061 | WARN_ON(irqs_disabled()); | ||
1062 | |||
1063 | data.func = func; | ||
1064 | data.info = info; | ||
1065 | data.started = mask; | ||
1066 | data.wait = wait; | ||
1067 | if (wait) | ||
1068 | data.finished = mask; | ||
1069 | |||
1070 | spin_lock(&call_lock); | ||
1071 | call_data = &data; | ||
1072 | wmb(); | ||
1073 | /* Send a message to all other CPUs and wait for them to respond */ | ||
1074 | send_CPI(mask, VIC_CALL_FUNCTION_CPI); | ||
1075 | |||
1076 | /* Wait for response */ | ||
1077 | while (data.started) | ||
1078 | barrier(); | ||
1079 | |||
1080 | if (wait) | ||
1081 | while (data.finished) | ||
1082 | barrier(); | ||
1083 | |||
1084 | spin_unlock(&call_lock); | ||
1085 | |||
1086 | return 0; | ||
1087 | } | ||
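/* Typical use, e.g. from voyager_smp_send_stop() below:
 *
 *	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
 *
 * which broadcasts VIC_CALL_FUNCTION_CPI and then spins until every
 * other online CPU has cleared its bit in data.started (and, since
 * wait == 1, in data.finished too). */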
1088 | |||
1089 | /* Sorry about the name. In an APIC based system, the APICs | ||
1090 | * themselves are programmed to send a timer interrupt. This is used | ||
1091 | * by linux to reschedule the processor. Voyager doesn't have this, | ||
1092 | * so we use the system clock to interrupt one processor, which in | ||
1093 | * turn, broadcasts a timer CPI to all the others --- we receive that | ||
1094 | * CPI here. We don't actually use this for counting, so losing | ||
1095 | * ticks doesn't matter | ||
1096 | * | ||
1097 | * FIXME: For those CPUs which actually have a local APIC, we could | ||
1098 | * try to use it to trigger this interrupt instead of having to | ||
1099 | * broadcast the timer tick. Unfortunately, all my pentium DYADs have | ||
1100 | * no local APIC, so I can't do this | ||
1101 | * | ||
1102 | * This function is currently a placeholder and is unused in the code */ | ||
1103 | fastcall void | ||
1104 | smp_apic_timer_interrupt(struct pt_regs *regs) | ||
1105 | { | ||
1106 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
1107 | wrapper_smp_local_timer_interrupt(); | ||
1108 | set_irq_regs(old_regs); | ||
1109 | } | ||
1110 | |||
1111 | /* All of the QUAD interrupt GATES */ | ||
1112 | fastcall void | ||
1113 | smp_qic_timer_interrupt(struct pt_regs *regs) | ||
1114 | { | ||
1115 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
1116 | ack_QIC_CPI(QIC_TIMER_CPI); | ||
1117 | wrapper_smp_local_timer_interrupt(); | ||
1118 | set_irq_regs(old_regs); | ||
1119 | } | ||
1120 | |||
1121 | fastcall void | ||
1122 | smp_qic_invalidate_interrupt(struct pt_regs *regs) | ||
1123 | { | ||
1124 | ack_QIC_CPI(QIC_INVALIDATE_CPI); | ||
1125 | smp_invalidate_interrupt(); | ||
1126 | } | ||
1127 | |||
1128 | fastcall void | ||
1129 | smp_qic_reschedule_interrupt(struct pt_regs *regs) | ||
1130 | { | ||
1131 | ack_QIC_CPI(QIC_RESCHEDULE_CPI); | ||
1132 | smp_reschedule_interrupt(); | ||
1133 | } | ||
1134 | |||
1135 | fastcall void | ||
1136 | smp_qic_enable_irq_interrupt(struct pt_regs *regs) | ||
1137 | { | ||
1138 | ack_QIC_CPI(QIC_ENABLE_IRQ_CPI); | ||
1139 | smp_enable_irq_interrupt(); | ||
1140 | } | ||
1141 | |||
1142 | fastcall void | ||
1143 | smp_qic_call_function_interrupt(struct pt_regs *regs) | ||
1144 | { | ||
1145 | ack_QIC_CPI(QIC_CALL_FUNCTION_CPI); | ||
1146 | smp_call_function_interrupt(); | ||
1147 | } | ||
1148 | |||
1149 | fastcall void | ||
1150 | smp_vic_cpi_interrupt(struct pt_regs *regs) | ||
1151 | { | ||
1152 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
1153 | __u8 cpu = smp_processor_id(); | ||
1154 | |||
1155 | if(is_cpu_quad()) | ||
1156 | ack_QIC_CPI(VIC_CPI_LEVEL0); | ||
1157 | else | ||
1158 | ack_VIC_CPI(VIC_CPI_LEVEL0); | ||
1159 | |||
1160 | if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu])) | ||
1161 | wrapper_smp_local_timer_interrupt(); | ||
1162 | if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu])) | ||
1163 | smp_invalidate_interrupt(); | ||
1164 | if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu])) | ||
1165 | smp_reschedule_interrupt(); | ||
1166 | if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu])) | ||
1167 | smp_enable_irq_interrupt(); | ||
1168 | if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) | ||
1169 | smp_call_function_interrupt(); | ||
1170 | set_irq_regs(old_regs); | ||
1171 | } | ||
1172 | |||
1173 | static void | ||
1174 | do_flush_tlb_all(void* info) | ||
1175 | { | ||
1176 | unsigned long cpu = smp_processor_id(); | ||
1177 | |||
1178 | __flush_tlb_all(); | ||
1179 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY) | ||
1180 | leave_mm(cpu); | ||
1181 | } | ||
1182 | |||
1183 | |||
1184 | /* flush the TLB of every active CPU in the system */ | ||
1185 | void | ||
1186 | flush_tlb_all(void) | ||
1187 | { | ||
1188 | on_each_cpu(do_flush_tlb_all, 0, 1, 1); | ||
1189 | } | ||
1190 | |||
1191 | /* used to set up the trampoline for other CPUs when the memory manager | ||
1192 | * is sorted out */ | ||
1193 | void __init | ||
1194 | smp_alloc_memory(void) | ||
1195 | { | ||
1196 | trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE); | ||
1197 | if(__pa(trampoline_base) >= 0x93000) | ||
1198 | BUG(); | ||
1199 | } | ||
1200 | |||
1201 | /* send a reschedule CPI to one CPU by physical CPU number */ | ||
1202 | static void | ||
1203 | voyager_smp_send_reschedule(int cpu) | ||
1204 | { | ||
1205 | send_one_CPI(cpu, VIC_RESCHEDULE_CPI); | ||
1206 | } | ||
1207 | |||
1208 | |||
1209 | int | ||
1210 | hard_smp_processor_id(void) | ||
1211 | { | ||
1212 | __u8 i; | ||
1213 | __u8 cpumask = inb(VIC_PROC_WHO_AM_I); | ||
1214 | if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER) | ||
1215 | return cpumask & 0x1F; | ||
1216 | |||
1217 | for(i = 0; i < 8; i++) { | ||
1218 | if(cpumask & (1<<i)) | ||
1219 | return i; | ||
1220 | } | ||
1221 | printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask); | ||
1222 | return 0; | ||
1223 | } | ||
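/* So VIC_PROC_WHO_AM_I has two formats: on a quad the QUAD_IDENTIFIER
 * bits are set and the low five bits carry the CPU id directly, while
 * on a VIC processor the register is a one-hot mask; a reading of
 * 0x08 in the loop above decodes to CPU 3, for example. */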
1224 | |||
1225 | int | ||
1226 | safe_smp_processor_id(void) | ||
1227 | { | ||
1228 | return hard_smp_processor_id(); | ||
1229 | } | ||
1230 | |||
1231 | /* broadcast a halt to all other CPUs */ | ||
1232 | static void | ||
1233 | voyager_smp_send_stop(void) | ||
1234 | { | ||
1235 | smp_call_function(smp_stop_cpu_function, NULL, 1, 1); | ||
1236 | } | ||
1237 | |||
1238 | /* this function is triggered in time.c when a clock tick fires | ||
1239 | * we need to re-broadcast the tick to all CPUs */ | ||
1240 | void | ||
1241 | smp_vic_timer_interrupt(void) | ||
1242 | { | ||
1243 | send_CPI_allbutself(VIC_TIMER_CPI); | ||
1244 | smp_local_timer_interrupt(); | ||
1245 | } | ||
1246 | |||
1247 | /* local (per CPU) timer interrupt. It does both profiling and | ||
1248 | * process statistics/rescheduling. | ||
1249 | * | ||
1250 | * We do profiling in every local tick, statistics/rescheduling | ||
1251 | * happen only every 'profiling multiplier' ticks. The default | ||
1252 | * multiplier is 1 and it can be changed by writing the new multiplier | ||
1253 | * value into /proc/profile. | ||
1254 | */ | ||
1255 | void | ||
1256 | smp_local_timer_interrupt(void) | ||
1257 | { | ||
1258 | int cpu = smp_processor_id(); | ||
1259 | long weight; | ||
1260 | |||
1261 | profile_tick(CPU_PROFILING); | ||
1262 | if (--per_cpu(prof_counter, cpu) <= 0) { | ||
1263 | /* | ||
1264 | * The multiplier may have changed since the last time we got | ||
1265 | * to this point as a result of the user writing to | ||
1266 | * /proc/profile. In this case we need to adjust the APIC | ||
1267 | * timer accordingly. | ||
1268 | * | ||
1269 | * Interrupts are already masked off at this point. | ||
1270 | */ | ||
1271 | per_cpu(prof_counter,cpu) = per_cpu(prof_multiplier, cpu); | ||
1272 | if (per_cpu(prof_counter, cpu) != | ||
1273 | per_cpu(prof_old_multiplier, cpu)) { | ||
1274 | /* FIXME: need to update the vic timer tick here */ | ||
1275 | per_cpu(prof_old_multiplier, cpu) = | ||
1276 | per_cpu(prof_counter, cpu); | ||
1277 | } | ||
1278 | |||
1279 | update_process_times(user_mode_vm(get_irq_regs())); | ||
1280 | } | ||
1281 | |||
1282 | if( ((1<<cpu) & voyager_extended_vic_processors) == 0) | ||
1283 | /* only extended VIC processors participate in | ||
1284 | * interrupt distribution */ | ||
1285 | return; | ||
1286 | |||
1287 | /* | ||
1288 | * We take the 'long' return path, and there every subsystem | ||
1289 | * grabs the appropriate locks (kernel lock / irq lock). | ||
1290 | * | ||
1291 | * we might want to decouple profiling from the 'long path', | ||
1292 | * and do the profiling totally in assembly. | ||
1293 | * | ||
1294 | * Currently this isn't too much of an issue (performance wise), | ||
1295 | * we can take more than 100K local irqs per second on a 100 MHz P5. | ||
1296 | */ | ||
1297 | |||
1298 | if((++vic_tick[cpu] & 0x7) != 0) | ||
1299 | return; | ||
1300 | /* get here every 8 ticks (about every 1/12 of a second) */ | ||
1301 | |||
1302 | /* Change our priority to give someone else a chance at getting | ||
1303 | * the IRQ. The algorithm goes like this: | ||
1304 | * | ||
1305 | * In the VIC, the dynamically routed interrupt is always | ||
1306 | * handled by the lowest priority eligible (i.e. receiving | ||
1307 | * interrupts) CPU. If >1 eligible CPUs are equal lowest, the | ||
1308 | * lowest processor number gets it. | ||
1309 | * | ||
1310 | * The priority of a CPU is controlled by a special per-CPU | ||
1311 | * VIC priority register which is 3 bits wide, 0 being the lowest | ||
1312 | * and 7 the highest priority. | ||
1313 | * | ||
1314 | * Therefore we subtract the average number of interrupts from | ||
1315 | * the number we've fielded. If this number is negative, we | ||
1316 | * lower the activity count and if it is positive, we raise | ||
1317 | * it. | ||
1318 | * | ||
1319 | * I'm afraid this still leads to odd looking interrupt counts: | ||
1320 | * the totals are all roughly equal, but the individual ones | ||
1321 | * look rather skewed. | ||
1322 | * | ||
1323 | * FIXME: This algorithm is total crap when mixed with SMP | ||
1324 | * affinity code since we now try to even up the interrupt | ||
1325 | * counts when an affinity binding is keeping them on a | ||
1326 | * particular CPU*/ | ||
1327 | weight = (vic_intr_count[cpu]*voyager_extended_cpus | ||
1328 | - vic_intr_total) >> 4; | ||
1329 | weight += 4; | ||
1330 | if(weight > 7) | ||
1331 | weight = 7; | ||
1332 | if(weight < 0) | ||
1333 | weight = 0; | ||
1334 | |||
1335 | outb((__u8)weight, VIC_PRIORITY_REGISTER); | ||
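	/* Worked example: with 4 extended CPUs, a CPU that has fielded
	 * 1000 interrupts while vic_intr_total is 4400 (average 1100)
	 * computes (1000*4 - 4400) >> 4 == -25, then -21 after the +4
	 * bias, which clamps to 0: the under-served CPU programs the
	 * lowest priority value and so wins the next dynamically
	 * routed interrupt. */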
1336 | |||
1337 | #ifdef VOYAGER_DEBUG | ||
1338 | if((vic_tick[cpu] & 0xFFF) == 0) { | ||
1339 | /* print this message roughly every 25 secs */ | ||
1340 | printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n", | ||
1341 | cpu, vic_tick[cpu], weight); | ||
1342 | } | ||
1343 | #endif | ||
1344 | } | ||
1345 | |||
1346 | /* setup the profiling timer */ | ||
1347 | int | ||
1348 | setup_profiling_timer(unsigned int multiplier) | ||
1349 | { | ||
1350 | int i; | ||
1351 | |||
1352 | if (!multiplier) | ||
1353 | return -EINVAL; | ||
1354 | |||
1355 | /* | ||
1356 | * Set the new multiplier for each CPU. CPUs don't start using the | ||
1357 | * new values until the next timer interrupt in which they do process | ||
1358 | * accounting. | ||
1359 | */ | ||
1360 | for (i = 0; i < NR_CPUS; ++i) | ||
1361 | per_cpu(prof_multiplier, i) = multiplier; | ||
1362 | |||
1363 | return 0; | ||
1364 | } | ||
1365 | |||
1366 | /* This is a bit of a mess, but forced on us by the genirq changes; | ||
1367 | * there's no genirq handler that really does what voyager wants | ||
1368 | * so hack it up with the simple IRQ handler */ | ||
1369 | static void fastcall | ||
1370 | handle_vic_irq(unsigned int irq, struct irq_desc *desc) | ||
1371 | { | ||
1372 | before_handle_vic_irq(irq); | ||
1373 | handle_simple_irq(irq, desc); | ||
1374 | after_handle_vic_irq(irq); | ||
1375 | } | ||
1376 | |||
1377 | |||
1378 | /* The CPIs are handled in the per cpu 8259s, so they must be | ||
1379 | * enabled to be received: FIX: enabling the CPIs in the early | ||
1380 | * boot sequence interferes with bug checking; enable them later | ||
1381 | * on in smp_init */ | ||
1382 | #define VIC_SET_GATE(cpi, vector) \ | ||
1383 | set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector)) | ||
1384 | #define QIC_SET_GATE(cpi, vector) \ | ||
1385 | set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) | ||
1386 | |||
1387 | void __init | ||
1388 | smp_intr_init(void) | ||
1389 | { | ||
1390 | int i; | ||
1391 | |||
1392 | /* initialize the per cpu irq mask to all disabled */ | ||
1393 | for(i = 0; i < NR_CPUS; i++) | ||
1394 | vic_irq_mask[i] = 0xFFFF; | ||
1395 | |||
1396 | VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); | ||
1397 | |||
1398 | VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt); | ||
1399 | VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt); | ||
1400 | |||
1401 | QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt); | ||
1402 | QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt); | ||
1403 | QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt); | ||
1404 | QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt); | ||
1405 | QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt); | ||
1406 | |||
1407 | |||
1408 | /* now put the VIC descriptor into the first 48 IRQs | ||
1409 | * | ||
1410 | * This is for later: first 16 correspond to PC IRQs; next 16 | ||
1411 | * are Primary MC IRQs and final 16 are Secondary MC IRQs */ | ||
1412 | for(i = 0; i < 48; i++) | ||
1413 | set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq); | ||
1414 | } | ||
1415 | |||
1416 | /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per | ||
1417 | * processor to receive CPI */ | ||
1418 | static void | ||
1419 | send_CPI(__u32 cpuset, __u8 cpi) | ||
1420 | { | ||
1421 | int cpu; | ||
1422 | __u32 quad_cpuset = (cpuset & voyager_quad_processors); | ||
1423 | |||
1424 | if(cpi < VIC_START_FAKE_CPI) { | ||
1425 | /* fake CPIs are only used for booting, so send to the | ||
1426 | * extended quads as well---Quads must be VIC booted */ | ||
1427 | outb((__u8)(cpuset), VIC_CPI_Registers[cpi]); | ||
1428 | return; | ||
1429 | } | ||
1430 | if(quad_cpuset) | ||
1431 | send_QIC_CPI(quad_cpuset, cpi); | ||
1432 | cpuset &= ~quad_cpuset; | ||
1433 | cpuset &= 0xff; /* only first 8 CPUs vaild for VIC CPI */ | ||
1434 | if(cpuset == 0) | ||
1435 | return; | ||
1436 | for_each_online_cpu(cpu) { | ||
1437 | if(cpuset & (1<<cpu)) | ||
1438 | set_bit(cpi, &vic_cpi_mailbox[cpu]); | ||
1439 | } | ||
1440 | if(cpuset) | ||
1441 | outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]); | ||
1442 | } | ||
1443 | |||
1444 | /* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and | ||
1445 | * set the cache line to shared by reading it. | ||
1446 | * | ||
1447 | * DON'T make this inline otherwise the cache line read will be | ||
1448 | * optimised away | ||
1449 | * */ | ||
1450 | static int | ||
1451 | ack_QIC_CPI(__u8 cpi) { | ||
1452 | __u8 cpu = hard_smp_processor_id(); | ||
1453 | |||
1454 | cpi &= 7; | ||
1455 | |||
1456 | outb(1<<cpi, QIC_INTERRUPT_CLEAR1); | ||
1457 | return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi; | ||
1458 | } | ||
1459 | |||
1460 | static void | ||
1461 | ack_special_QIC_CPI(__u8 cpi) | ||
1462 | { | ||
1463 | switch(cpi) { | ||
1464 | case VIC_CMN_INT: | ||
1465 | outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0); | ||
1466 | break; | ||
1467 | case VIC_SYS_INT: | ||
1468 | outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0); | ||
1469 | break; | ||
1470 | } | ||
1471 | /* also clear at the VIC, just in case (nop for non-extended proc) */ | ||
1472 | ack_VIC_CPI(cpi); | ||
1473 | } | ||
1474 | |||
1475 | /* Acknowledge receipt of CPI in the VIC (essentially an EOI) */ | ||
1476 | static void | ||
1477 | ack_VIC_CPI(__u8 cpi) | ||
1478 | { | ||
1479 | #ifdef VOYAGER_DEBUG | ||
1480 | unsigned long flags; | ||
1481 | __u16 isr; | ||
1482 | __u8 cpu = smp_processor_id(); | ||
1483 | |||
1484 | local_irq_save(flags); | ||
1485 | isr = vic_read_isr(); | ||
1486 | if((isr & (1<<(cpi &7))) == 0) { | ||
1487 | printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi); | ||
1488 | } | ||
1489 | #endif | ||
1490 | /* send specific EOI; the two system interrupts have | ||
1491 | * bit 4 set for a separate vector but behave as the | ||
1492 | * corresponding 3 bit intr */ | ||
1493 | outb_p(0x60|(cpi & 7),0x20); | ||
1494 | |||
1495 | #ifdef VOYAGER_DEBUG | ||
1496 | if((vic_read_isr() & (1<<(cpi &7))) != 0) { | ||
1497 | printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi); | ||
1498 | } | ||
1499 | local_irq_restore(flags); | ||
1500 | #endif | ||
1501 | } | ||
1502 | |||
1503 | /* cribbed with thanks from irq.c */ | ||
1504 | #define __byte(x,y) (((unsigned char *)&(y))[x]) | ||
1505 | #define cached_21(cpu) (__byte(0,vic_irq_mask[cpu])) | ||
1506 | #define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu])) | ||
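/* Example: with vic_irq_mask[cpu] == 0xFFFB (only IRQ 2 unmasked),
 * cached_21(cpu) is the low byte 0xFB destined for the master 8259 at
 * port 0x21, and cached_A1(cpu) is the high byte 0xFF for the slave
 * at port 0xA1. */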
1507 | |||
1508 | static unsigned int | ||
1509 | startup_vic_irq(unsigned int irq) | ||
1510 | { | ||
1511 | unmask_vic_irq(irq); | ||
1512 | |||
1513 | return 0; | ||
1514 | } | ||
1515 | |||
1516 | /* The enable and disable routines. This is where we run into | ||
1517 | * conflicting architectural philosophy. Fundamentally, the voyager | ||
1518 | * architecture does not expect to have to disable interrupts globally | ||
1519 | * (the IRQ controllers belong to each CPU). The processor masquerade | ||
1520 | * which is used to start the system shouldn't be used in a running OS | ||
1521 | * since it will cause great confusion if two separate CPUs drive to | ||
1522 | * the same IRQ controller (I know, I've tried it). | ||
1523 | * | ||
1524 | * The solution is a variant on the NCR lazy SPL design: | ||
1525 | * | ||
1526 | * 1) To disable an interrupt, do nothing (other than set the | ||
1527 | * IRQ_DISABLED flag). This dares the interrupt actually to arrive. | ||
1528 | * | ||
1529 | * 2) If the interrupt dares to come in, raise the local mask against | ||
1530 | * it (this will result in all the CPU masks being raised | ||
1531 | * eventually). | ||
1532 | * | ||
1533 | * 3) To enable the interrupt, lower the mask on the local CPU and | ||
1534 | * broadcast an Interrupt enable CPI which causes all other CPUs to | ||
1535 | * adjust their masks accordingly. */ | ||
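/* A condensed restatement of the three steps above, illustrative
 * only: these helpers are hypothetical, not the driver's entry
 * points (those are mask_vic_irq()/unmask_vic_irq() below and the
 * before/after handlers further down). */
#if 0
static void lazy_disable(unsigned int irq)
{
	/* step 1: just flag it; touch no hardware */
	irq_desc[irq].status |= IRQ_DISABLED;
}

static void lazy_arrival(unsigned int irq)
{
	/* step 2: the flagged interrupt dared to arrive, so mask it here */
	if (irq_desc[irq].status & IRQ_DISABLED)
		disable_local_vic_irq(irq);
}

static void lazy_enable(unsigned int irq)
{
	/* step 3: lower the local mask, then tell the other CPUs */
	irq_desc[irq].status &= ~IRQ_DISABLED;
	enable_local_vic_irq(irq);
	send_CPI(voyager_extended_vic_processors, VIC_ENABLE_IRQ_CPI);
}
#endif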
1536 | |||
1537 | static void | ||
1538 | unmask_vic_irq(unsigned int irq) | ||
1539 | { | ||
1540 | /* linux doesn't do processor-irq affinity, so enable on | ||
1541 | * all CPUs we know about */ | ||
1542 | int cpu = smp_processor_id(), real_cpu; | ||
1543 | __u16 mask = (1<<irq); | ||
1544 | __u32 processorList = 0; | ||
1545 | unsigned long flags; | ||
1546 | |||
1547 | VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n", | ||
1548 | irq, cpu, cpu_irq_affinity[cpu])); | ||
1549 | spin_lock_irqsave(&vic_irq_lock, flags); | ||
1550 | for_each_online_cpu(real_cpu) { | ||
1551 | if(!(voyager_extended_vic_processors & (1<<real_cpu))) | ||
1552 | continue; | ||
1553 | if(!(cpu_irq_affinity[real_cpu] & mask)) { | ||
1554 | /* irq has no affinity for this CPU, ignore */ | ||
1555 | continue; | ||
1556 | } | ||
1557 | if(real_cpu == cpu) { | ||
1558 | enable_local_vic_irq(irq); | ||
1559 | } | ||
1560 | else if(vic_irq_mask[real_cpu] & mask) { | ||
1561 | vic_irq_enable_mask[real_cpu] |= mask; | ||
1562 | processorList |= (1<<real_cpu); | ||
1563 | } | ||
1564 | } | ||
1565 | spin_unlock_irqrestore(&vic_irq_lock, flags); | ||
1566 | if(processorList) | ||
1567 | send_CPI(processorList, VIC_ENABLE_IRQ_CPI); | ||
1568 | } | ||
1569 | |||
1570 | static void | ||
1571 | mask_vic_irq(unsigned int irq) | ||
1572 | { | ||
1573 | /* lazy disable, do nothing */ | ||
1574 | } | ||
1575 | |||
1576 | static void | ||
1577 | enable_local_vic_irq(unsigned int irq) | ||
1578 | { | ||
1579 | __u8 cpu = smp_processor_id(); | ||
1580 | __u16 mask = ~(1 << irq); | ||
1581 | __u16 old_mask = vic_irq_mask[cpu]; | ||
1582 | |||
1583 | vic_irq_mask[cpu] &= mask; | ||
1584 | if(vic_irq_mask[cpu] == old_mask) | ||
1585 | return; | ||
1586 | |||
1587 | VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n", | ||
1588 | irq, cpu)); | ||
1589 | |||
1590 | if (irq & 8) { | ||
1591 | outb_p(cached_A1(cpu),0xA1); | ||
1592 | (void)inb_p(0xA1); | ||
1593 | } | ||
1594 | else { | ||
1595 | outb_p(cached_21(cpu),0x21); | ||
1596 | (void)inb_p(0x21); | ||
1597 | } | ||
1598 | } | ||
1599 | |||
1600 | static void | ||
1601 | disable_local_vic_irq(unsigned int irq) | ||
1602 | { | ||
1603 | __u8 cpu = smp_processor_id(); | ||
1604 | __u16 mask = (1 << irq); | ||
1605 | __u16 old_mask = vic_irq_mask[cpu]; | ||
1606 | |||
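	/* IRQ7 is never masked locally: vic_enable_cpi() below keeps it
	 * enabled to carry the sys int and cmn int */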
1607 | if(irq == 7) | ||
1608 | return; | ||
1609 | |||
1610 | vic_irq_mask[cpu] |= mask; | ||
1611 | if(old_mask == vic_irq_mask[cpu]) | ||
1612 | return; | ||
1613 | |||
1614 | VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n", | ||
1615 | irq, cpu)); | ||
1616 | |||
1617 | if (irq & 8) { | ||
1618 | outb_p(cached_A1(cpu),0xA1); | ||
1619 | (void)inb_p(0xA1); | ||
1620 | } | ||
1621 | else { | ||
1622 | outb_p(cached_21(cpu),0x21); | ||
1623 | (void)inb_p(0x21); | ||
1624 | } | ||
1625 | } | ||
1626 | |||
1627 | /* The VIC is level triggered, so the ack can only be issued after the | ||
1628 | * interrupt completes. However, we do Voyager lazy interrupt | ||
1629 | * handling here: It is an extremely expensive operation to mask an | ||
1630 | * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If | ||
1631 | * this interrupt actually comes in, then we mask and ack here to push | ||
1632 | * the interrupt off to another CPU */ | ||
1633 | static void | ||
1634 | before_handle_vic_irq(unsigned int irq) | ||
1635 | { | ||
1636 | irq_desc_t *desc = irq_desc + irq; | ||
1637 | __u8 cpu = smp_processor_id(); | ||
1638 | |||
1639 | _raw_spin_lock(&vic_irq_lock); | ||
1640 | vic_intr_total++; | ||
1641 | vic_intr_count[cpu]++; | ||
1642 | |||
1643 | if(!(cpu_irq_affinity[cpu] & (1<<irq))) { | ||
1644 | /* The irq is not in our affinity mask, push it off | ||
1645 | * onto another CPU */ | ||
1646 | VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n", | ||
1647 | irq, cpu)); | ||
1648 | disable_local_vic_irq(irq); | ||
1649 | /* set IRQ_INPROGRESS to prevent the handler in irq.c from | ||
1650 | * actually calling the interrupt routine */ | ||
1651 | desc->status |= IRQ_REPLAY | IRQ_INPROGRESS; | ||
1652 | } else if(desc->status & IRQ_DISABLED) { | ||
1653 | /* Damn, the interrupt actually arrived, do the lazy | ||
1654 | * disable thing. The interrupt routine in irq.c will | ||
1655 | * not handle an IRQ_DISABLED interrupt, so nothing more | ||
1656 | * need be done here */ | ||
1657 | VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n", | ||
1658 | irq, cpu)); | ||
1659 | disable_local_vic_irq(irq); | ||
1660 | desc->status |= IRQ_REPLAY; | ||
1661 | } else { | ||
1662 | desc->status &= ~IRQ_REPLAY; | ||
1663 | } | ||
1664 | |||
1665 | _raw_spin_unlock(&vic_irq_lock); | ||
1666 | } | ||
1667 | |||
1668 | /* Finish the VIC interrupt: basically mask */ | ||
1669 | static void | ||
1670 | after_handle_vic_irq(unsigned int irq) | ||
1671 | { | ||
1672 | irq_desc_t *desc = irq_desc + irq; | ||
1673 | |||
1674 | _raw_spin_lock(&vic_irq_lock); | ||
1675 | { | ||
1676 | unsigned int status = desc->status & ~IRQ_INPROGRESS; | ||
1677 | #ifdef VOYAGER_DEBUG | ||
1678 | __u16 isr; | ||
1679 | #endif | ||
1680 | |||
1681 | desc->status = status; | ||
1682 | if (status & IRQ_DISABLED) | ||
1683 | disable_local_vic_irq(irq); | ||
1684 | #ifdef VOYAGER_DEBUG | ||
1685 | /* DEBUG: before we ack, check what's in progress */ | ||
1686 | isr = vic_read_isr(); | ||
1687 | if(!(isr & (1<<irq)) && !(status & IRQ_REPLAY)) { | ||
1689 | __u8 cpu = smp_processor_id(); | ||
1690 | __u8 real_cpu; | ||
1692 | |||
1693 | printk("VOYAGER SMP: CPU%d lost interrupt %d\n", | ||
1694 | cpu, irq); | ||
1695 | for_each_possible_cpu(real_cpu) { | ||
1696 | |||
1697 | outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, | ||
1698 | VIC_PROCESSOR_ID); | ||
1699 | isr = vic_read_isr(); | ||
1700 | if(isr & (1<<irq)) { | ||
1701 | printk("VOYAGER SMP: CPU%d ack irq %d\n", | ||
1702 | real_cpu, irq); | ||
1703 | ack_vic_irq(irq); | ||
1704 | } | ||
1705 | outb(cpu, VIC_PROCESSOR_ID); | ||
1706 | } | ||
1707 | } | ||
1708 | #endif /* VOYAGER_DEBUG */ | ||
1709 | /* as soon as we ack, the interrupt is eligible for | ||
1710 | * receipt by another CPU so everything must be in | ||
1711 | * order here */ | ||
1712 | ack_vic_irq(irq); | ||
1713 | if(status & IRQ_REPLAY) { | ||
1714 | /* replay is set if we disable the interrupt | ||
1715 | * in the before_handle_vic_irq() routine, so | ||
1716 | * clear the in progress bit here to allow the | ||
1717 | * next CPU to handle this correctly */ | ||
1718 | desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS); | ||
1719 | } | ||
1720 | #ifdef VOYAGER_DEBUG | ||
1721 | isr = vic_read_isr(); | ||
1722 | if((isr & (1<<irq)) != 0) | ||
1723 | printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n", | ||
1724 | irq, isr); | ||
1725 | #endif /* VOYAGER_DEBUG */ | ||
1726 | } | ||
1727 | _raw_spin_unlock(&vic_irq_lock); | ||
1728 | |||
1729 | /* All code after this point is out of the main path - the IRQ | ||
1730 | * may be intercepted by another CPU if reasserted */ | ||
1731 | } | ||
1732 | |||
1733 | |||
1734 | /* Linux processor - interrupt affinity manipulations. | ||
1735 | * | ||
1736 | * For each processor, we maintain a 32 bit irq affinity mask. | ||
1737 | * Initially it is set to all 1's so every processor accepts every | ||
1738 | * interrupt. In this call, we change the processor's affinity mask: | ||
1739 | * | ||
1740 | * Change from enable to disable: | ||
1741 | * | ||
1742 | * If the interrupt ever comes in to the processor, we will disable it | ||
1743 | * and ack it to push it off to another CPU, so just accept the mask here. | ||
1744 | * | ||
1745 | * Change from disable to enable: | ||
1746 | * | ||
1747 | * change the mask and then do an interrupt enable CPI to re-enable on | ||
1748 | * the selected processors */ | ||
1749 | |||
1750 | void | ||
1751 | set_vic_irq_affinity(unsigned int irq, cpumask_t mask) | ||
1752 | { | ||
1753 | /* Only extended processors handle interrupts */ | ||
1754 | unsigned long real_mask; | ||
1755 | unsigned long irq_mask = 1 << irq; | ||
1756 | int cpu; | ||
1757 | |||
1758 | real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; | ||
1759 | |||
1760 | if(cpus_addr(mask)[0] == 0) | ||
1761 | /* can't have an empty set of CPUs accepting the interrupt -- | ||
1762 | * extremely bad things will happen */ | ||
1763 | return; | ||
1764 | |||
1765 | if(irq == 0) | ||
1766 | /* can't change the affinity of the timer IRQ. This | ||
1767 | * is due to the constraint in the voyager | ||
1768 | * architecture that the CPI also comes in on an IRQ | ||
1769 | * line and we have chosen IRQ0 for this. If you | ||
1770 | * raise the mask on this interrupt, the processor | ||
1771 | * will no longer be able to accept VIC CPIs */ | ||
1772 | return; | ||
1773 | |||
1774 | if(irq >= 32) | ||
1775 | /* You can only have 32 interrupts in a voyager system | ||
1776 | * (and 32 only if you have a secondary microchannel | ||
1777 | * bus) */ | ||
1778 | return; | ||
1779 | |||
1780 | for_each_online_cpu(cpu) { | ||
1781 | unsigned long cpu_mask = 1 << cpu; | ||
1782 | |||
1783 | if(cpu_mask & real_mask) { | ||
1784 | /* enable the interrupt for this cpu */ | ||
1785 | cpu_irq_affinity[cpu] |= irq_mask; | ||
1786 | } else { | ||
1787 | /* disable the interrupt for this cpu */ | ||
1788 | cpu_irq_affinity[cpu] &= ~irq_mask; | ||
1789 | } | ||
1790 | } | ||
1791 | /* this is magic, we now have the correct affinity maps, so | ||
1792 | * enable the interrupt. This will send an enable CPI to | ||
1793 | * those cpu's who need to enable it in their local masks, | ||
1794 | * causing them to correct for the new affinity.  If the | ||
1795 | * interrupt is currently globally disabled, it will simply be | ||
1796 | * disabled again as it comes in (voyager lazy disable). If | ||
1797 | * the affinity map is tightened to disable the interrupt on a | ||
1798 | * cpu, it will be pushed off when it comes in */ | ||
1799 | unmask_vic_irq(irq); | ||
1800 | } | ||
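/* Usage sketch, illustrative only: restrict a hypothetical IRQ 9 to
 * CPU 1.  Per the checks above, the mask must be non-empty and the
 * IRQ can be neither 0 (the CPI line) nor >= 32. */
#if 0
	cpumask_t new_mask = cpumask_of_cpu(1);

	set_vic_irq_affinity(9, new_mask);
#endif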
1801 | |||
1802 | static void | ||
1803 | ack_vic_irq(unsigned int irq) | ||
1804 | { | ||
1805 | if (irq & 8) { | ||
1806 | outb(0x62,0x20); /* Specific EOI to cascade */ | ||
1807 | outb(0x60|(irq & 7),0xA0); | ||
1808 | } else { | ||
1809 | outb(0x60 | (irq & 7),0x20); | ||
1810 | } | ||
1811 | } | ||
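/* Worked example of the encoding above, for illustration: irq 3 is
 * below the cascade, so a single specific EOI 0x60 | 3 = 0x63 goes to
 * the master at 0x20.  irq 10 needs 0x62 to the master first (specific
 * EOI for the cascade on IRQ2) and then 0x60 | (10 & 7) = 0x62 to the
 * slave at 0xA0. */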
1812 | |||
1813 | /* enable the CPIs. In the VIC, the CPIs are delivered by the 8259 | ||
1814 | * but are not vectored by it. This means that the 8259 mask must be | ||
1815 | * lowered to receive them */ | ||
1816 | static __init void | ||
1817 | vic_enable_cpi(void) | ||
1818 | { | ||
1819 | __u8 cpu = smp_processor_id(); | ||
1820 | |||
1821 | /* just take a copy of the current mask (nop for boot cpu) */ | ||
1822 | vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id]; | ||
1823 | |||
1824 | enable_local_vic_irq(VIC_CPI_LEVEL0); | ||
1825 | enable_local_vic_irq(VIC_CPI_LEVEL1); | ||
1826 | /* for sys int and cmn int */ | ||
1827 | enable_local_vic_irq(7); | ||
1828 | |||
1829 | if(is_cpu_quad()) { | ||
1830 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); | ||
1831 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); | ||
1832 | VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n", | ||
1833 | cpu, QIC_CPI_ENABLE)); | ||
1834 | } | ||
1835 | |||
1836 | VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n", | ||
1837 | cpu, vic_irq_mask[cpu])); | ||
1838 | } | ||
1839 | |||
1840 | void | ||
1841 | voyager_smp_dump(void) | ||
1842 | { | ||
1843 | int old_cpu = smp_processor_id(), cpu; | ||
1844 | |||
1845 | /* dump the interrupt masks of each processor */ | ||
1846 | for_each_online_cpu(cpu) { | ||
1847 | __u16 imr, isr, irr; | ||
1848 | unsigned long flags; | ||
1849 | |||
1850 | local_irq_save(flags); | ||
1851 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); | ||
1852 | imr = (inb(0xa1) << 8) | inb(0x21); | ||
1853 | outb(0x0a, 0xa0); | ||
1854 | irr = inb(0xa0) << 8; | ||
1855 | outb(0x0a, 0x20); | ||
1856 | irr |= inb(0x20); | ||
1857 | outb(0x0b, 0xa0); | ||
1858 | isr = inb(0xa0) << 8; | ||
1859 | outb(0x0b, 0x20); | ||
1860 | isr |= inb(0x20); | ||
1861 | outb(old_cpu, VIC_PROCESSOR_ID); | ||
1862 | local_irq_restore(flags); | ||
1863 | printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n", | ||
1864 | cpu, vic_irq_mask[cpu], imr, irr, isr); | ||
1865 | #if 0 | ||
1866 | /* These lines are put in to try to unstick an un-ack'd irq */ | ||
1867 | if(isr != 0) { | ||
1868 | int irq; | ||
1869 | for(irq=0; irq<16; irq++) { | ||
1870 | if(isr & (1<<irq)) { | ||
1871 | printk("\tCPU%d: ack irq %d\n", | ||
1872 | cpu, irq); | ||
1873 | local_irq_save(flags); | ||
1874 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, | ||
1875 | VIC_PROCESSOR_ID); | ||
1876 | ack_vic_irq(irq); | ||
1877 | outb(old_cpu, VIC_PROCESSOR_ID); | ||
1878 | local_irq_restore(flags); | ||
1879 | } | ||
1880 | } | ||
1881 | } | ||
1882 | #endif | ||
1883 | } | ||
1884 | } | ||
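/* Hypothetical helper (not in the driver) distilling the register
 * reads above: writing OCW3 to an 8259 command port selects which
 * register the next read returns -- 0x0a for the IRR, 0x0b for the
 * ISR -- while the IMR is read directly from the data port. */
#if 0
static __u8 pic_read_reg(__u16 cmd_port, __u8 ocw3)
{
	outb(ocw3, cmd_port);	/* 0x0a = IRR, 0x0b = ISR */
	return inb(cmd_port);
}
#endif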
1885 | |||
1886 | void | ||
1887 | smp_voyager_power_off(void *dummy) | ||
1888 | { | ||
1889 | if(smp_processor_id() == boot_cpu_id) | ||
1890 | voyager_power_off(); | ||
1891 | else | ||
1892 | smp_stop_cpu_function(NULL); | ||
1893 | } | ||
1894 | |||
1895 | static void __init | ||
1896 | voyager_smp_prepare_cpus(unsigned int max_cpus) | ||
1897 | { | ||
1898 | /* FIXME: ignore max_cpus for now */ | ||
1899 | smp_boot_cpus(); | ||
1900 | } | ||
1901 | |||
1902 | static void __devinit voyager_smp_prepare_boot_cpu(void) | ||
1903 | { | ||
1904 | init_gdt(smp_processor_id()); | ||
1905 | switch_to_new_gdt(); | ||
1906 | |||
1907 | cpu_set(smp_processor_id(), cpu_online_map); | ||
1908 | cpu_set(smp_processor_id(), cpu_callout_map); | ||
1909 | cpu_set(smp_processor_id(), cpu_possible_map); | ||
1910 | cpu_set(smp_processor_id(), cpu_present_map); | ||
1911 | } | ||
1912 | |||
1913 | static int __devinit | ||
1914 | voyager_cpu_up(unsigned int cpu) | ||
1915 | { | ||
1916 | /* This only works at boot for x86. See "rewrite" above. */ | ||
1917 | if (cpu_isset(cpu, smp_commenced_mask)) | ||
1918 | return -ENOSYS; | ||
1919 | |||
1920 | /* In case one didn't come up */ | ||
1921 | if (!cpu_isset(cpu, cpu_callin_map)) | ||
1922 | return -EIO; | ||
1923 | /* Unleash the CPU! */ | ||
1924 | cpu_set(cpu, smp_commenced_mask); | ||
1925 | while (!cpu_isset(cpu, cpu_online_map)) | ||
1926 | mb(); | ||
1927 | return 0; | ||
1928 | } | ||
1929 | |||
1930 | static void __init | ||
1931 | voyager_smp_cpus_done(unsigned int max_cpus) | ||
1932 | { | ||
1933 | zap_low_mappings(); | ||
1934 | } | ||
1935 | |||
1936 | void __init | ||
1937 | smp_setup_processor_id(void) | ||
1938 | { | ||
1939 | current_thread_info()->cpu = hard_smp_processor_id(); | ||
1940 | x86_write_percpu(cpu_number, hard_smp_processor_id()); | ||
1941 | } | ||
1942 | |||
1943 | struct smp_ops smp_ops = { | ||
1944 | .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, | ||
1945 | .smp_prepare_cpus = voyager_smp_prepare_cpus, | ||
1946 | .cpu_up = voyager_cpu_up, | ||
1947 | .smp_cpus_done = voyager_smp_cpus_done, | ||
1948 | |||
1949 | .smp_send_stop = voyager_smp_send_stop, | ||
1950 | .smp_send_reschedule = voyager_smp_send_reschedule, | ||
1951 | .smp_call_function_mask = voyager_smp_call_function_mask, | ||
1952 | }; | ||