 arch/parisc/kernel/irq.c | 149 ++++++++++++++++++++++++++++----------------
 arch/parisc/kernel/smp.c |   3 +
 drivers/parisc/iosapic.c |   3 ++-
 include/asm-parisc/irq.h |   2 ++
 4 files changed, 93 insertions(+), 64 deletions(-)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index c53bfeb4bf94..9bdd0197ceb7 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -45,6 +45,17 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
  */
 static volatile unsigned long cpu_eiem = 0;
 
+/*
+** ack bitmap ... habitually set to 1, but reset to zero
+** between ->ack() and ->end() of the interrupt to prevent
+** re-interruption of a processing interrupt.
+*/
+static volatile unsigned long global_ack_eiem = ~0UL;
+/*
+** Local bitmap, same as above but for per-cpu interrupts
+*/
+static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
+
 static void cpu_disable_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
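
A note on the scheme this hunk introduces: the value actually programmed into the EIEM register is always the AND of the enable mask (cpu_eiem) with the global and per-CPU ack bitmaps, so clearing a bit in either ack bitmap masks just that one line between ->ack() and ->end() while every other line stays open. A minimal userspace sketch of the idea, assuming a 64-bit long and a simplified EIEM_MASK() (the printf harness is invented for illustration; this is not the kernel code):

#include <stdio.h>

/* MSB-first bit numbering, as on the parisc EIRs; simplified stand-in
 * for the real EIEM_MASK() macro in asm/irq.h. */
#define EIEM_MASK(irq)	(1UL << (64 - 1 - (irq)))

static unsigned long cpu_eiem;			/* enabled interrupts */
static unsigned long global_ack_eiem = ~0UL;	/* all 1s = nothing in flight */
static unsigned long local_ack_eiem  = ~0UL;	/* per-CPU variant, one CPU here */

/* The value the patch hands to set_eiem() at every update point. */
static unsigned long effective_eiem(void)
{
	return cpu_eiem & global_ack_eiem & local_ack_eiem;
}

int main(void)
{
	cpu_eiem = EIEM_MASK(0) | EIEM_MASK(3);	/* irqs 0 and 3 enabled */

	global_ack_eiem &= ~EIEM_MASK(3);	/* irq 3 acked, in progress */
	printf("irq 3 in flight: %016lx\n", effective_eiem());

	global_ack_eiem |= EIEM_MASK(3);	/* ->end(): re-admit irq 3 */
	printf("after end:       %016lx\n", effective_eiem());
	return 0;
}

The point of the design is that no blanket set_eiem(0UL)/restore is needed around handlers any more; each update site simply recomputes the AND.
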
@@ -62,13 +73,6 @@ static void cpu_enable_irq(unsigned int irq)
 
 	cpu_eiem |= eirr_bit;
 
-	/* FIXME: while our interrupts aren't nested, we cannot reset
-	 * the eiem mask if we're already in an interrupt.  Once we
-	 * implement nested interrupts, this can go away
-	 */
-	if (!in_interrupt())
-		set_eiem(cpu_eiem);
-
 	/* This is just a simple NOP IPI.  But what it does is cause
 	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
 	 * of the interrupt handler */
@@ -84,13 +88,45 @@ static unsigned int cpu_startup_irq(unsigned int irq)
 void no_ack_irq(unsigned int irq) { }
 void no_end_irq(unsigned int irq) { }
 
+void cpu_ack_irq(unsigned int irq)
+{
+	unsigned long mask = EIEM_MASK(irq);
+	int cpu = smp_processor_id();
+
+	/* Clear in EIEM so we can no longer process */
+	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
+		per_cpu(local_ack_eiem, cpu) &= ~mask;
+	else
+		global_ack_eiem &= ~mask;
+
+	/* disable the interrupt */
+	set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+	/* and now ack it */
+	mtctl(mask, 23);
+}
+
+void cpu_end_irq(unsigned int irq)
+{
+	unsigned long mask = EIEM_MASK(irq);
+	int cpu = smp_processor_id();
+
+	/* set it in the eiems---it's no longer in process */
+	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
+		per_cpu(local_ack_eiem, cpu) |= mask;
+	else
+		global_ack_eiem |= mask;
+
+	/* enable the interrupt */
+	set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+}
+
 #ifdef CONFIG_SMP
 int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 {
 	int cpu_dest;
 
 	/* timer and ipi have to always be received on all CPUs */
-	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision.  The mask has already
 		 * been set; we must reset it */
 		irq_desc[irq].affinity = CPU_MASK_ALL;
@@ -119,8 +155,8 @@ static struct hw_interrupt_type cpu_interrupt_type = {
 	.shutdown = cpu_disable_irq,
 	.enable = cpu_enable_irq,
 	.disable = cpu_disable_irq,
-	.ack = no_ack_irq,
-	.end = no_end_irq,
+	.ack = cpu_ack_irq,
+	.end = cpu_end_irq,
 #ifdef CONFIG_SMP
 	.set_affinity = cpu_set_affinity_irq,
 #endif
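
With cpu_ack_irq()/cpu_end_irq() wired into the chip above, the generic __do_IRQ() path calls ->ack() before running the handlers and ->end() afterwards, so a line stays masked only while its own handler is in flight. A runnable toy model of that call order (single CPU, global bitmap only, LSB bit numbering for brevity; chip_ack/chip_end/handle are invented names mirroring the patch's functions, not kernel API):

#include <stdio.h>

static unsigned long cpu_eiem = ~0UL;
static unsigned long global_ack_eiem = ~0UL;

static void chip_ack(unsigned int irq)	/* models cpu_ack_irq() */
{
	global_ack_eiem &= ~(1UL << irq);	/* mask this line until ->end() */
	printf("ack %u: eiem now %016lx\n", irq, cpu_eiem & global_ack_eiem);
}

static void chip_end(unsigned int irq)	/* models cpu_end_irq() */
{
	global_ack_eiem |= 1UL << irq;		/* handler done, re-admit */
	printf("end %u: eiem now %016lx\n", irq, cpu_eiem & global_ack_eiem);
}

static void handle(unsigned int irq)	/* stands in for __do_IRQ() */
{
	chip_ack(irq);
	printf("handler for irq %u runs; other lines stay open\n", irq);
	chip_end(irq);
}

int main(void)
{
	handle(3);
	return 0;
}
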
@@ -298,82 +334,69 @@ unsigned int txn_alloc_data(unsigned int virt_irq)
 	return virt_irq - CPU_IRQ_BASE;
 }
 
+static inline int eirr_to_irq(unsigned long eirr)
+{
+#ifdef CONFIG_64BIT
+	int bit = fls64(eirr);
+#else
+	int bit = fls(eirr);
+#endif
+	return (BITS_PER_LONG - bit) + TIMER_IRQ;
+}
+
 /* ONLY called from entry.S:intr_extint() */
 void do_cpu_irq_mask(struct pt_regs *regs)
 {
 	unsigned long eirr_val;
-
-	irq_enter();
-
-	/*
-	 * Don't allow TIMER or IPI nested interrupts.
-	 * Allowing any single interrupt to nest can lead to that CPU
-	 * handling interrupts with all enabled interrupts unmasked.
-	 */
-	set_eiem(0UL);
-
-	/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
-	 * 2) We loop here on EIRR contents in order to avoid
-	 *    nested interrupts or having to take another interrupt
-	 *    when we could have just handled it right away.
-	 */
-	for (;;) {
-		unsigned long bit = (1UL << (BITS_PER_LONG - 1));
-		unsigned int irq;
-		eirr_val = mfctl(23) & cpu_eiem;
-		if (!eirr_val)
-			break;
-
-		mtctl(eirr_val, 23); /* reset bits we are going to process */
-
-		/* Work our way from MSb to LSb...same order we alloc EIRs */
-		for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+	int irq, cpu = smp_processor_id();
 #ifdef CONFIG_SMP
-			cpumask_t dest = irq_desc[irq].affinity;
+	cpumask_t dest;
 #endif
-			if (!(bit & eirr_val))
-				continue;
 
-			/* clear bit in mask - can exit loop sooner */
-			eirr_val &= ~bit;
+	local_irq_disable();
+	irq_enter();
 
-#ifdef CONFIG_SMP
-			/* FIXME: because generic set affinity mucks
-			 * with the affinity before sending it to us
-			 * we can get the situation where the affinity is
-			 * wrong for our CPU type interrupts */
-			if (irq != TIMER_IRQ && irq != IPI_IRQ &&
-			    !cpu_isset(smp_processor_id(), dest)) {
-				int cpu = first_cpu(dest);
-
-				printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
-					irq, smp_processor_id(), cpu);
-				gsc_writel(irq + CPU_IRQ_BASE,
-					cpu_data[cpu].hpa);
-				continue;
-			}
-#endif
+	eirr_val = mfctl(23) & cpu_eiem & global_ack_eiem &
+		per_cpu(local_ack_eiem, cpu);
+	if (!eirr_val)
+		goto set_out;
+	irq = eirr_to_irq(eirr_val);
 
-			__do_IRQ(irq, regs);
-		}
+#ifdef CONFIG_SMP
+	dest = irq_desc[irq].affinity;
+	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+	    !cpu_isset(smp_processor_id(), dest)) {
+		int cpu = first_cpu(dest);
+
+		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+			irq, smp_processor_id(), cpu);
+		gsc_writel(irq + CPU_IRQ_BASE,
+			cpu_data[cpu].hpa);
+		goto set_out;
 	}
+#endif
+	__do_IRQ(irq, regs);
 
-	set_eiem(cpu_eiem);	/* restore original mask */
+ out:
 	irq_exit();
-}
+	return;
 
+ set_out:
+	set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+	goto out;
+}
 
 static struct irqaction timer_action = {
 	.handler = timer_interrupt,
 	.name = "timer",
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU,
 };
 
 #ifdef CONFIG_SMP
 static struct irqaction ipi_action = {
 	.handler = ipi_interrupt,
 	.name = "IPI",
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
 };
 #endif
 
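
The eirr_to_irq() helper added in this hunk maps the highest set EIRR bit to an IRQ number; EIR bits are allocated MSB-first, so the highest bit is the lowest-numbered pending interrupt. do_cpu_irq_mask() now dispatches exactly one interrupt per trap, and anything still pending in the EIRR simply re-traps once the masks reopen. A quick standalone check of the arithmetic (assumes a 64-bit long; the TIMER_IRQ value and the fls64 fallback below are illustrative stand-ins, not the kernel's):

#include <stdio.h>

#define BITS_PER_LONG	64
#define TIMER_IRQ	0	/* illustrative base, not the real parisc value */

static int fls64(unsigned long x)	/* 1-based index of highest set bit */
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

static int eirr_to_irq(unsigned long eirr)
{
	return (BITS_PER_LONG - fls64(eirr)) + TIMER_IRQ;
}

int main(void)
{
	/* MSB set -> fls64 = 64 -> irq = (64 - 64) + TIMER_IRQ = 0 */
	printf("%d\n", eirr_to_irq(1UL << 63));
	/* two bits pending: only the highest (lowest-numbered irq) wins */
	printf("%d\n", eirr_to_irq((1UL << 63) | (1UL << 40)));
	/* bit 40 alone -> fls64 = 41 -> irq = 64 - 41 = 23 */
	printf("%d\n", eirr_to_irq(1UL << 40));
	return 0;
}
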
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index f33e8de438dc..faad338f310e 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -262,6 +262,9 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 					this_cpu, which);
 				return IRQ_NONE;
 			} /* Switch */
+			/* let in any pending interrupts */
+			local_irq_enable();
+			local_irq_disable();
 		} /* while (ops) */
 	}
 	return IRQ_HANDLED;
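
The smp.c change opens a brief interrupt window after each IPI op, so a long run of queued ops can't starve other external interrupts now that IPI handling keeps the rest of the lines unmasked. A userspace analogue of that breather using signal masking (sigprocmask() standing in for local_irq_enable()/local_irq_disable(); the alarm-driven harness is invented for the demo):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alarm(int sig)
{
	(void)sig;
	ticks++;
}

int main(void)
{
	struct sigaction sa;
	sigset_t block;

	sa.sa_handler = on_alarm;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGALRM, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGALRM);
	sigprocmask(SIG_BLOCK, &block, NULL);	/* "local_irq_disable()" */
	alarm(1);

	for (int op = 0; op < 3; op++) {
		sleep(1);	/* a long piece of work with delivery blocked */
		/* let in any pending signals, then close the window again */
		sigprocmask(SIG_UNBLOCK, &block, NULL);
		sigprocmask(SIG_BLOCK, &block, NULL);
	}
	printf("alarms delivered: %d\n", (int)ticks);
	return 0;
}
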
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 1fbda77cefc2..90489ade632e 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -692,6 +692,7 @@ static void iosapic_end_irq(unsigned int irq)
 	DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq,
 			vi->eoi_addr, vi->eoi_data);
 	iosapic_eoi(vi->eoi_addr, vi->eoi_data);
+	cpu_end_irq(irq);
 }
 
 static unsigned int iosapic_startup_irq(unsigned int irq)
@@ -728,7 +729,7 @@ static struct hw_interrupt_type iosapic_interrupt_type = {
 	.shutdown = iosapic_disable_irq,
 	.enable = iosapic_enable_irq,
 	.disable = iosapic_disable_irq,
-	.ack = no_ack_irq,
+	.ack = cpu_ack_irq,
 	.end = iosapic_end_irq,
 #ifdef CONFIG_SMP
 	.set_affinity = iosapic_set_affinity_irq,
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index 6e29cfa2812d..399c81981ed5 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -39,6 +39,8 @@ struct irq_chip;
  */
 void no_ack_irq(unsigned int irq);
 void no_end_irq(unsigned int irq);
+void cpu_ack_irq(unsigned int irq);
+void cpu_end_irq(unsigned int irq);
 
 extern int txn_alloc_irq(unsigned int nbits);
 extern int txn_claim_irq(int);