Diffstat (limited to 'arch/parisc/kernel/irq.c'):

 arch/parisc/kernel/irq.c | 110 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 92 insertions(+), 18 deletions(-)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 006385dbee66..197936d9359a 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -30,6 +30,9 @@
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <asm/io.h>
+
+#include <asm/smp.h>
 
 #undef PARISC_IRQ_CR16_COUNTS
 
@@ -43,26 +46,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
  */
 static volatile unsigned long cpu_eiem = 0;
 
-static void cpu_set_eiem(void *info)
-{
-	set_eiem((unsigned long) info);
-}
-
-static inline void cpu_disable_irq(unsigned int irq)
+static void cpu_disable_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
 	cpu_eiem &= ~eirr_bit;
-	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+	/* Do nothing on the other CPUs.  If they get this interrupt,
+	 * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
+	 * handle it, and the set_eiem() at the bottom will ensure it
+	 * then gets disabled */
 }
 
 static void cpu_enable_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
-	mtctl(eirr_bit, 23);	/* clear EIRR bit before unmasking */
 	cpu_eiem |= eirr_bit;
+
+	/* FIXME: while our interrupts aren't nested, we cannot reset
+	 * the eiem mask if we're already in an interrupt.  Once we
+	 * implement nested interrupts, this can go away
+	 */
+	if (!in_interrupt())
+		set_eiem(cpu_eiem);
+
+	/* This is just a simple NOP IPI.  But what it does is cause
+	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
+	 * of the interrupt handler */
+	smp_send_all_nop();
 }
 
 static unsigned int cpu_startup_irq(unsigned int irq)
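Reviewer note: this hunk replaces the old synchronous on_each_cpu() cross-call with a lazy protocol. A minimal sketch of why a NOP IPI is enough, assuming (as the later hunks show) that every CPU ends do_cpu_irq_mask() with set_eiem(cpu_eiem); the example_* names are illustrative, not from the patch:

/*
 * Sketch only: the lazy cpu_eiem protocol.  Assumes the common
 * interrupt exit path runs set_eiem(cpu_eiem), as do_cpu_irq_mask()
 * does below.
 */
extern volatile unsigned long cpu_eiem;

void example_nop_ipi_handler(void)
{
	/* deliberately empty: the IPI exists only to force the
	 * receiving CPU through its interrupt exit path */
}

void example_interrupt_exit(void)
{
	/* every interrupt, including the NOP IPI, ends here, so a
	 * cpu_eiem update made by any CPU reaches this CPU's EIEM
	 * register the next time it takes any interrupt at all */
	set_eiem(cpu_eiem);
}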
@@ -74,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
 void no_ack_irq(unsigned int irq) { }
 void no_end_irq(unsigned int irq) { }
 
+#ifdef CONFIG_SMP
+int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+{
+	int cpu_dest;
+
+	/* timer and ipi have to always be received on all CPUs */
+	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+		/* Bad linux design decision.  The mask has already
+		 * been set; we must reset it */
+		irq_affinity[irq] = CPU_MASK_ALL;
+		return -EINVAL;
+	}
+
+	/* whatever mask they set, we just allow one CPU */
+	cpu_dest = first_cpu(*dest);
+	*dest = cpumask_of_cpu(cpu_dest);
+
+	return 0;
+}
+
+static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+{
+	if (cpu_check_affinity(irq, &dest))
+		return;
+
+	irq_affinity[irq] = dest;
+}
+#endif
+
 static struct hw_interrupt_type cpu_interrupt_type = {
 	.typename	= "CPU",
 	.startup	= cpu_startup_irq,
@@ -82,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
 	.disable	= cpu_disable_irq,
 	.ack		= no_ack_irq,
 	.end		= no_end_irq,
-//	.set_affinity	= cpu_set_affinity_irq,
+#ifdef CONFIG_SMP
+	.set_affinity	= cpu_set_affinity_irq,
+#endif
 };
 
 int show_interrupts(struct seq_file *p, void *v)
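Reviewer note: cpu_check_affinity() both validates the mask and rewrites it in place, so any mask userspace writes collapses to a single CPU. A sketch of just that reduction, assuming this kernel's first_cpu()/cpumask_of_cpu() cpumask API; the helper name is hypothetical:

/*
 * Sketch: whatever mask arrives, only the lowest-numbered CPU in it
 * survives, since each irq's transaction is pointed at exactly one
 * CPU on this hardware.
 */
static cpumask_t example_reduce_mask(cpumask_t dest)
{
	int cpu_dest = first_cpu(dest);		/* lowest set CPU */

	return cpumask_of_cpu(cpu_dest);	/* only that CPU remains */
}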
@@ -219,6 +261,17 @@ int txn_alloc_irq(unsigned int bits_wide)
 	return -1;
 }
 
+
+unsigned long txn_affinity_addr(unsigned int irq, int cpu)
+{
+#ifdef CONFIG_SMP
+	irq_affinity[irq] = cpumask_of_cpu(cpu);
+#endif
+
+	return cpu_data[cpu].txn_addr;
+}
+
+
 unsigned long txn_alloc_addr(unsigned int virt_irq)
 {
 	static int next_cpu = -1;
@@ -233,7 +286,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
 	if (next_cpu >= NR_CPUS)
 		next_cpu = 0;	/* nothing else, assign monarch */
 
-	return cpu_data[next_cpu].txn_addr;
+	return txn_affinity_addr(virt_irq, next_cpu);
 }
 
 
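Reviewer note: routing txn_alloc_addr() through txn_affinity_addr() records the round-robin CPU choice in irq_affinity[], so the redirect check in do_cpu_irq_mask() (below) agrees with where the device was actually pointed. A hypothetical driver-side sketch of the intended use; the device-programming step is omitted:

/*
 * Hypothetical caller: a driver binding an irq to a chosen CPU asks
 * for that CPU's transaction address, and txn_affinity_addr()
 * records the choice in irq_affinity[] as a side effect.
 */
static void example_bind_irq_to_cpu(unsigned int irq, int cpu)
{
	unsigned long txn_addr = txn_affinity_addr(irq, cpu);

	/* program the device to write its interrupt transaction to
	 * txn_addr (device-specific, omitted) */
	(void)txn_addr;
}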
@@ -250,10 +303,11 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq_enter();
 
 	/*
-	 * Only allow interrupt processing to be interrupted by the
-	 * timer tick
+	 * Don't allow TIMER or IPI nested interrupts.
+	 * Allowing any single interrupt to nest can lead to that CPU
+	 * handling interrupts with all enabled interrupts unmasked.
 	 */
-	set_eiem(EIEM_MASK(TIMER_IRQ));
+	set_eiem(0UL);
 
 	/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
 	 * 2) We loop here on EIRR contents in order to avoid
@@ -267,23 +321,41 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 		if (!eirr_val)
 			break;
 
-		if (eirr_val & EIEM_MASK(TIMER_IRQ))
-			set_eiem(0);
-
 		mtctl(eirr_val, 23); /* reset bits we are going to process */
 
 		/* Work our way from MSb to LSb...same order we alloc EIRs */
 		for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+#ifdef CONFIG_SMP
+			cpumask_t dest = irq_affinity[irq];
+#endif
 			if (!(bit & eirr_val))
 				continue;
 
 			/* clear bit in mask - can exit loop sooner */
 			eirr_val &= ~bit;
 
+#ifdef CONFIG_SMP
+			/* FIXME: because generic set affinity mucks
+			 * with the affinity before sending it to us
+			 * we can get the situation where the affinity is
+			 * wrong for our CPU type interrupts */
+			if (irq != TIMER_IRQ && irq != IPI_IRQ &&
+			    !cpu_isset(smp_processor_id(), dest)) {
+				int cpu = first_cpu(dest);
+
+				printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+				       irq, smp_processor_id(), cpu);
+				gsc_writel(irq + CPU_IRQ_BASE,
+					   cpu_data[cpu].hpa);
+				continue;
+			}
+#endif
+
 			__do_IRQ(irq, regs);
 		}
 	}
-	set_eiem(cpu_eiem);
+
+	set_eiem(cpu_eiem);	/* restore original mask */
 	irq_exit();
 }
 
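Reviewer note: the redirect path relies on a PA-RISC property visible in the hunk above: writing an interrupt's EIR number to a CPU's hard physical address (HPA) raises the matching EIRR bit on that CPU. Isolated as a sketch, using only names from the patch; the wrapper function itself is hypothetical:

/*
 * Sketch of the redirect step: re-deliver an irq that arrived on the
 * wrong CPU by writing its EIR number to the destination CPU's HPA,
 * so the hardware raises it again where the affinity mask says it
 * belongs.
 */
static void example_redirect_irq(unsigned int irq, cpumask_t dest)
{
	int cpu = first_cpu(dest);	/* affinity is single-CPU here */

	gsc_writel(irq + CPU_IRQ_BASE, cpu_data[cpu].hpa);
}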
@@ -291,12 +363,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 static struct irqaction timer_action = {
 	.handler = timer_interrupt,
 	.name = "timer",
+	.flags = SA_INTERRUPT,
 };
 
 #ifdef CONFIG_SMP
 static struct irqaction ipi_action = {
 	.handler = ipi_interrupt,
 	.name = "IPI",
+	.flags = SA_INTERRUPT,
 };
 #endif
 
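Reviewer note: SA_INTERRUPT makes the timer and IPI handlers run with local interrupts disabled, which matches the new set_eiem(0UL) policy above: nothing nests. A sketch of an equivalent registration, assuming setup_irq() as the usual arch-code registration path; the example_* names are illustrative:

/*
 * Sketch: an irqaction registered with SA_INTERRUPT runs with local
 * interrupts disabled, so it cannot itself be interrupted.
 */
static struct irqaction example_action = {
	.handler = timer_interrupt,
	.name	 = "timer",
	.flags	 = SA_INTERRUPT,	/* no nesting while it runs */
};

void example_register(void)
{
	setup_irq(TIMER_IRQ, &example_action);
}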