author		James Bottomley <jejb@parisc-linux.org>	2005-11-17 16:28:37 -0500
committer	Kyle McMartin <kyle@parisc-linux.org>	2005-11-17 16:28:37 -0500
commit		c2ab64d09815cc4d48347ee3679658f197455a2a (patch)
tree		7b6bde77c712c4db52717f70d593c5d8f4ce6bf9 /arch
parent		1d4c452a85503cdb4bca5925cf698b61d3aa43a0 (diff)
[PARISC] Add IRQ affinities
This really only adds IRQ affinities for the machines I can check SMP on, namely the CPU interrupts and the IOSAPIC (so not any of the GSC based machines). With this patch, irqbalanced can be used to maintain IRQ balancing. Unfortunately, irqbalanced is a bit x86 centric, so it doesn't do an incredibly good job, but it does work.

Signed-off-by: James Bottomley <jejb@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
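For context on how the new hook gets exercised: a balancer such as irqbalanced changes an interrupt's affinity by writing a CPU mask to /proc/irq/<N>/smp_affinity, which ultimately lands in the .set_affinity handler this patch wires up. Below is a minimal userspace sketch of such a write; the IRQ number and mask are hypothetical examples, and the program is purely illustrative, not part of the patch.

/*
 * Minimal userspace sketch (illustrative only): write a CPU mask to
 * /proc/irq/<N>/smp_affinity, the same interface irqbalanced uses.
 * The IRQ number and mask below are hypothetical examples.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int irq = 18;		/* hypothetical example IRQ */
	const unsigned long mask = 0x2;	/* bind to CPU 1 */
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/irq/%d/smp_affinity", irq);
	f = fopen(path, "w");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* the kernel parses this hex mask and hands it to .set_affinity */
	fprintf(f, "%lx\n", mask);
	fclose(f);
	return EXIT_SUCCESS;
}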
Diffstat (limited to 'arch')
-rw-r--r--	arch/parisc/kernel/irq.c | 60
1 file changed, 58 insertions(+), 2 deletions(-)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 865611c15531..2626405e70c4 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <asm/io.h>
 
 #include <asm/smp.h>
 
@@ -84,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
 void no_ack_irq(unsigned int irq) { }
 void no_end_irq(unsigned int irq) { }
 
+#ifdef CONFIG_SMP
+int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+{
+	int cpu_dest;
+
+	/* timer and ipi have to always be received on all CPUs */
+	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+		/* Bad linux design decision. The mask has already
+		 * been set; we must reset it */
+		irq_affinity[irq] = CPU_MASK_ALL;
+		return -EINVAL;
+	}
+
+	/* whatever mask they set, we just allow one CPU */
+	cpu_dest = first_cpu(*dest);
+	*dest = cpumask_of_cpu(cpu_dest);
+
+	return 0;
+}
+
+static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+{
+	if (cpu_check_affinity(irq, &dest))
+		return;
+
+	irq_affinity[irq] = dest;
+}
+#endif
+
 static struct hw_interrupt_type cpu_interrupt_type = {
 	.typename	= "CPU",
 	.startup	= cpu_startup_irq,
@@ -92,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
 	.disable	= cpu_disable_irq,
 	.ack		= no_ack_irq,
 	.end		= no_end_irq,
-//	.set_affinity	= cpu_set_affinity_irq,
+#ifdef CONFIG_SMP
+	.set_affinity	= cpu_set_affinity_irq,
+#endif
 };
 
 int show_interrupts(struct seq_file *p, void *v)
@@ -229,6 +261,13 @@ int txn_alloc_irq(unsigned int bits_wide)
 	return -1;
 }
 
+unsigned long txn_affinity_addr(unsigned int irq, int cpu)
+{
+	irq_affinity[irq] = cpumask_of_cpu(cpu);
+
+	return cpu_data[cpu].txn_addr;
+}
+
 unsigned long txn_alloc_addr(unsigned int virt_irq)
 {
 	static int next_cpu = -1;
@@ -243,7 +282,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
 	if (next_cpu >= NR_CPUS)
 		next_cpu = 0;	/* nothing else, assign monarch */
 
-	return cpu_data[next_cpu].txn_addr;
+	return txn_affinity_addr(virt_irq, next_cpu);
 }
 
 
@@ -282,12 +321,29 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 
 	/* Work our way from MSb to LSb...same order we alloc EIRs */
 	for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+		cpumask_t dest = irq_affinity[irq];
+
 		if (!(bit & eirr_val))
 			continue;
 
 		/* clear bit in mask - can exit loop sooner */
 		eirr_val &= ~bit;
 
+		/* FIXME: because generic set affinity mucks
+		 * with the affinity before sending it to us
+		 * we can get the situation where the affinity is
+		 * wrong for our CPU type interrupts */
+		if (irq != TIMER_IRQ && irq != IPI_IRQ &&
+		    !cpu_isset(smp_processor_id(), dest)) {
+			int cpu = first_cpu(dest);
+
+			printk("rethrowing irq %d from %d to %d\n",
+			       irq, smp_processor_id(), cpu);
+			gsc_writel(irq + CPU_IRQ_BASE,
+				   cpu_data[cpu].hpa);
+			continue;
+		}
+
 		__do_IRQ(irq, regs);
 	}
 }
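As a footnote on the design choice in cpu_check_affinity() above: whatever mask userspace requests, only the first CPU in it is honoured, and TIMER_IRQ/IPI_IRQ are forced back to all CPUs. The standalone sketch below mimics that rule with a plain unsigned long standing in for cpumask_t; NR_CPUS, TIMER_IRQ and IPI_IRQ here are hypothetical stand-ins rather than the kernel's values.

/*
 * Standalone sketch of the clamping rule in cpu_check_affinity().
 * A plain unsigned long stands in for cpumask_t; TIMER_IRQ, IPI_IRQ
 * and NR_CPUS below are hypothetical stand-ins, not kernel values.
 */
#include <stdio.h>

#define NR_CPUS		4
#define TIMER_IRQ	0
#define IPI_IRQ		1
#define CPU_MASK_ALL	((1UL << NR_CPUS) - 1)

static unsigned long irq_affinity[64];

static int check_affinity(unsigned int irq, unsigned long *dest)
{
	/* timer and IPI must always be received on all CPUs */
	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
		irq_affinity[irq] = CPU_MASK_ALL;
		return -1;
	}

	/* whatever mask was requested, keep only the first CPU in it */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (*dest & (1UL << cpu)) {
			*dest = 1UL << cpu;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long mask = 0x6;	/* CPUs 1 and 2 requested */

	if (check_affinity(5, &mask) == 0)
		printf("irq 5 clamped to mask %#lx\n", mask);	/* 0x2 */

	mask = 0x6;
	if (check_affinity(TIMER_IRQ, &mask) != 0)
		printf("timer affinity rejected, reset to %#lx\n",
		       irq_affinity[TIMER_IRQ]);		/* 0xf */
	return 0;
}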