Diffstat (limited to 'arch/parisc/kernel/irq.c')
-rw-r--r--  arch/parisc/kernel/irq.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
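The central semantic change below: cpu_check_affinity() now returns the CPU it selected (which may legitimately be CPU 0) or a negative errno, instead of 0 on success, and it no longer rewrites the caller's mask; cpu_set_affinity_irq() therefore tests for a negative return rather than any non-zero value. A minimal userspace sketch of that contract follows, using hypothetical stand-in helpers (check_affinity/set_affinity are illustrations only, not the kernel functions):

#include <stdio.h>

/* Stand-in modelling the new convention: a value >= 0 is the single
 * CPU selected from the requested mask, a negative value means the
 * request is rejected (e.g. for per-CPU IRQs). */
static int check_affinity(int requested_first_cpu, int irq_is_per_cpu)
{
	if (irq_is_per_cpu)
		return -22;		/* -EINVAL: per-CPU IRQs keep their mask */
	return requested_first_cpu;	/* the one CPU we allow */
}

static void set_affinity(int requested_first_cpu, int irq_is_per_cpu)
{
	int cpu_dest = check_affinity(requested_first_cpu, irq_is_per_cpu);

	/* Must be "< 0", not "!= 0": CPU 0 is a valid destination. */
	if (cpu_dest < 0)
		return;
	printf("IRQ bound to CPU %d\n", cpu_dest);
}

int main(void)
{
	set_affinity(0, 0);	/* CPU 0 is accepted under the new contract */
	set_affinity(3, 1);	/* per-CPU IRQ, rejected */
	return 0;
}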
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 49482806863f..2b5f5915dd1d 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -112,7 +112,7 @@ void cpu_end_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
 {
 	int cpu_dest;
 
@@ -120,23 +120,25 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision.  The mask has already
 		 * been set; we must reset it */
-		cpumask_setall(irq_desc[irq].affinity);
+		cpumask_setall(&irq_desc[irq].affinity);
 		return -EINVAL;
 	}
 
 	/* whatever mask they set, we just allow one CPU */
 	cpu_dest = first_cpu(*dest);
-	*dest = cpumask_of_cpu(cpu_dest);
 
-	return 0;
+	return cpu_dest;
 }
 
 static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 {
-	if (cpu_check_affinity(irq, dest))
+	int cpu_dest;
+
+	cpu_dest = cpu_check_affinity(irq, dest);
+	if (cpu_dest < 0)
 		return;
 
-	cpumask_copy(irq_desc[irq].affinity, dest);
+	cpumask_copy(&irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -295,7 +297,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+	cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +354,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	cpumask_copy(&dest, irq_desc[irq].affinity);
+	cpumask_copy(&dest, &irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);