Diffstat (limited to 'kernel/irq')

 kernel/irq/Makefile    |  3 +--
 kernel/irq/manage.c    | 23 +++++++++++++++++-----
 kernel/irq/migration.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 84 insertions(+), 7 deletions(-)
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 49378738ff5e..2b33f852be3e 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,5 +1,4 @@
 
-obj-y := handle.o manage.o spurious.o
+obj-y := handle.o manage.o spurious.o migration.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
-
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 97d5559997d2..6edfcef291e8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -204,10 +204,14 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	p = &desc->action;
 	if ((old = *p) != NULL) {
 		/* Can't share interrupts unless both agree to */
-		if (!(old->flags & new->flags & SA_SHIRQ)) {
-			spin_unlock_irqrestore(&desc->lock,flags);
-			return -EBUSY;
-		}
+		if (!(old->flags & new->flags & SA_SHIRQ))
+			goto mismatch;
+
+#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+		/* All handlers must agree on per-cpuness */
+		if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU))
+			goto mismatch;
+#endif
 
 		/* add new interrupt at end of irq queue */
 		do {
@@ -218,7 +222,10 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	}
 
 	*p = new;
-
+#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+	if (new->flags & SA_PERCPU_IRQ)
+		desc->status |= IRQ_PER_CPU;
+#endif
 	if (!shared) {
 		desc->depth = 0;
 		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
@@ -236,6 +243,12 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	register_handler_proc(irq, new);
 
 	return 0;
+
+mismatch:
+	spin_unlock_irqrestore(&desc->lock, flags);
+	printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
+	dump_stack();
+	return -EBUSY;
 }
 
 /**
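
For context, a minimal sketch (not part of the patch) of what the new mismatch path means for callers of this era's request_irq(); MY_IRQ, the handlers and the device cookies are hypothetical:

#include <linux/interrupt.h>

static irqreturn_t handler_a(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static irqreturn_t handler_b(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static int claim_shared_line(void *dev_a, void *dev_b)
{
	int err;

	/* Both claimants pass SA_SHIRQ, so setup_irq() chains handler_b
	 * after handler_a and returns 0. */
	err = request_irq(MY_IRQ, handler_a, SA_SHIRQ, "dev-a", dev_a);
	if (err)
		return err;

	/* Dropping SA_SHIRQ here would now take the mismatch path:
	 * setup_irq() logs "setup_irq: irq handler mismatch", calls
	 * dump_stack() and returns -EBUSY, instead of silently failing
	 * as before. */
	err = request_irq(MY_IRQ, handler_b, SA_SHIRQ, "dev-b", dev_b);
	if (err)
		free_irq(MY_IRQ, dev_a);

	return err;
}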
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
new file mode 100644
index 000000000000..52a8655fa080
--- /dev/null
+++ b/kernel/irq/migration.c
@@ -0,0 +1,65 @@
+#include <linux/irq.h>
+
+#if defined(CONFIG_GENERIC_PENDING_IRQ)
+
+void set_pending_irq(unsigned int irq, cpumask_t mask)
+{
+	irq_desc_t *desc = irq_desc + irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	desc->move_irq = 1;
+	pending_irq_cpumask[irq] = mask;
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+void move_native_irq(int irq)
+{
+	cpumask_t tmp;
+	irq_desc_t *desc = irq_descp(irq);
+
+	if (likely(!desc->move_irq))
+		return;
+
+	/*
+	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
+	 */
+	if (CHECK_IRQ_PER_CPU(desc->status)) {
+		WARN_ON(1);
+		return;
+	}
+
+	desc->move_irq = 0;
+
+	if (likely(cpus_empty(pending_irq_cpumask[irq])))
+		return;
+
+	if (!desc->handler->set_affinity)
+		return;
+
+	assert_spin_locked(&desc->lock);
+
+	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+
+	/*
+	 * If there is a valid mask to work with, do the
+	 * disable, re-program, enable sequence.  This is *not*
+	 * particularly important for level-triggered lines, but
+	 * in an edge-triggered case we might be setting the RTE
+	 * while an active trigger is coming in, which could make
+	 * some ioapics malfunction.
+	 * Being paranoid, I guess!
+	 */
+	if (unlikely(!cpus_empty(tmp))) {
+		if (likely(!(desc->status & IRQ_DISABLED)))
+			desc->handler->disable(irq);
+
+		desc->handler->set_affinity(irq, tmp);
+
+		if (likely(!(desc->status & IRQ_DISABLED)))
+			desc->handler->enable(irq);
+	}
+	cpus_clear(pending_irq_cpumask[irq]);
+}
+
+#endif
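
A hedged sketch of how an architecture might drive these two helpers, assuming CONFIG_GENERIC_PENDING_IRQ; the arch_* wrappers are hypothetical, only set_pending_irq() and move_native_irq() come from the patch above. The idea is to split migration into a writer side that merely records the target mask, and an interrupt-time side that reprograms routing at a safe point:

#include <linux/irq.h>

/* /proc/irq/N/smp_affinity write path: defer the change rather than
 * reprogramming the routing entry while the line may be active. */
static void arch_set_irq_affinity(unsigned int irq, cpumask_t new_mask)
{
	set_pending_irq(irq, new_mask);	/* takes desc->lock itself */
}

/* Per-interrupt path, called with desc->lock held and the source
 * acked/masked: a safe point to complete any pending migration.
 * move_native_irq() is a cheap no-op unless desc->move_irq is set. */
static void arch_ack_irq(unsigned int irq)
{
	move_native_irq(irq);
}

Deferring the set_affinity() call to the next interrupt is what makes the disable/re-program/enable sequence in move_native_irq() safe on edge-triggered lines.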