path: root/kernel/irq/migration.c
authorAndrea Bastoni <bastoni@cs.unc.edu>2011-08-27 09:43:54 -0400
committerAndrea Bastoni <bastoni@cs.unc.edu>2011-08-27 10:06:11 -0400
commit7b1bb388bc879ffcc6c69b567816d5c354afe42b (patch)
tree5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /kernel/irq/migration.c
parent7d754596756240fa918b94cd0c3011c77a638987 (diff)
parent02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge 'Linux v3.0' into Litmus
Some notes:

  * The Litmus^RT scheduling class is the topmost scheduling class
    (above stop_sched_class); see the sketch after the conflict list
    below.
  * The scheduler_ipi() function (e.g., in smp_reschedule_interrupt())
    may increase IPI latencies.
  * Added a path into schedule() to quickly re-evaluate the scheduling
    decision without becoming preemptible again. This used to be a
    standard path before the removal of the BKL.

Conflicts:
	Makefile
	arch/arm/kernel/calls.S
	arch/arm/kernel/smp.c
	arch/x86/include/asm/unistd_32.h
	arch/x86/kernel/smp.c
	arch/x86/kernel/syscall_table_32.S
	include/linux/hrtimer.h
	kernel/printk.c
	kernel/sched.c
	kernel/sched_fair.c
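A quick illustration of the first note: in kernels of this vintage the scheduling classes form a singly linked list that pick_next_task() walks from highest to lowest priority, so "topmost" simply means the LITMUS^RT class is consulted first. The sketch below shows the idea; the exact fields of litmus_sched_class and the sched_class_highest override are assumptions for illustration, not code taken from the Litmus tree.

/*
 * Hedged sketch (not from the Litmus tree): sched_class instances in
 * this era chain through a .next pointer and are walked downward from
 * sched_class_highest.  Hooking the LITMUS^RT class in above
 * stop_sched_class makes it the first class asked for a runnable task.
 */
static const struct sched_class litmus_sched_class = {
        .next = &stop_sched_class,      /* topmost: chain down to stop */
        /* .enqueue_task, .dequeue_task, .pick_next_task, ... */
};

#define sched_class_highest     (&litmus_sched_class)

#define for_each_class(class) \
        for (class = sched_class_highest; class; class = class->next)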
Diffstat (limited to 'kernel/irq/migration.c')
-rw-r--r--	kernel/irq/migration.c | 43
1 file changed, 26 insertions(+), 17 deletions(-)
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 241962280836..47420908fba0 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,27 +4,28 @@
 
 #include "internals.h"
 
-void move_masked_irq(int irq)
+void irq_move_masked_irq(struct irq_data *idata)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
+        struct irq_desc *desc = irq_data_to_desc(idata);
+        struct irq_chip *chip = idata->chip;
 
-        if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+        if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
                 return;
 
         /*
          * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
          */
-        if (CHECK_IRQ_PER_CPU(desc->status)) {
+        if (!irqd_can_balance(&desc->irq_data)) {
                 WARN_ON(1);
                 return;
         }
 
-        desc->status &= ~IRQ_MOVE_PENDING;
+        irqd_clr_move_pending(&desc->irq_data);
 
         if (unlikely(cpumask_empty(desc->pending_mask)))
                 return;
 
-        if (!desc->chip->set_affinity)
+        if (!chip->irq_set_affinity)
                 return;
 
         assert_raw_spin_locked(&desc->lock);
@@ -34,7 +35,7 @@ void move_masked_irq(int irq)
          * do the disable, re-program, enable sequence.
          * This is *not* particularly important for level triggered
          * but in a edge trigger case, we might be setting rte
-         * when an active trigger is comming in. This could
+         * when an active trigger is coming in. This could
          * cause some ioapics to mal-function.
          * Being paranoid i guess!
          *
@@ -43,26 +44,34 @@ void move_masked_irq(int irq)
          */
         if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                    < nr_cpu_ids))
-                if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
-                        cpumask_copy(desc->affinity, desc->pending_mask);
+                if (!chip->irq_set_affinity(&desc->irq_data,
+                                            desc->pending_mask, false)) {
+                        cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
                         irq_set_thread_affinity(desc);
                 }
 
         cpumask_clear(desc->pending_mask);
 }
 
-void move_native_irq(int irq)
+void irq_move_irq(struct irq_data *idata)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
+        bool masked;
 
-        if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+        if (likely(!irqd_is_setaffinity_pending(idata)))
                 return;
 
-        if (unlikely(desc->status & IRQ_DISABLED))
+        if (unlikely(irqd_irq_disabled(idata)))
                 return;
 
-        desc->chip->mask(irq);
-        move_masked_irq(irq);
-        desc->chip->unmask(irq);
+        /*
+         * Be careful vs. already masked interrupts. If this is a
+         * threaded interrupt with ONESHOT set, we can end up with an
+         * interrupt storm.
+         */
+        masked = irqd_irq_masked(idata);
+        if (!masked)
+                idata->chip->irq_mask(idata);
+        irq_move_masked_irq(idata);
+        if (!masked)
+                idata->chip->irq_unmask(idata);
 }
-
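The masked-interrupt guard added to irq_move_irq() is the behavioral change worth noting: a ONESHOT threaded interrupt arrives already masked, and the old move_native_irq() would unconditionally unmask it after the migration, letting the line re-fire before the handler thread had run and producing an interrupt storm. The new code records the mask state and only masks/unmasks when the line was not masked already. A minimal caller-side sketch follows, assuming an x86-style edge-ack path; demo_ack_edge() is a hypothetical callback for illustration, not code from this tree.

/*
 * Hedged sketch of a typical caller (kernel build context assumed,
 * <linux/irq.h> and <asm/apic.h>): arch ack paths of this era defer a
 * pending affinity change to irq_move_irq(), which migrates the line
 * while it is quiesced and restores the original mask state afterwards.
 * demo_ack_edge() is hypothetical; the helpers it calls are v3.0 APIs.
 */
static void demo_ack_edge(struct irq_data *data)
{
        irq_move_irq(data);     /* no-op unless an affinity change is pending */
        ack_APIC_irq();         /* then acknowledge at the local APIC */
}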