Diffstat (limited to 'kernel/irq/migration.c')
-rw-r--r--  kernel/irq/migration.c | 44
1 files changed, 31 insertions, 13 deletions
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 1d254194048..ec4806d4778 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,23 +4,23 @@
 
 #include "internals.h"
 
-void move_masked_irq(int irq)
+void irq_move_masked_irq(struct irq_data *idata)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_chip *chip = desc->irq_data.chip;
+	struct irq_desc *desc = irq_data_to_desc(idata);
+	struct irq_chip *chip = idata->chip;
 
-	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
 		return;
 
 	/*
 	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
 	 */
-	if (CHECK_IRQ_PER_CPU(desc->status)) {
+	if (!irqd_can_balance(&desc->irq_data)) {
 		WARN_ON(1);
 		return;
 	}
 
-	desc->status &= ~IRQ_MOVE_PENDING;
+	irqd_clr_move_pending(&desc->irq_data);
 
 	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
@@ -53,18 +53,36 @@ void move_masked_irq(int irq)
 	cpumask_clear(desc->pending_mask);
 }
 
-void move_native_irq(int irq)
+void move_masked_irq(int irq)
+{
+	irq_move_masked_irq(irq_get_irq_data(irq));
+}
+
+void irq_move_irq(struct irq_data *idata)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_desc *desc = irq_data_to_desc(idata);
+	bool masked;
 
-	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
+	if (likely(!irqd_is_setaffinity_pending(idata)))
 		return;
 
-	if (unlikely(desc->status & IRQ_DISABLED))
+	if (unlikely(desc->istate & IRQS_DISABLED))
 		return;
 
-	desc->irq_data.chip->irq_mask(&desc->irq_data);
-	move_masked_irq(irq);
-	desc->irq_data.chip->irq_unmask(&desc->irq_data);
+	/*
+	 * Be careful vs. already masked interrupts. If this is a
+	 * threaded interrupt with ONESHOT set, we can end up with an
+	 * interrupt storm.
+	 */
+	masked = desc->istate & IRQS_MASKED;
+	if (!masked)
+		idata->chip->irq_mask(idata);
+	irq_move_masked_irq(idata);
+	if (!masked)
+		idata->chip->irq_unmask(idata);
 }
 
+void move_native_irq(int irq)
+{
+	irq_move_irq(irq_get_irq_data(irq));
+}
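
For context, here is a minimal standalone sketch of the pattern the new irq_move_irq() follows in the hunk above: mask the line only if it is not already masked (IRQS_MASKED), perform the pending affinity move, and unmask only if this function did the masking, so an already-masked interrupt (e.g. a threaded handler with ONESHOT) is not unmasked behind its back. The struct and function names below (fake_irq, chip_mask, move_masked, move_irq) are illustrative stand-ins, not kernel APIs; only the conditional mask/move/unmask ordering mirrors the diff.

/*
 * Illustrative user-space sketch (not kernel code): mirrors the
 * conditional mask -> move -> unmask ordering of irq_move_irq()
 * so an already-masked interrupt is left masked afterwards.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_irq {
	bool move_pending;	/* stand-in for irqd_is_setaffinity_pending() */
	bool disabled;		/* stand-in for IRQS_DISABLED */
	bool masked;		/* stand-in for IRQS_MASKED */
};

static void chip_mask(struct fake_irq *irq)   { irq->masked = true;  puts("mask");   }
static void chip_unmask(struct fake_irq *irq) { irq->masked = false; puts("unmask"); }

/* Stand-in for irq_move_masked_irq(): apply the deferred affinity change. */
static void move_masked(struct fake_irq *irq)
{
	irq->move_pending = false;
	puts("affinity moved");
}

/* Stand-in for irq_move_irq(): only touch the mask if we own it. */
static void move_irq(struct fake_irq *irq)
{
	bool masked;

	if (!irq->move_pending || irq->disabled)
		return;

	masked = irq->masked;
	if (!masked)
		chip_mask(irq);
	move_masked(irq);
	if (!masked)
		chip_unmask(irq);
}

int main(void)
{
	/* Already-masked case: no spurious unmask afterwards. */
	struct fake_irq oneshot = { .move_pending = true, .masked = true };
	move_irq(&oneshot);

	/* Normal case: mask around the move, then restore. */
	struct fake_irq normal = { .move_pending = true };
	move_irq(&normal);
	return 0;
}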