Diffstat (limited to 'kernel')

-rw-r--r--  kernel/irq/chip.c       |  8
-rw-r--r--  kernel/irq/internals.h  |  3
-rw-r--r--  kernel/irq/manage.c     | 39
-rw-r--r--  kernel/irq/migration.c  | 13
-rw-r--r--  kernel/smpboot.c        | 17
5 files changed, 44 insertions, 36 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fc275e4f629b..eebd6d5cfb44 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq)
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
-	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
+	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
 		goto out_unlock;
+	}
 
 	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 	raw_spin_unlock_irq(&desc->lock);
@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
 
-	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
 		goto out_unlock;
+	}
 
 	handle_irq_event(desc);
 
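Both hunks make the same change: when handle_nested_irq() or handle_simple_irq() finds no action installed or the line disabled, it now latches IRQS_PENDING instead of silently dropping the event, so the interrupt can be replayed once the line is enabled again. A minimal standalone sketch of that latch-and-replay pattern, with illustrative names rather than kernel API:

#include <stdbool.h>
#include <stddef.h>

struct toy_desc {
	bool disabled;
	bool pending;			/* plays the role of IRQS_PENDING */
};

static void toy_handle(struct toy_desc *d, void (*action)(void))
{
	if (!action || d->disabled) {
		d->pending = true;	/* remember the event instead of dropping it */
		return;
	}
	action();
}

static void toy_enable(struct toy_desc *d, void (*action)(void))
{
	d->disabled = false;
	if (d->pending) {		/* replay what arrived while disabled */
		d->pending = false;
		toy_handle(d, action);
	}
}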
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 8e5c56b3b7d9..001fa5bab490 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
+extern int irq_do_set_affinity(struct irq_data *data,
+			       const struct cpumask *dest, bool force);
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ea0c6c2ae6f7..8c548232ba39 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -142,6 +142,25 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			bool force)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	int ret;
+
+	ret = chip->irq_set_affinity(data, mask, false);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		cpumask_copy(data->affinity, mask);
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_set_thread_affinity(desc);
+		ret = 0;
+	}
+
+	return ret;
+}
+
 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -152,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 		return -EINVAL;
 
 	if (irq_can_move_pcntxt(data)) {
-		ret = chip->irq_set_affinity(data, mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(data->affinity, mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-			ret = 0;
-		}
+		ret = irq_do_set_affinity(data, mask, false);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
@@ -283,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
-	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct cpumask *set = irq_default_affinity;
-	int ret, node = desc->irq_data.node;
+	int node = desc->irq_data.node;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
@@ -311,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 		if (cpumask_intersects(mask, nodemask))
 			cpumask_and(mask, mask, nodemask);
 	}
-	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-	switch (ret) {
-	case IRQ_SET_MASK_OK:
-		cpumask_copy(desc->irq_data.affinity, mask);
-	case IRQ_SET_MASK_OK_NOCOPY:
-		irq_set_thread_affinity(desc);
-	}
+	irq_do_set_affinity(&desc->irq_data, mask, false);
 	return 0;
 }
 #else
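The new irq_do_set_affinity() helper consolidates three copies of the chip-callback-plus-switch sequence: __irq_set_affinity_locked() and setup_affinity() above, and irq_move_masked_irq() below. The missing break after IRQ_SET_MASK_OK is deliberate: that case copies the new mask into data->affinity and then falls through into the IRQ_SET_MASK_OK_NOCOPY tail, which only updates the threaded-handler affinity. Note also that the helper accepts a force parameter but still passes a literal false to chip->irq_set_affinity(). A runnable standalone sketch of the fall-through idiom, with illustrative names rather than the kernel's:

#include <stdio.h>

enum mask_ret { MASK_OK, MASK_OK_NOCOPY, MASK_FAIL };

/* Mirrors the switch in irq_do_set_affinity(): MASK_OK performs the
 * copy, then deliberately falls through into the shared tail. */
static int toy_apply(enum mask_ret ret)
{
	switch (ret) {
	case MASK_OK:
		printf("copy new mask into irq data\n");
		/* fall through */
	case MASK_OK_NOCOPY:
		printf("update threaded-handler affinity\n");
		return 0;
	default:
		return -1;	/* propagate the chip's error code */
	}
}

int main(void)
{
	toy_apply(MASK_OK);		/* copies, then updates threads */
	toy_apply(MASK_OK_NOCOPY);	/* chip already stored the mask */
	return 0;
}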
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index c3c89751b327..ca3f4aaff707 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids)) {
-		int ret = chip->irq_set_affinity(&desc->irq_data,
-						 desc->pending_mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-		}
-	}
+	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
 
 	cpumask_clear(desc->pending_mask);
 }
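irq_move_masked_irq() now collapses to a single guarded call into the shared helper. The guard works because cpumask_any_and() returns a CPU number below nr_cpu_ids only when the two masks intersect; if every CPU in pending_mask has gone offline, it returns nr_cpu_ids or higher and the move is skipped. A toy model of that convention, using plain bitmasks in place of struct cpumask:

#include <stdio.h>

#define TOY_NR_CPUS 4u

/* Returns the first cpu set in both masks, or TOY_NR_CPUS when the
 * intersection is empty -- the same ">= nr_cpu_ids" sentinel idea. */
static unsigned int toy_any_and(unsigned int a, unsigned int b)
{
	unsigned int both = a & b, cpu;

	for (cpu = 0; cpu < TOY_NR_CPUS; cpu++)
		if (both & (1u << cpu))
			return cpu;
	return TOY_NR_CPUS;
}

int main(void)
{
	unsigned int pending = 0x8;	/* target: cpu 3 */
	unsigned int online = 0x3;	/* online: cpus 0 and 1 */

	if (toy_any_and(pending, online) < TOY_NR_CPUS)
		printf("move the irq\n");
	else
		printf("no online target, skip the move\n");
	return 0;
}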
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index e1a797e028a3..98f60c5caa1b 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void)
 	per_cpu(idle_threads, smp_processor_id()) = current;
 }
 
+/**
+ * idle_init - Initialize the idle thread for a cpu
+ * @cpu:	The cpu for which the idle thread should be initialized
+ *
+ * Creates the thread if it does not exist.
+ */
 static inline void idle_init(unsigned int cpu)
 {
 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
@@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu)
 }
 
 /**
- * idle_thread_init - Initialize the idle thread for a cpu
- * @cpu:	The cpu for which the idle thread should be initialized
- *
- * Creates the thread if it does not exist.
+ * idle_threads_init - Initialize idle threads for all cpus
  */
 void __init idle_threads_init(void)
 {
-	unsigned int cpu;
+	unsigned int cpu, boot_cpu;
+
+	boot_cpu = smp_processor_id();
 
 	for_each_possible_cpu(cpu) {
-		if (cpu != smp_processor_id())
+		if (cpu != boot_cpu)
 			idle_init(cpu);
 	}
 }
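Two things happen here: the kernel-doc block that described idle_init() is moved onto that function (it had been attached to idle_threads_init() under the stale name idle_thread_init), and smp_processor_id() is hoisted out of the loop into boot_cpu, so the boot CPU is looked up once instead of on every iteration. Reconstructed from the hunks above, the function now reads:

void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		/* the boot cpu's idle task was recorded by idle_thread_set_boot_cpu() */
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}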
