diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2015-12-31 11:30:48 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2016-01-15 07:44:00 -0500 |
commit | 3716fd27a604d61a91cda47083504971486b80f1 (patch) | |
tree | 7c996f0b3da0891e61a62304eb9290159956fd08 /arch/x86 | |
parent | 95ffeb4b5baca266e1d0d2bc90f1513e6f419cdd (diff) |
x86/irq: Check vector allocation early
__assign_irq_vector() uses the vector_cpumask which is assigned by
apic->vector_allocation_domain() without doing basic sanity checks. That can
result in a situation where the final assignment of a newly found vector
fails in apic->cpu_mask_to_apicid_and(). So we have to do rollbacks for no
reason.
apic->cpu_mask_to_apicid_and() only fails if
vector_cpumask & requested_cpumask & cpu_online_mask
is empty.
Check for this condition right away and, if the result is empty, immediately try
the next possible cpu in the requested mask. So in case of a failure the old
setting is unchanged and we can remove the rollback code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Borislav Petkov <bp@alien8.de>
Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Jeremiah Mahler <jmmahler@gmail.com>
Cc: andy.shevchenko@gmail.com
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: stable@vger.kernel.org #4.3+
Link: http://lkml.kernel.org/r/20151231160106.561877324@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kernel/apic/vector.c | 38 |
1 files changed, 25 insertions, 13 deletions
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index cef31955ab18..940e18d4dbcd 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -31,7 +31,7 @@ struct apic_chip_data { | |||
31 | struct irq_domain *x86_vector_domain; | 31 | struct irq_domain *x86_vector_domain; |
32 | EXPORT_SYMBOL_GPL(x86_vector_domain); | 32 | EXPORT_SYMBOL_GPL(x86_vector_domain); |
33 | static DEFINE_RAW_SPINLOCK(vector_lock); | 33 | static DEFINE_RAW_SPINLOCK(vector_lock); |
34 | static cpumask_var_t vector_cpumask, searched_cpumask; | 34 | static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask; |
35 | static struct irq_chip lapic_controller; | 35 | static struct irq_chip lapic_controller; |
36 | #ifdef CONFIG_X86_IO_APIC | 36 | #ifdef CONFIG_X86_IO_APIC |
37 | static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY]; | 37 | static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY]; |
@@ -130,8 +130,20 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d, | |||
130 | while (cpu < nr_cpu_ids) { | 130 | while (cpu < nr_cpu_ids) { |
131 | int new_cpu, vector, offset; | 131 | int new_cpu, vector, offset; |
132 | 132 | ||
133 | /* Get the possible target cpus for @mask/@cpu from the apic */ | ||
133 | apic->vector_allocation_domain(cpu, vector_cpumask, mask); | 134 | apic->vector_allocation_domain(cpu, vector_cpumask, mask); |
134 | 135 | ||
136 | /* | ||
137 | * Clear the offline cpus from @vector_cpumask for searching | ||
138 | * and verify whether the result overlaps with @mask. If true, | ||
139 | * then the call to apic->cpu_mask_to_apicid_and() will | ||
140 | * succeed as well. If not, no point in trying to find a | ||
141 | * vector in this mask. | ||
142 | */ | ||
143 | cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask); | ||
144 | if (!cpumask_intersects(vector_searchmask, mask)) | ||
145 | goto next_cpu; | ||
146 | |||
135 | if (cpumask_subset(vector_cpumask, d->domain)) { | 147 | if (cpumask_subset(vector_cpumask, d->domain)) { |
136 | if (cpumask_equal(vector_cpumask, d->domain)) | 148 | if (cpumask_equal(vector_cpumask, d->domain)) |
137 | goto success; | 149 | goto success; |
@@ -164,7 +176,7 @@ next: | |||
164 | if (test_bit(vector, used_vectors)) | 176 | if (test_bit(vector, used_vectors)) |
165 | goto next; | 177 | goto next; |
166 | 178 | ||
167 | for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) { | 179 | for_each_cpu(new_cpu, vector_searchmask) { |
168 | if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) | 180 | if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) |
169 | goto next; | 181 | goto next; |
170 | } | 182 | } |
@@ -176,7 +188,7 @@ next: | |||
176 | d->move_in_progress = | 188 | d->move_in_progress = |
177 | cpumask_intersects(d->old_domain, cpu_online_mask); | 189 | cpumask_intersects(d->old_domain, cpu_online_mask); |
178 | } | 190 | } |
179 | for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) | 191 | for_each_cpu(new_cpu, vector_searchmask) |
180 | per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); | 192 | per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); |
181 | d->cfg.vector = vector; | 193 | d->cfg.vector = vector; |
182 | cpumask_copy(d->domain, vector_cpumask); | 194 | cpumask_copy(d->domain, vector_cpumask); |
@@ -198,8 +210,14 @@ next_cpu: | |||
198 | return -ENOSPC; | 210 | return -ENOSPC; |
199 | 211 | ||
200 | success: | 212 | success: |
201 | /* cache destination APIC IDs into cfg->dest_apicid */ | 213 | /* |
202 | return apic->cpu_mask_to_apicid_and(mask, d->domain, &d->cfg.dest_apicid); | 214 | * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail |
215 | * as we already established, that mask & d->domain & cpu_online_mask | ||
216 | * is not empty. | ||
217 | */ | ||
218 | BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain, | ||
219 | &d->cfg.dest_apicid)); | ||
220 | return 0; | ||
203 | } | 221 | } |
204 | 222 | ||
205 | static int assign_irq_vector(int irq, struct apic_chip_data *data, | 223 | static int assign_irq_vector(int irq, struct apic_chip_data *data, |
@@ -409,6 +427,7 @@ int __init arch_early_irq_init(void) | |||
409 | arch_init_htirq_domain(x86_vector_domain); | 427 | arch_init_htirq_domain(x86_vector_domain); |
410 | 428 | ||
411 | BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL)); | 429 | BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL)); |
430 | BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL)); | ||
412 | BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL)); | 431 | BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL)); |
413 | 432 | ||
414 | return arch_early_ioapic_init(); | 433 | return arch_early_ioapic_init(); |
@@ -498,14 +517,7 @@ static int apic_set_affinity(struct irq_data *irq_data, | |||
498 | return -EINVAL; | 517 | return -EINVAL; |
499 | 518 | ||
500 | err = assign_irq_vector(irq, data, dest); | 519 | err = assign_irq_vector(irq, data, dest); |
501 | if (err) { | 520 | return err ? err : IRQ_SET_MASK_OK; |
502 | if (assign_irq_vector(irq, data, | ||
503 | irq_data_get_affinity_mask(irq_data))) | ||
504 | pr_err("Failed to recover vector for irq %d\n", irq); | ||
505 | return err; | ||
506 | } | ||
507 | |||
508 | return IRQ_SET_MASK_OK; | ||
509 | } | 521 | } |
510 | 522 | ||
511 | static struct irq_chip lapic_controller = { | 523 | static struct irq_chip lapic_controller = { |