| author | Thomas Gleixner <tglx@linutronix.de> | 2015-07-05 13:12:33 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2015-07-07 05:54:04 -0400 |
| commit | cbb24dc761d95fe39a7a122bb1b298e9604cae15 | |
| tree | 796028ad04f906c2c60f7fe998ed6bc7dc623d14 | |
| parent | 5a3f75e3f02836518ce49536e9c460ca8e1fa290 | |
x86/irq: Use proper locking in check_irq_vectors_for_cpu_disable()
It's unsafe to examine fields in the irq descriptor w/o holding the
descriptor lock. Add proper locking.
While at it, add a comment explaining why the vector check can run lockless.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: xiao jin <jin.xiao@intel.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Yanmin Zhang <yanmin_zhang@linux.intel.com>
Link: http://lkml.kernel.org/r/20150705171102.236544164@linutronix.de
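
For context, a minimal sketch of the locking rule the patch enforces: take the descriptor lock before reading irq_data fields such as the affinity mask, and drop it on every exit path. The helper name irq_counts_for_cpu_disable() and its signature are illustrative (not kernel API); data->affinity follows the irq_data layout of that era, as used in the diff below.

```c
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/cpumask.h>

/*
 * Sketch only: decide whether @irq must be accounted for when taking
 * @this_cpu offline, reading the descriptor fields under desc->lock so
 * a concurrent action removal or affinity change cannot race with us.
 */
static bool irq_counts_for_cpu_disable(unsigned int irq, unsigned int this_cpu,
				       struct cpumask *affinity_new)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_data *data;
	bool counts;

	if (!desc)
		return false;

	raw_spin_lock(&desc->lock);
	data = irq_desc_get_irq_data(desc);
	cpumask_copy(affinity_new, data->affinity);
	cpumask_clear_cpu(this_cpu, affinity_new);
	/* Inactive and per-cpu irqs are not counted. */
	counts = irq_has_action(irq) && !irqd_is_per_cpu(data);
	raw_spin_unlock(&desc->lock);

	return counts;
}
```

Note how the patched function also has to add an unlock on the early `continue` path; folding the whole check into a single locked section, as in the sketch, is one way to avoid duplicated unlock sites.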
| -rw-r--r-- | arch/x86/kernel/irq.c | 13 |
1 file changed, 12 insertions(+), 1 deletion(-)
```diff
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 88b366487b0e..85ca76e6241c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -347,14 +347,22 @@ int check_irq_vectors_for_cpu_disable(void)
 		if (!desc)
 			continue;
 
+		/*
+		 * Protect against concurrent action removal,
+		 * affinity changes etc.
+		 */
+		raw_spin_lock(&desc->lock);
 		data = irq_desc_get_irq_data(desc);
 		cpumask_copy(&affinity_new, data->affinity);
 		cpumask_clear_cpu(this_cpu, &affinity_new);
 
 		/* Do not count inactive or per-cpu irqs. */
-		if (!irq_has_action(irq) || irqd_is_per_cpu(data))
+		if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
+			raw_spin_unlock(&desc->lock);
 			continue;
+		}
 
+		raw_spin_unlock(&desc->lock);
 		/*
 		 * A single irq may be mapped to multiple
 		 * cpu's vector_irq[] (for example IOAPIC cluster
@@ -385,6 +393,9 @@ int check_irq_vectors_for_cpu_disable(void)
 		 * vector. If the vector is marked in the used vectors
 		 * bitmap or an irq is assigned to it, we don't count
 		 * it as available.
+		 *
+		 * As this is an inaccurate snapshot anyway, we can do
+		 * this w/o holding vector_lock.
 		 */
 		for (vector = FIRST_EXTERNAL_VECTOR;
 		     vector < first_system_vector; vector++) {
```

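A hedged sketch of the lockless snapshot the new comment refers to: scanning a CPU's vector table for entries that are neither reserved in used_vectors nor bound to an irq. The helper name count_available_vectors() is illustrative, and the assumption that per-cpu vector_irq entries are plain ints (negative when unused) matches kernels of that era but is not taken from this patch.

```c
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <asm/desc.h>
#include <asm/hw_irq.h>
#include <asm/irq_vectors.h>

/*
 * Sketch only: count externally usable vectors on @cpu that are still
 * free.  Because the result is an approximate snapshot that may be
 * stale by the time it is used, vector_lock is not taken here - which
 * is exactly the point of the comment added by the patch.
 */
static unsigned int count_available_vectors(unsigned int cpu)
{
	unsigned int vector, avail = 0;

	for (vector = FIRST_EXTERNAL_VECTOR;
	     vector < first_system_vector; vector++) {
		if (test_bit(vector, used_vectors))
			continue;	/* reserved system-wide */
		if (per_cpu(vector_irq, cpu)[vector] >= 0)
			continue;	/* an irq is already assigned */
		avail++;
	}
	return avail;
}
```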