aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--kernel/smp.c30
1 files changed, 30 insertions, 0 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index 4ec30e069987..17c6e5860231 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -195,6 +195,24 @@ void generic_smp_call_function_interrupt(void)
195 list_for_each_entry_rcu(data, &call_function.queue, csd.list) { 195 list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
196 int refs; 196 int refs;
197 197
198 /*
199 * Since we walk the list without any locks, we might
200 * see an entry that was completed, removed from the
201 * list and is in the process of being reused.
202 *
203 * We must check that the cpu is in the cpumask before
204 * checking the refs, and both must be set before
205 * executing the callback on this cpu.
206 */
207
208 if (!cpumask_test_cpu(cpu, data->cpumask))
209 continue;
210
211 smp_rmb();
212
213 if (atomic_read(&data->refs) == 0)
214 continue;
215
198 if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) 216 if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
199 continue; 217 continue;
200 218
@@ -203,6 +221,8 @@ void generic_smp_call_function_interrupt(void)
203 refs = atomic_dec_return(&data->refs); 221 refs = atomic_dec_return(&data->refs);
204 WARN_ON(refs < 0); 222 WARN_ON(refs < 0);
205 if (!refs) { 223 if (!refs) {
224 WARN_ON(!cpumask_empty(data->cpumask));
225
206 raw_spin_lock(&call_function.lock); 226 raw_spin_lock(&call_function.lock);
207 list_del_rcu(&data->csd.list); 227 list_del_rcu(&data->csd.list);
208 raw_spin_unlock(&call_function.lock); 228 raw_spin_unlock(&call_function.lock);
@@ -454,11 +474,21 @@ void smp_call_function_many(const struct cpumask *mask,
454 474
455 data = &__get_cpu_var(cfd_data); 475 data = &__get_cpu_var(cfd_data);
456 csd_lock(&data->csd); 476 csd_lock(&data->csd);
477 BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
457 478
458 data->csd.func = func; 479 data->csd.func = func;
459 data->csd.info = info; 480 data->csd.info = info;
460 cpumask_and(data->cpumask, mask, cpu_online_mask); 481 cpumask_and(data->cpumask, mask, cpu_online_mask);
461 cpumask_clear_cpu(this_cpu, data->cpumask); 482 cpumask_clear_cpu(this_cpu, data->cpumask);
483
484 /*
485 * To ensure the interrupt handler gets a complete view
486 * we order the cpumask and refs writes and order the read
487 * of them in the interrupt handler. In addition we may
488 * only clear our own cpu bit from the mask.
489 */
490 smp_wmb();
491
462 atomic_set(&data->refs, cpumask_weight(data->cpumask)); 492 atomic_set(&data->refs, cpumask_weight(data->cpumask));
463 493
464 raw_spin_lock_irqsave(&call_function.lock, flags); 494 raw_spin_lock_irqsave(&call_function.lock, flags);