Diffstat (limited to 'kernel/smp.c')
 kernel/smp.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index ed6aacfcb7ef..1ba1ba4b42f8 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,6 +194,24 @@ void generic_smp_call_function_interrupt(void)
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
 
+		/*
+		 * Since we walk the list without any locks, we might
+		 * see an entry that was completed, removed from the
+		 * list and is in the process of being reused.
+		 *
+		 * We must check that the cpu is in the cpumask before
+		 * checking the refs, and both must be set before
+		 * executing the callback on this cpu.
+		 */
+
+		if (!cpumask_test_cpu(cpu, data->cpumask))
+			continue;
+
+		smp_rmb();
+
+		if (atomic_read(&data->refs) == 0)
+			continue;
+
 		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
 			continue;
 
@@ -202,6 +220,8 @@ void generic_smp_call_function_interrupt(void)
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 		if (!refs) {
+			WARN_ON(!cpumask_empty(data->cpumask));
+
 			raw_spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
 			raw_spin_unlock(&call_function.lock);
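
The read side of this fix relies on ordering rather than locking: the handler must see the cpu set in data->cpumask before it trusts data->refs, so it tests the mask bit first, issues smp_rmb(), and only then reads refs, skipping the entry if either check fails. Below is a minimal userspace sketch of that check, with C11 atomics standing in for the kernel primitives; struct entry and entry_is_for_us are illustrative names, not kernel API, and the acquire fence plays the role of smp_rmb().

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the two call_function_data fields the race is about. */
struct entry {
	atomic_uint mask;	/* which CPUs should run the callback */
	atomic_int  refs;	/* CPUs that have yet to acknowledge  */
};

/*
 * Reader side, mirroring the new checks in
 * generic_smp_call_function_interrupt(): test the mask bit first,
 * fence (the kernel's smp_rmb()), then test refs. An entry that
 * fails either test is stale or not addressed to us, so skipping
 * it is safe.
 */
static int entry_is_for_us(struct entry *e, unsigned cpu)
{
	if (!(atomic_load_explicit(&e->mask, memory_order_relaxed) & (1u << cpu)))
		return 0;				/* not addressed to us */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	if (atomic_load_explicit(&e->refs, memory_order_relaxed) == 0)
		return 0;				/* completed entry being reused */
	return 1;
}

int main(void)
{
	struct entry e;
	atomic_init(&e.mask, 1u << 1);	/* addressed to CPU 1 only */
	atomic_init(&e.refs, 1);
	printf("cpu0 runs: %d, cpu1 runs: %d\n",
	       entry_is_for_us(&e, 0), entry_is_for_us(&e, 1));
	return 0;
}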
@@ -453,11 +473,21 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	data = &__get_cpu_var(cfd_data);
 	csd_lock(&data->csd);
+	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
+
+	/*
+	 * To ensure the interrupt handler gets a complete view
+	 * we order the cpumask and refs writes and order the read
+	 * of them in the interrupt handler. In addition we may
+	 * only clear our own cpu bit from the mask.
+	 */
+	smp_wmb();
+
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
 	raw_spin_lock_irqsave(&call_function.lock, flags);
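
The write side, in the smp_call_function_many() hunk above, mirrors that order: the cpumask is written first, smp_wmb() is issued, and only then is refs set, so the two fields become visible to the interrupt handler in the same order the handler reads them; the new BUG_ON additionally asserts that the per-cpu entry is really idle before it is reused. A companion sketch of that publish step, again with illustrative names (entry_publish) and a C11 release fence standing in for smp_wmb():

#include <stdatomic.h>

struct entry {
	atomic_uint mask;
	atomic_int  refs;
};

/*
 * Writer side, mirroring smp_call_function_many(): write the mask,
 * fence (the kernel's smp_wmb()), then write refs, so the fields
 * become visible in the same order the handler reads them.
 */
static void entry_publish(struct entry *e, unsigned mask)
{
	atomic_store_explicit(&e->mask, mask, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&e->refs,
			      __builtin_popcount(mask),	/* ~ cpumask_weight() */
			      memory_order_relaxed);
}

int main(void)
{
	struct entry e;
	atomic_init(&e.mask, 0);
	atomic_init(&e.refs, 0);
	entry_publish(&e, 0x6);	/* target CPUs 1 and 2 */
	return 0;
}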