Diffstat (limited to 'kernel/smp.c')
-rw-r--r--    kernel/smp.c    62
1 file changed, 52 insertions, 10 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index 4ec30e06998..9910744f085 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,23 +194,52 @@ void generic_smp_call_function_interrupt(void)
          */
         list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
                 int refs;
+                void (*func) (void *info);
 
-                if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
+                /*
+                 * Since we walk the list without any locks, we might
+                 * see an entry that was completed, removed from the
+                 * list and is in the process of being reused.
+                 *
+                 * We must check that the cpu is in the cpumask before
+                 * checking the refs, and both must be set before
+                 * executing the callback on this cpu.
+                 */
+
+                if (!cpumask_test_cpu(cpu, data->cpumask))
+                        continue;
+
+                smp_rmb();
+
+                if (atomic_read(&data->refs) == 0)
                         continue;
 
+                func = data->csd.func;          /* for later warn */
                 data->csd.func(data->csd.info);
 
+                /*
+                 * If the cpu mask is not still set then it enabled interrupts,
+                 * we took another smp interrupt, and executed the function
+                 * twice on this cpu. In theory that copy decremented refs.
+                 */
+                if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+                        WARN(1, "%pS enabled interrupts and double executed\n",
+                             func);
+                        continue;
+                }
+
                 refs = atomic_dec_return(&data->refs);
                 WARN_ON(refs < 0);
-                if (!refs) {
-                        raw_spin_lock(&call_function.lock);
-                        list_del_rcu(&data->csd.list);
-                        raw_spin_unlock(&call_function.lock);
-                }
 
                 if (refs)
                         continue;
 
+                WARN_ON(!cpumask_empty(data->cpumask));
+
+                raw_spin_lock(&call_function.lock);
+                list_del_rcu(&data->csd.list);
+                raw_spin_unlock(&call_function.lock);
+
                 csd_unlock(&data->csd);
         }
 
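Note: the WARN("%pS enabled interrupts and double executed") added in this hunk fires when a callback re-enables interrupts, a nested call-function IPI arrives, and the same entry is executed twice on this CPU. A minimal, hypothetical example of the kind of callback that would trigger it (bad_remote_func is an invented name, not code from this patch):

/* Hypothetical callback that would trip the new WARN above: a function
 * handed to smp_call_function_many() must not re-enable interrupts,
 * otherwise a nested call-function IPI can run it a second time on this
 * CPU before the first pass clears our bit in data->cpumask. */
static void bad_remote_func(void *info)
{
        local_irq_enable();     /* buggy: opens the nested-IPI window */
        /* ... do the remote work ... */
        local_irq_disable();
}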
@@ -430,7 +459,7 @@ void smp_call_function_many(const struct cpumask *mask,
          * can't happen.
          */
         WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
-                     && !oops_in_progress);
+                     && !oops_in_progress && !early_boot_irqs_disabled);
 
         /* So, what's a CPU they want? Ignoring this one. */
         cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -454,11 +483,21 @@ void smp_call_function_many(const struct cpumask *mask,
 
         data = &__get_cpu_var(cfd_data);
         csd_lock(&data->csd);
+        BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 
         data->csd.func = func;
         data->csd.info = info;
         cpumask_and(data->cpumask, mask, cpu_online_mask);
         cpumask_clear_cpu(this_cpu, data->cpumask);
+
+        /*
+         * To ensure the interrupt handler gets an complete view
+         * we order the cpumask and refs writes and order the read
+         * of them in the interrupt handler. In addition we may
+         * only clear our own cpu bit from the mask.
+         */
+        smp_wmb();
+
         atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
         raw_spin_lock_irqsave(&call_function.lock, flags);
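Note: the smp_wmb() added here pairs with the smp_rmb() added in generic_smp_call_function_interrupt() in the first hunk. A condensed sketch of that contract, pulled out of the two hunks into hypothetical helpers (publish_entry and entry_is_for_us are illustration names only, not functions in the patch):

/* Writer side (smp_call_function_many): make the cpumask visible before
 * refs, since a non-zero refs is what "arms" the entry for readers. */
static void publish_entry(struct call_function_data *data,
                          const struct cpumask *mask, int this_cpu)
{
        cpumask_and(data->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, data->cpumask);
        smp_wmb();              /* order the mask write before refs */
        atomic_set(&data->refs, cpumask_weight(data->cpumask));
}

/* Reader side (generic_smp_call_function_interrupt): only trust refs
 * after seeing our own bit in the mask. */
static bool entry_is_for_us(struct call_function_data *data, int cpu)
{
        if (!cpumask_test_cpu(cpu, data->cpumask))
                return false;
        smp_rmb();              /* pairs with smp_wmb() in the writer */
        return atomic_read(&data->refs) != 0;
}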
@@ -533,17 +572,20 @@ void ipi_call_unlock_irq(void)
 #endif /* USE_GENERIC_SMP_HELPERS */
 
 /*
- * Call a function on all processors
+ * Call a function on all processors. May be used during early boot while
+ * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
+ * of local_irq_disable/enable().
  */
 int on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
+        unsigned long flags;
         int ret = 0;
 
         preempt_disable();
         ret = smp_call_function(func, info, wait);
-        local_irq_disable();
+        local_irq_save(flags);
         func(info);
-        local_irq_enable();
+        local_irq_restore(flags);
         preempt_enable();
         return ret;
 }
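Note: with local_irq_save()/local_irq_restore(), on_each_cpu() no longer unconditionally enables interrupts on the way out, so it tolerates being called while early_boot_irqs_disabled is set. A hypothetical early-boot caller, just to show the intended usage (flush_one_cpu and early_flush_all are invented names):

static void flush_one_cpu(void *info)
{
        /* per-CPU work; runs with interrupts disabled on each CPU */
}

static void __init early_flush_all(void)
{
        /* wait == 1: return only after every CPU has run the callback */
        on_each_cpu(flush_one_cpu, NULL, 1);
}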