Diffstat (limited to 'kernel/smp.c')
-rw-r--r--  kernel/smp.c  75
1 file changed, 68 insertions(+), 7 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 12ed8b013e2d..9910744f0856 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 static struct {
 	struct list_head queue;
 	raw_spinlock_t lock;
@@ -193,23 +194,52 @@ void generic_smp_call_function_interrupt(void)
 	 */
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
+		void (*func) (void *info);
 
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
+		/*
+		 * Since we walk the list without any locks, we might
+		 * see an entry that was completed, removed from the
+		 * list and is in the process of being reused.
+		 *
+		 * We must check that the cpu is in the cpumask before
+		 * checking the refs, and both must be set before
+		 * executing the callback on this cpu.
+		 */
+
+		if (!cpumask_test_cpu(cpu, data->cpumask))
+			continue;
+
+		smp_rmb();
+
+		if (atomic_read(&data->refs) == 0)
 			continue;
 
+		func = data->csd.func;		/* for later warn */
 		data->csd.func(data->csd.info);
 
+		/*
+		 * If the cpu mask is not still set then it enabled interrupts,
+		 * we took another smp interrupt, and executed the function
+		 * twice on this cpu.  In theory that copy decremented refs.
+		 */
+		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+			WARN(1, "%pS enabled interrupts and double executed\n",
+			     func);
+			continue;
+		}
+
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
-		if (!refs) {
-			raw_spin_lock(&call_function.lock);
-			list_del_rcu(&data->csd.list);
-			raw_spin_unlock(&call_function.lock);
-		}
 
 		if (refs)
 			continue;
 
+		WARN_ON(!cpumask_empty(data->cpumask));
+
+		raw_spin_lock(&call_function.lock);
+		list_del_rcu(&data->csd.list);
+		raw_spin_unlock(&call_function.lock);
+
 		csd_unlock(&data->csd);
 	}
 
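The reader-side rule this hunk introduces — observe our cpumask bit set first, then smp_rmb(), and only then read refs — is the consuming half of a publish/consume pattern. It can be sketched in user space with C11 atomics; everything below (the names, the single atomic_bool standing in for our bit of data->cpumask) is illustrative rather than kernel API, and atomic_thread_fence(memory_order_acquire) plays the role of smp_rmb():

#include <stdatomic.h>
#include <stdbool.h>

struct call_data {
	atomic_bool cpu_bit;	/* stand-in for our bit in data->cpumask */
	atomic_int  refs;	/* stand-in for data->refs */
	void (*func)(void *);
	void *info;
};

/* Reader: may race with a writer that is reusing the entry. */
static bool try_execute(struct call_data *d)
{
	/* 1. Our bit must be set, or the entry is not (or no longer) for us. */
	if (!atomic_load_explicit(&d->cpu_bit, memory_order_relaxed))
		return false;

	/* 2. smp_rmb() analogue: order the bit load before the refs load. */
	atomic_thread_fence(memory_order_acquire);

	/* 3. refs == 0 means the entry is stale or mid-reuse: skip it. */
	if (atomic_load_explicit(&d->refs, memory_order_relaxed) == 0)
		return false;

	d->func(d->info);
	return true;
}

Without the fence, the refs load could be satisfied before the bit load, and the reader could pair a stale non-zero refs with a freshly set bit and execute a half-initialized entry.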
@@ -429,7 +459,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	 * can't happen.
 	 */
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
-		     && !oops_in_progress);
+		     && !oops_in_progress && !early_boot_irqs_disabled);
 
 	/* So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -453,11 +483,21 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	data = &__get_cpu_var(cfd_data);
 	csd_lock(&data->csd);
+	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
+
+	/*
+	 * To ensure the interrupt handler gets a complete view
+	 * we order the cpumask and refs writes and order the read
+	 * of them in the interrupt handler.  In addition we may
+	 * only clear our own cpu bit from the mask.
+	 */
+	smp_wmb();
+
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
 	raw_spin_lock_irqsave(&call_function.lock, flags);
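The smp_wmb() added here is the matching producer-side barrier: the csd fields and the cpumask must be globally visible before refs becomes non-zero, because a remote handler that sees its mask bit and then a non-zero refs takes that as permission to run the callback. Continuing the user-space sketch from the interrupt-handler hunk above (same caveats: illustrative names, struct call_data as defined there, a C11 release fence standing in for smp_wmb()):

/* Writer: refill a possibly-reused entry, then publish it. */
static void publish(struct call_data *d, void (*func)(void *), void *info)
{
	/* Payload writes are safe: the entry is not live while refs == 0. */
	d->func = func;
	d->info = info;
	atomic_store_explicit(&d->cpu_bit, true, memory_order_relaxed);

	/* smp_wmb() analogue: order payload and bit before the refs store. */
	atomic_thread_fence(memory_order_release);

	/* Storing a non-zero refs makes the entry live for readers. */
	atomic_store_explicit(&d->refs, 1, memory_order_relaxed);
}

This is also why the patch moves atomic_set(&data->refs, ...) after the barrier instead of setting refs alongside the other fields: refs is the publication point, so it must be written last.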
@@ -529,3 +569,24 @@ void ipi_call_unlock_irq(void)
 {
 	raw_spin_unlock_irq(&call_function.lock);
 }
+#endif /* USE_GENERIC_SMP_HELPERS */
+
+/*
+ * Call a function on all processors.  May be used during early boot while
+ * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
+ * of local_irq_disable/enable().
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int wait)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, wait);
+	local_irq_save(flags);
+	func(info);
+	local_irq_restore(flags);
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL(on_each_cpu);
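With this move, on_each_cpu() runs the function on the remote cpus via smp_call_function() and then on the local cpu under local_irq_save/restore, so it also works while early_boot_irqs_disabled is set; the callback must therefore be safe to run in interrupt context, and with wait set the call returns only after every cpu has finished. A hypothetical caller might look like the sketch below; stat_count, reset_local_stats and reset_all_stats are invented names for illustration, while on_each_cpu(), DEFINE_PER_CPU() and __this_cpu_write() are real kernel API:

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, stat_count);

/*
 * Runs on every online cpu: in IPI context on remote cpus, and with
 * interrupts disabled on the calling cpu.  Touches only per-cpu state,
 * so the concurrent invocations cannot race with each other.
 */
static void reset_local_stats(void *unused)
{
	__this_cpu_write(stat_count, 0);
}

static void reset_all_stats(void)
{
	/* wait == 1: do not return until every cpu has run the callback. */
	on_each_cpu(reset_local_stats, NULL, 1);
}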