Diffstat (limited to 'kernel/smp.c')
 -rw-r--r--  kernel/smp.c  51
 1 file changed, 45 insertions, 6 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index 4ec30e069987..2fe66f7c617a 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,23 +194,52 @@ void generic_smp_call_function_interrupt(void)
 	 */
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
+		void (*func) (void *info);
 
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
+		/*
+		 * Since we walk the list without any locks, we might
+		 * see an entry that was completed, removed from the
+		 * list and is in the process of being reused.
+		 *
+		 * We must check that the cpu is in the cpumask before
+		 * checking the refs, and both must be set before
+		 * executing the callback on this cpu.
+		 */
+
+		if (!cpumask_test_cpu(cpu, data->cpumask))
 			continue;
 
+		smp_rmb();
+
+		if (atomic_read(&data->refs) == 0)
+			continue;
+
+		func = data->csd.func;		/* for later warn */
 		data->csd.func(data->csd.info);
 
+		/*
+		 * If the cpu mask is not still set then it enabled interrupts,
+		 * we took another smp interrupt, and executed the function
+		 * twice on this cpu. In theory that copy decremented refs.
+		 */
+		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+			WARN(1, "%pS enabled interrupts and double executed\n",
+			     func);
+			continue;
+		}
+
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
-		if (!refs) {
-			raw_spin_lock(&call_function.lock);
-			list_del_rcu(&data->csd.list);
-			raw_spin_unlock(&call_function.lock);
-		}
 
 		if (refs)
 			continue;
 
+		WARN_ON(!cpumask_empty(data->cpumask));
+
+		raw_spin_lock(&call_function.lock);
+		list_del_rcu(&data->csd.list);
+		raw_spin_unlock(&call_function.lock);
+
 		csd_unlock(&data->csd);
 	}
 
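The check order the new comments describe (test this cpu's bit in the mask, then a read barrier, then read refs, and only then run the callback) is easy to lose inside the hunk. Below is a minimal userspace sketch of the reader side only, using C11 atomics in place of the kernel's smp_rmb(), cpumask and atomic_t helpers; the names (cfd_sketch, cpu_pending, handle_one) are invented for illustration and are not the kernel's types. The second hunk below pairs this with an smp_wmb() on the sending side.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for one call_function_data entry (not the kernel's type). */
struct cfd_sketch {
	atomic_bool cpu_pending;   /* this cpu's bit in data->cpumask */
	atomic_int  refs;          /* data->refs */
	void (*func)(void *info);
	void *info;
};

/* Reader side: same check order as the patched interrupt handler. */
static void handle_one(struct cfd_sketch *d)
{
	/* Skip entries that never named this cpu, or that are being reused. */
	if (!atomic_load_explicit(&d->cpu_pending, memory_order_relaxed))
		return;

	/* Models smp_rmb(): the mask bit must be read before refs. */
	atomic_thread_fence(memory_order_acquire);

	/* refs == 0 means the entry was completed and is being recycled. */
	if (atomic_load_explicit(&d->refs, memory_order_relaxed) == 0)
		return;

	d->func(d->info);

	/* Clear our bit; if it is already clear we executed the function twice. */
	if (!atomic_exchange_explicit(&d->cpu_pending, false,
				      memory_order_acq_rel)) {
		fprintf(stderr, "callback double executed on this cpu\n");
		return;
	}

	/* The last cpu to drop its reference may release the entry. */
	if (atomic_fetch_sub_explicit(&d->refs, 1, memory_order_acq_rel) == 1)
		puts("last reference dropped; entry may be reused");
}

static void say_hello(void *info)
{
	printf("hello from %s\n", (const char *)info);
}

int main(void)
{
	struct cfd_sketch d = { .func = say_hello, .info = "cpu 1" };

	atomic_store(&d.cpu_pending, true);
	atomic_store(&d.refs, 1);
	handle_one(&d);
	return 0;
}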
@@ -454,11 +483,21 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	data = &__get_cpu_var(cfd_data);
 	csd_lock(&data->csd);
+	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
+
+	/*
+	 * To ensure the interrupt handler gets an complete view
+	 * we order the cpumask and refs writes and order the read
+	 * of them in the interrupt handler. In addition we may
+	 * only clear our own cpu bit from the mask.
+	 */
+	smp_wmb();
+
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
 	raw_spin_lock_irqsave(&call_function.lock, flags);
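For completeness, here is the matching writer side as a hedged sketch under the same assumptions: C11 atomics stand in for smp_wmb(), a fixed 4-entry bool array stands in for struct cpumask, and request_sketch/queue_request are made-up names. The point it illustrates is the pairing the comment describes: func, info and the per-cpu bits are published before the store fence, and refs is set after it, so a handler that sees its bit and a nonzero refs also sees the current function data.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_SKETCH 4   /* illustrative fixed size instead of struct cpumask */

/* Hypothetical per-cpu request record playing the role of cfd_data. */
struct request_sketch {
	atomic_bool pending[NR_CPUS_SKETCH];   /* stands in for data->cpumask */
	atomic_int  refs;                      /* stands in for data->refs */
	void (*func)(void *info);
	void *info;
};

static void ping(void *info)
{
	(void)info;
	puts("ping");
}

/*
 * Writer side: publish func/info and the per-cpu bits first, then a release
 * fence (the kernel's smp_wmb()), and only then set refs.
 */
static void queue_request(struct request_sketch *r, const bool *mask,
			  int this_cpu, void (*func)(void *info), void *info)
{
	int cpu, weight = 0;

	r->func = func;
	r->info = info;
	for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++) {
		bool want = mask[cpu] && cpu != this_cpu;

		atomic_store_explicit(&r->pending[cpu], want,
				      memory_order_relaxed);
		weight += want;
	}

	/* Models smp_wmb(): order the writes above before the refs write. */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&r->refs, weight, memory_order_relaxed);
	/* The real code then queues the entry under a lock and sends the IPI. */
}

int main(void)
{
	struct request_sketch r = { .func = NULL };
	bool mask[NR_CPUS_SKETCH] = { true, true, true, false };

	queue_request(&r, mask, 0, ping, NULL);
	printf("refs after publish: %d\n", atomic_load(&r.refs));
	return 0;
}

In the real code a store-only barrier is enough on the sending cpu because only the order of its writes matters there; the receiving cpu supplies the matching smp_rmb() between its mask and refs reads, as shown in the first hunk.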
