Diffstat (limited to 'kernel/smp.c')

-rw-r--r--  kernel/smp.c  36
1 file changed, 8 insertions(+), 28 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 8e218500ab14..c9d1c7835c2f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -29,8 +29,7 @@ enum {
 
 struct call_function_data {
         struct call_single_data csd;
-        spinlock_t              lock;
-        unsigned int            refs;
+        atomic_t                refs;
         cpumask_var_t           cpumask;
 };
 
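What this hunk buys: with refs as an atomic_t, a CPU can drop its reference and learn whether it was the last holder in a single atomic operation, so the per-descriptor spinlock disappears. A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t (all names are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the patched struct call_function_data:
 * the counter itself is the synchronization point, no spinlock. */
struct cfd_sketch {
        atomic_int refs;
};

int main(void)
{
        struct cfd_sketch data;

        atomic_store(&data.refs, 3);    /* plays the role of atomic_set() */

        /* Each "cpu" decrements once; exactly one of them observes 0. */
        for (int cpu = 0; cpu < 3; cpu++) {
                int refs = atomic_fetch_sub(&data.refs, 1) - 1;
                if (refs == 0)
                        printf("cpu %d dropped the last reference\n", cpu);
        }
        return 0;
}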
@@ -39,9 +38,7 @@ struct call_single_queue {
         spinlock_t              lock;
 };
 
-static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
-        .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock),
-};
+static DEFINE_PER_CPU(struct call_function_data, cfd_data);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
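A side effect worth noting: once the lock member is gone, cfd_data needs no designated initializer at all, because static storage is zero-filled and an all-zero descriptor (refs == 0, empty cpumask) is a valid idle state. Sketched in plain C under the same illustrative names as above:

#include <stdatomic.h>

struct cfd_sketch {
        atomic_int refs;        /* zero-initialized: no references held */
};

/* Counterpart of the patched DEFINE_PER_CPU(...) line: one descriptor
 * per "cpu", no run-time initializer required. */
static struct cfd_sketch cfd_data[64];

int main(void)
{
        return atomic_load(&cfd_data[0].refs); /* 0: idle, as expected */
}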
@@ -196,25 +193,18 @@ void generic_smp_call_function_interrupt(void)
         list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
                 int refs;
 
-                spin_lock(&data->lock);
-                if (!cpumask_test_cpu(cpu, data->cpumask)) {
-                        spin_unlock(&data->lock);
+                if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
                         continue;
-                }
-                cpumask_clear_cpu(cpu, data->cpumask);
-                spin_unlock(&data->lock);
 
                 data->csd.func(data->csd.info);
 
-                spin_lock(&data->lock);
-                WARN_ON(data->refs == 0);
-                refs = --data->refs;
+                refs = atomic_dec_return(&data->refs);
+                WARN_ON(refs < 0);
                 if (!refs) {
                         spin_lock(&call_function.lock);
                         list_del_rcu(&data->csd.list);
                         spin_unlock(&call_function.lock);
                 }
-                spin_unlock(&data->lock);
 
                 if (refs)
                         continue;
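The rewritten handler makes two lock-free moves per queue entry: cpumask_test_and_clear_cpu() lets each CPU claim its bit exactly once, and atomic_dec_return() tells exactly one CPU that it dropped the last reference and must unlink the entry. A userspace sketch of that shape (C11 atomics, the cpumask collapsed to a single word, all names illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cfd_sketch {
        atomic_ulong cpumask;   /* one bit per cpu, simplified */
        atomic_int   refs;
};

/* Role of cpumask_test_and_clear_cpu(): true only for the single
 * caller that actually clears a set bit. */
static bool test_and_clear_cpu(int cpu, atomic_ulong *mask)
{
        unsigned long bit = 1UL << cpu;

        return atomic_fetch_and(mask, ~bit) & bit;
}

static void handle_ipi(struct cfd_sketch *data, int cpu)
{
        if (!test_and_clear_cpu(cpu, &data->cpumask))
                return;                 /* not a target, or already done */

        printf("cpu %d runs the callback\n", cpu);

        /* Role of atomic_dec_return(): the CPU that sees 0 unlinks the
         * entry from the RCU list (elided here). */
        if (atomic_fetch_sub(&data->refs, 1) - 1 == 0)
                printf("cpu %d unlinks the entry\n", cpu);
}

int main(void)
{
        struct cfd_sketch data = { 0x6, 2 };    /* targets: cpus 1 and 2 */

        handle_ipi(&data, 0);   /* bit clear: skipped */
        handle_ipi(&data, 1);
        handle_ipi(&data, 2);   /* last reference: unlinks */
        return 0;
}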
@@ -357,13 +347,6 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
         generic_exec_single(cpu, data, wait);
 }
 
-/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
-
-#ifndef arch_send_call_function_ipi_mask
-# define arch_send_call_function_ipi_mask(maskp) \
-        arch_send_call_function_ipi(*(maskp))
-#endif
-
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).
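For orientation, the kernel-doc above belongs to smp_call_function_many(), the entry point whose setup path the next hunk rewrites. A hedged usage sketch against the in-kernel API of this era; the callback and caller names are hypothetical:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>

/* Hypothetical callback: runs in interrupt context on every target
 * CPU, so it must not sleep. */
static void drain_local_state(void *info)
{
        /* ... per-cpu work, keyed by 'info' ... */
}

static void drain_other_cpus(void *arg)
{
        /*
         * Per the function's rules: interrupts enabled and preemption
         * disabled in the caller; the mask is trimmed to online CPUs
         * and the calling CPU is skipped.
         */
        preempt_disable();
        smp_call_function_many(cpu_online_mask, drain_local_state,
                               arg, true /* wait until all CPUs finish */);
        preempt_enable();
}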
@@ -419,23 +402,20 @@ void smp_call_function_many(const struct cpumask *mask,
         data = &__get_cpu_var(cfd_data);
         csd_lock(&data->csd);
 
-        spin_lock_irqsave(&data->lock, flags);
         data->csd.func = func;
         data->csd.info = info;
         cpumask_and(data->cpumask, mask, cpu_online_mask);
         cpumask_clear_cpu(this_cpu, data->cpumask);
-        data->refs = cpumask_weight(data->cpumask);
+        atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-        spin_lock(&call_function.lock);
+        spin_lock_irqsave(&call_function.lock, flags);
         /*
          * Place entry at the _HEAD_ of the list, so that any cpu still
          * observing the entry in generic_smp_call_function_interrupt()
          * will not miss any other list entries:
          */
         list_add_rcu(&data->csd.list, &call_function.queue);
-        spin_unlock(&call_function.lock);
-
-        spin_unlock_irqrestore(&data->lock, flags);
+        spin_unlock_irqrestore(&call_function.lock, flags);
 
         /*
          * Make the list addition visible before sending the ipi.
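The comment the diff ends on is the crux of the ordering: the list insertion must be visible to a target CPU before the IPI can reach it, otherwise the target could take the interrupt and find nothing queued. The same publish-then-signal pairing in a C11 sketch, with a release/acquire flag standing in for the hardware IPI (purely illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct queue_sketch {
        atomic_int  entry;      /* stands in for the queued list entry */
        atomic_bool ipi;        /* stands in for the hardware IPI */
};

/* Sender: publish the entry first, then raise the "ipi" with release
 * semantics -- the role of the barrier before sending the real IPI. */
static void sender(struct queue_sketch *q)
{
        atomic_store_explicit(&q->entry, 42, memory_order_relaxed);
        atomic_store_explicit(&q->ipi, true, memory_order_release);
}

/* Receiver: an acquire load of the flag guarantees the previously
 * published entry is visible once the "interrupt" is observed. */
static int receiver(struct queue_sketch *q)
{
        while (!atomic_load_explicit(&q->ipi, memory_order_acquire))
                ;       /* spin until the "ipi" arrives */
        return atomic_load_explicit(&q->entry, memory_order_relaxed);
}

int main(void)
{
        struct queue_sketch q = { 0, false };

        sender(&q);
        return receiver(&q) == 42 ? 0 : 1;
}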