Diffstat (limited to 'kernel/smp.c')
-rw-r--r--  kernel/smp.c  76 ++++++++++++++++++++++++++++++++++++++++++----------------------------------
1 file changed, 42 insertions(+), 34 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 94188b8ecc33..c9d1c7835c2f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -29,8 +29,7 @@ enum {
 
 struct call_function_data {
        struct call_single_data csd;
-       spinlock_t lock;
-       unsigned int refs;
+       atomic_t refs;
        cpumask_var_t cpumask;
 };
 
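With the refcount turned into an atomic_t, the per-entry spinlock has nothing left to protect and is dropped along with it. The invariant the atomic preserves is "last one out cleans up": the CPU whose decrement reaches zero unlinks the entry. A minimal user-space sketch of that pattern, using C11 <stdatomic.h> as a stand-in for the kernel's atomic_t (names here are illustrative, not from the patch):

    #include <stdatomic.h>
    #include <stdio.h>

    struct call_data {
        atomic_int refs;            /* one reference per target cpu */
    };

    /* Runs on each "cpu" after it has executed the callback. */
    static void put_ref(struct call_data *d)
    {
        /* atomic_dec_return() equivalent: fetch_sub returns the old value */
        int refs = atomic_fetch_sub(&d->refs, 1) - 1;

        if (refs == 0)
            printf("last cpu out: safe to unlink the entry\n");
    }

    int main(void)
    {
        struct call_data d;

        atomic_init(&d.refs, 3);    /* three target cpus */
        put_ref(&d);
        put_ref(&d);
        put_ref(&d);                /* this call sees refs == 0 */
        return 0;
    }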
@@ -39,9 +38,7 @@ struct call_single_queue {
        spinlock_t lock;
 };
 
-static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
-       .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock),
-};
+static DEFINE_PER_CPU(struct call_function_data, cfd_data);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -177,6 +174,11 @@ void generic_smp_call_function_interrupt(void)
        int cpu = get_cpu();
 
        /*
+        * Shouldn't receive this interrupt on a cpu that is not yet online.
+        */
+       WARN_ON_ONCE(!cpu_online(cpu));
+
+       /*
         * Ensure entry is visible on call_function_queue after we have
         * entered the IPI. See comment in smp_call_function_many.
         * If we don't have this, then we may miss an entry on the list
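The barrier this comment refers to pairs with one on the sending side ("make the list addition visible before sending the ipi", in smp_call_function_many() below): the sender publishes the queue entry before raising the IPI, and the handler orders its entry into the interrupt before scanning the queue. A rough user-space analogue of that pairing with C11 fences and two threads (a sketch of the ordering idea only, not the kernel's code):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <assert.h>

    static int entry;           /* stands in for the queued csd */
    static atomic_int ipi;      /* stands in for the IPI line */

    static void *sender(void *unused)
    {
        (void)unused;
        entry = 42;                                     /* queue the entry */
        atomic_thread_fence(memory_order_release);      /* sender-side smp_mb() */
        atomic_store_explicit(&ipi, 1, memory_order_relaxed);  /* raise IPI */
        return NULL;
    }

    static void *handler(void *unused)
    {
        (void)unused;
        while (!atomic_load_explicit(&ipi, memory_order_relaxed))
            ;                                           /* wait for the IPI */
        atomic_thread_fence(memory_order_acquire);      /* handler-side smp_mb() */
        assert(entry == 42);        /* the queued entry must be visible */
        return NULL;
    }

    int main(void)
    {
        pthread_t s, h;

        pthread_create(&h, NULL, handler, NULL);
        pthread_create(&s, NULL, sender, NULL);
        pthread_join(s, NULL);
        pthread_join(h, NULL);
        return 0;
    }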
@@ -191,25 +193,18 @@ void generic_smp_call_function_interrupt(void)
        list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
                int refs;
 
-               spin_lock(&data->lock);
-               if (!cpumask_test_cpu(cpu, data->cpumask)) {
-                       spin_unlock(&data->lock);
+               if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
                        continue;
-               }
-               cpumask_clear_cpu(cpu, data->cpumask);
-               spin_unlock(&data->lock);
 
                data->csd.func(data->csd.info);
 
-               spin_lock(&data->lock);
-               WARN_ON(data->refs == 0);
-               refs = --data->refs;
+               refs = atomic_dec_return(&data->refs);
+               WARN_ON(refs < 0);
                if (!refs) {
                        spin_lock(&call_function.lock);
                        list_del_rcu(&data->csd.list);
                        spin_unlock(&call_function.lock);
                }
-               spin_unlock(&data->lock);
 
                if (refs)
                        continue;
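The lock/test/clear/unlock sequence collapses into one atomic read-modify-write: cpumask_test_and_clear_cpu() both answers "is this entry addressed to me?" and claims the bit, so a cpu can never run the callback twice for the same entry. A user-space sketch of that claim operation on a plain word-sized bitmask (illustrative, not the kernel's cpumask API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint pending_mask;    /* one bit per cpu, like data->cpumask */

    /*
     * Mimics cpumask_test_and_clear_cpu(): clear the bit and report
     * whether it was set, in a single atomic operation.
     */
    static bool test_and_clear(int cpu)
    {
        unsigned int bit = 1u << cpu;

        return atomic_fetch_and(&pending_mask, ~bit) & bit;
    }

    int main(void)
    {
        atomic_init(&pending_mask, 0x6);    /* cpus 1 and 2 targeted */

        for (int cpu = 0; cpu < 4; cpu++)
            printf("cpu %d: %s\n", cpu,
                   test_and_clear(cpu) ? "runs callback" : "skips entry");
        return 0;
    }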
@@ -230,6 +225,11 @@ void generic_smp_call_function_single_interrupt(void)
        unsigned int data_flags;
        LIST_HEAD(list);
 
+       /*
+        * Shouldn't receive this interrupt on a cpu that is not yet online.
+        */
+       WARN_ON_ONCE(!cpu_online(smp_processor_id()));
+
        spin_lock(&q->lock);
        list_replace_init(&q->list, &list);
        spin_unlock(&q->lock);
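The context around the new WARN_ON_ONCE shows the drain pattern this handler uses: list_replace_init() detaches the whole pending list in O(1) under q->lock, and the callbacks then run with the lock already dropped. A user-space sketch of the pattern, with a mutex and a singly-linked list standing in for q->lock and the kernel list API (hypothetical names):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct item {
        int val;
        struct item *next;
    };

    static struct item *queue;      /* like q->list */
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    static void enqueue(int val)
    {
        struct item *it = malloc(sizeof(*it));

        it->val = val;
        pthread_mutex_lock(&queue_lock);
        it->next = queue;
        queue = it;
        pthread_mutex_unlock(&queue_lock);
    }

    static void drain(void)
    {
        /* Splice the entire list out under the lock... */
        pthread_mutex_lock(&queue_lock);
        struct item *list = queue;
        queue = NULL;               /* list_replace_init() */
        pthread_mutex_unlock(&queue_lock);

        /* ...then process the entries without holding it. */
        while (list) {
            struct item *next = list->next;

            printf("processing %d\n", list->val);
            free(list);
            list = next;
        }
    }

    int main(void)
    {
        enqueue(1);
        enqueue(2);
        drain();
        return 0;
    }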
@@ -285,8 +285,14 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
         */
        this_cpu = get_cpu();
 
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+       /*
+        * Can deadlock when called with interrupts disabled.
+        * We allow cpu's that are not yet online though, as no one else can
+        * send smp call function interrupt to this cpu and as such deadlocks
+        * can't happen.
+        */
+       WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+                    && !oops_in_progress);
 
        if (cpu == this_cpu) {
                local_irq_save(flags);
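The warning now keys off cpu_online(): a cpu still in its bringup path may legitimately call this with interrupts disabled, since nothing can send it a smp call function interrupt yet. Pulling the predicate out into a tiny truth-table sketch makes the three-way condition explicit (should_warn is a hypothetical helper, not in the patch):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Mirrors: WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
     *                       && !oops_in_progress);
     */
    static bool should_warn(bool online, bool irqs_off, bool oops)
    {
        return online && irqs_off && !oops;
    }

    int main(void)
    {
        /* Offline (still booting) cpu with irqs off: allowed, no warning. */
        printf("%d\n", should_warn(false, true, false));    /* 0 */
        /* Online cpu with irqs off: potential deadlock, warn. */
        printf("%d\n", should_warn(true, true, false));     /* 1 */
        return 0;
    }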
@@ -329,19 +335,18 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 {
        csd_lock(data);
 
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
+       /*
+        * Can deadlock when called with interrupts disabled.
+        * We allow cpu's that are not yet online though, as no one else can
+        * send smp call function interrupt to this cpu and as such deadlocks
+        * can't happen.
+        */
+       WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
+                    && !oops_in_progress);
 
        generic_exec_single(cpu, data, wait);
 }
 
-/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
-
-#ifndef arch_send_call_function_ipi_mask
-# define arch_send_call_function_ipi_mask(maskp) \
-       arch_send_call_function_ipi(*(maskp))
-#endif
-
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).
@@ -365,8 +370,14 @@ void smp_call_function_many(const struct cpumask *mask,
        unsigned long flags;
        int cpu, next_cpu, this_cpu = smp_processor_id();
 
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+       /*
+        * Can deadlock when called with interrupts disabled.
+        * We allow cpu's that are not yet online though, as no one else can
+        * send smp call function interrupt to this cpu and as such deadlocks
+        * can't happen.
+        */
+       WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+                    && !oops_in_progress);
 
        /* So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -391,23 +402,20 @@ void smp_call_function_many(const struct cpumask *mask,
        data = &__get_cpu_var(cfd_data);
        csd_lock(&data->csd);
 
-       spin_lock_irqsave(&data->lock, flags);
        data->csd.func = func;
        data->csd.info = info;
        cpumask_and(data->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, data->cpumask);
-       data->refs = cpumask_weight(data->cpumask);
+       atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-       spin_lock(&call_function.lock);
+       spin_lock_irqsave(&call_function.lock, flags);
        /*
         * Place entry at the _HEAD_ of the list, so that any cpu still
         * observing the entry in generic_smp_call_function_interrupt()
         * will not miss any other list entries:
         */
        list_add_rcu(&data->csd.list, &call_function.queue);
-       spin_unlock(&call_function.lock);
-
-       spin_unlock_irqrestore(&data->lock, flags);
+       spin_unlock_irqrestore(&call_function.lock, flags);
 
        /*
         * Make the list addition visible before sending the ipi.
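Head insertion matters because another cpu may be mid-scan in generic_smp_call_function_interrupt(): adding at the head never rewrites a next pointer the scanner has yet to follow, so no existing entry can be skipped. A single-threaded sketch of that pointer argument (the kernel additionally relies on list_add_rcu()'s publish barrier, omitted here):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int id;
        struct node *next;
    };

    static struct node *head;

    /* Head insertion touches no link an in-flight reader still has to follow. */
    static void add_head(int id)
    {
        struct node *n = malloc(sizeof(*n));

        n->id = id;
        n->next = head;     /* existing links stay untouched */
        head = n;
    }

    int main(void)
    {
        add_head(1);
        add_head(2);

        /* A reader grabs its starting point, then an insertion races in. */
        struct node *cursor = head;     /* sees 2 -> 1 */

        add_head(3);        /* cursor's remaining links are unchanged */

        for (; cursor; cursor = cursor->next)
            printf("visited %d\n", cursor->id);     /* prints 2, then 1 */
        return 0;
    }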