author	Frederic Weisbecker <fweisbec@gmail.com>	2009-09-23 17:08:43 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-09-23 17:08:43 -0400
commit	d7a4b414eed51f1653bb05ebe84122bf9a7ae18b (patch)
tree	bd6603a0c27de4c138a1767871897e9cd3e1a1d2 /kernel/smp.c
parent	1f0ab40976460bc4673fa204ce917a725185d8f2 (diff)
parent	a724eada8c2a7b62463b73ccf73fd0bb6e928aeb (diff)
Merge commit 'linus/master' into tracing/kprobes
Conflicts:
	kernel/trace/Makefile
	kernel/trace/trace.h
	kernel/trace/trace_event_types.h
	kernel/trace/trace_export.c

Merge reason: Sync with latest significant tracing core changes.
Diffstat (limited to 'kernel/smp.c')
-rw-r--r--	kernel/smp.c	69
1 file changed, 42 insertions(+), 27 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 94188b8ecc33..fd47a256a24e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -29,8 +29,7 @@ enum {
 
 struct call_function_data {
 	struct call_single_data	csd;
-	spinlock_t		lock;
-	unsigned int		refs;
+	atomic_t		refs;
 	cpumask_var_t		cpumask;
 };
 
@@ -39,9 +38,7 @@ struct call_single_queue {
 	spinlock_t		lock;
 };
 
-static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
-	.lock			= __SPIN_LOCK_UNLOCKED(cfd_data.lock),
-};
+static DEFINE_PER_CPU(struct call_function_data, cfd_data);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
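Taken together, the two hunks above make the per-cpu call data lockless: refs becomes an atomic counter, and since an all-zero structure is now a valid initial state, the DEFINE_PER_CPU instance no longer needs a lock initializer. A minimal userspace sketch of that idea, using C11 atomic_int as a stand-in for the kernel's atomic_t (model code, not the kernel's definitions):

#include <stdatomic.h>
#include <stdio.h>

/*
 * Model of the patched structure: one atomic counter instead of
 * spinlock_t + unsigned int, so an all-zero instance is already valid
 * (which is why the DEFINE_PER_CPU initializer could be dropped).
 */
struct cfd_model {
	atomic_int refs;
};

int main(void)
{
	/* Zero-initialized, like an initializer-less DEFINE_PER_CPU. */
	static struct cfd_model per_cpu_cfd[4];

	atomic_fetch_add(&per_cpu_cfd[0].refs, 3);
	printf("cpu0 refs = %d\n", atomic_load(&per_cpu_cfd[0].refs));
	return 0;
}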
@@ -177,6 +174,11 @@ void generic_smp_call_function_interrupt(void)
 	int cpu = get_cpu();
 
 	/*
+	 * Shouldn't receive this interrupt on a cpu that is not yet online.
+	 */
+	WARN_ON_ONCE(!cpu_online(cpu));
+
+	/*
 	 * Ensure entry is visible on call_function_queue after we have
 	 * entered the IPI. See comment in smp_call_function_many.
 	 * If we don't have this, then we may miss an entry on the list
@@ -191,25 +193,18 @@ void generic_smp_call_function_interrupt(void)
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
 
-		spin_lock(&data->lock);
-		if (!cpumask_test_cpu(cpu, data->cpumask)) {
-			spin_unlock(&data->lock);
+		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
 			continue;
-		}
-		cpumask_clear_cpu(cpu, data->cpumask);
-		spin_unlock(&data->lock);
 
 		data->csd.func(data->csd.info);
 
-		spin_lock(&data->lock);
-		WARN_ON(data->refs == 0);
-		refs = --data->refs;
+		refs = atomic_dec_return(&data->refs);
+		WARN_ON(refs < 0);
 		if (!refs) {
 			spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
 			spin_unlock(&call_function.lock);
 		}
-		spin_unlock(&data->lock);
 
 		if (refs)
 			continue;
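The handler hunk is the heart of the change: claiming this cpu's bit and dropping the reference each become a single atomic operation, so no per-entry lock is taken in interrupt context. A hedged userspace model of that consumer path, with an atomic bitmask standing in for cpumask_var_t and C11 atomics for atomic_t (illustrative names, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct cfd_model {
	void (*func)(void *info);
	void *info;
	atomic_uint cpumask;	/* bit N set => cpu N still has work */
	atomic_int refs;	/* cpus that have not finished yet */
};

static void say_hi(void *info)
{
	printf("callback ran: %s\n", (char *)info);
}

/*
 * One cpu's pass over a queued entry: claim our cpumask bit with a
 * single atomic op (modeling cpumask_test_and_clear_cpu), run the
 * callback, then drop a reference; whoever drops the last reference
 * is the one that may unlink the entry from the queue.
 */
static void handle_entry(struct cfd_model *data, int cpu)
{
	unsigned int bit = 1u << cpu;

	/* atomic_fetch_and returns the old mask: test-and-clear in one op */
	if (!(atomic_fetch_and(&data->cpumask, ~bit) & bit))
		return;			/* entry was not aimed at this cpu */

	data->func(data->info);

	if (atomic_fetch_sub(&data->refs, 1) - 1 == 0)
		printf("cpu %d dropped the last ref: unlink entry\n", cpu);
}

int main(void)
{
	static struct cfd_model data = { .func = say_hi, .info = "ipi" };

	atomic_init(&data.cpumask, 0x6);	/* target cpus 1 and 2 */
	atomic_init(&data.refs, 2);

	for (int cpu = 0; cpu < 4; cpu++)	/* every cpu scans the queue */
		handle_entry(&data, cpu);
	return 0;
}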
@@ -230,6 +225,11 @@ void generic_smp_call_function_single_interrupt(void)
 	unsigned int data_flags;
 	LIST_HEAD(list);
 
+	/*
+	 * Shouldn't receive this interrupt on a cpu that is not yet online.
+	 */
+	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
+
 	spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
 	spin_unlock(&q->lock);
@@ -285,8 +285,14 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 	 */
 	this_cpu = get_cpu();
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+		     && !oops_in_progress);
 
 	if (cpu == this_cpu) {
 		local_irq_save(flags);
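The same relaxed warning recurs in the two hunks that follow. The reasoning: a cpu that is not yet online cannot be the target of anyone else's smp-call IPI, so it may wait with interrupts disabled without risk. A small model of the predicate, with stand-in functions that are assumptions of this sketch rather than the kernel's implementations:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel state; assume cpu 2 is still being brought up. */
static bool cpu_online(int cpu)	{ return cpu != 2; }
static bool irqs_disabled(void)	{ return true; }
static bool oops_in_progress;

/*
 * The patched condition: only an online cpu can receive someone else's
 * smp-call IPI, so only there can waiting with irqs off deadlock.
 */
static bool should_warn(int this_cpu)
{
	return cpu_online(this_cpu) && irqs_disabled() && !oops_in_progress;
}

int main(void)
{
	printf("cpu 0 (online),  irqs off -> warn: %d\n", should_warn(0));
	printf("cpu 2 (booting), irqs off -> warn: %d\n", should_warn(2));
	return 0;
}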
@@ -329,8 +335,14 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 {
 	csd_lock(data);
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
+		     && !oops_in_progress);
 
 	generic_exec_single(cpu, data, wait);
 }
@@ -365,8 +377,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	unsigned long flags;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+		     && !oops_in_progress);
 
 	/* So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -391,23 +409,20 @@ void smp_call_function_many(const struct cpumask *mask,
 	data = &__get_cpu_var(cfd_data);
 	csd_lock(&data->csd);
 
-	spin_lock_irqsave(&data->lock, flags);
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
-	data->refs = cpumask_weight(data->cpumask);
+	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-	spin_lock(&call_function.lock);
+	spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
 	 * observing the entry in generic_smp_call_function_interrupt()
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
-	spin_unlock(&call_function.lock);
-
-	spin_unlock_irqrestore(&data->lock, flags);
+	spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
 	 * Make the list addition visible before sending the ipi.
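On the producer side, the per-entry lock disappears as well: the fields are filled in with plain stores, the refcount is published with atomic_set(), and only the global queue lock, now taken with irqs saved since the per-entry irqsave is gone, guards the list insertion. A hedged userspace model in the same style as the sketches above:

#include <stdatomic.h>
#include <stdio.h>

struct cfd_model {
	void (*func)(void *info);
	void *info;
	atomic_uint cpumask;
	atomic_int refs;
};

static void ping(void *info)
{
	printf("callback: %s\n", (char *)info);
}

/*
 * Producer path after the patch: plain stores fill in the entry, the
 * refcount is published atomically, and (in the kernel) the entry is
 * then inserted under the single call_function.lock before the IPI.
 */
static void queue_call(struct cfd_model *data, unsigned int mask,
		       void (*func)(void *), void *info)
{
	unsigned int m = mask, weight = 0;

	data->func = func;
	data->info = info;
	atomic_store(&data->cpumask, mask);

	for (; m; m &= m - 1)
		weight++;		/* one reference per target cpu */
	atomic_store(&data->refs, (int)weight);

	/*
	 * In the kernel this is where spin_lock_irqsave(&call_function.lock),
	 * list_add_rcu(), spin_unlock_irqrestore() and the IPI would go.
	 */
}

int main(void)
{
	static struct cfd_model data;

	queue_call(&data, 0x6, ping, "two target cpus");
	printf("refs published: %d\n", atomic_load(&data.refs));
	return 0;
}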