Diffstat (limited to 'kernel/smp.c')
-rw-r--r--  kernel/smp.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------
1 file changed, 65 insertions(+), 26 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index c9d1c7835c2f..de735a6637d0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
 
 static struct {
 	struct list_head queue;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 } call_function __cacheline_aligned_in_smp =
 	{
 		.queue = LIST_HEAD_INIT(call_function.queue),
-		.lock = __SPIN_LOCK_UNLOCKED(call_function.lock),
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
 	};
 
 enum {
@@ -35,7 +35,7 @@ struct call_function_data {
 
 struct call_single_queue {
 	struct list_head list;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
 	for_each_possible_cpu(i) {
 		struct call_single_queue *q = &per_cpu(call_single_queue, i);
 
-		spin_lock_init(&q->lock);
+		raw_spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
 
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 	unsigned long flags;
 	int ipi;
 
-	spin_lock_irqsave(&dst->lock, flags);
+	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&data->list, &dst->list);
-	spin_unlock_irqrestore(&dst->lock, flags);
+	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
 	 * The list addition should be visible before sending the IPI
@@ -171,7 +171,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 void generic_smp_call_function_interrupt(void)
 {
 	struct call_function_data *data;
-	int cpu = get_cpu();
+	int cpu = smp_processor_id();
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 		if (!refs) {
-			spin_lock(&call_function.lock);
+			raw_spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
-			spin_unlock(&call_function.lock);
+			raw_spin_unlock(&call_function.lock);
 		}
 
 		if (refs)
@@ -212,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
 		csd_unlock(&data->csd);
 	}
 
-	put_cpu();
 }
 
 /*
218/* 217/*
@@ -230,9 +229,9 @@ void generic_smp_call_function_single_interrupt(void)
230 */ 229 */
231 WARN_ON_ONCE(!cpu_online(smp_processor_id())); 230 WARN_ON_ONCE(!cpu_online(smp_processor_id()));
232 231
233 spin_lock(&q->lock); 232 raw_spin_lock(&q->lock);
234 list_replace_init(&q->list, &list); 233 list_replace_init(&q->list, &list);
235 spin_unlock(&q->lock); 234 raw_spin_unlock(&q->lock);
236 235
237 while (!list_empty(&list)) { 236 while (!list_empty(&list)) {
238 struct call_single_data *data; 237 struct call_single_data *data;
@@ -265,9 +264,7 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data);
  * @info: An arbitrary pointer to pass to the function.
  * @wait: If true, wait until function has completed on other CPUs.
  *
- * Returns 0 on success, else a negative status code. Note that @wait
- * will be implicitly turned on in case of allocation failures, since
- * we fall back to on-stack allocation.
+ * Returns 0 on success, else a negative status code.
  */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 			     int wait)
@@ -321,6 +318,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/*
+ * smp_call_function_any - Run a function on any of the given cpus
+ * @mask: The mask of cpus it can run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait until function has completed.
+ *
+ * Returns 0 on success, else a negative status code (if no cpus were online).
+ * Note that @wait will be implicitly turned on in case of allocation failures,
+ * since we fall back to on-stack allocation.
+ *
+ * Selection preference:
+ *	1) current cpu if in @mask
+ *	2) any cpu of current node if in @mask
+ *	3) any other online cpu in @mask
+ */
+int smp_call_function_any(const struct cpumask *mask,
+			  void (*func)(void *info), void *info, int wait)
+{
+	unsigned int cpu;
+	const struct cpumask *nodemask;
+	int ret;
+
+	/* Try for same CPU (cheapest) */
+	cpu = get_cpu();
+	if (cpumask_test_cpu(cpu, mask))
+		goto call;
+
+	/* Try for same node. */
+	nodemask = cpumask_of_node(cpu);
+	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
+	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
+		if (cpu_online(cpu))
+			goto call;
+	}
+
+	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
+	cpu = cpumask_any_and(mask, cpu_online_mask);
+call:
+	ret = smp_call_function_single(cpu, func, info, wait);
+	put_cpu();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(smp_call_function_any);
+
 /**
  * __smp_call_function_single(): Run a function on another CPU
  * @cpu: The CPU to run on.
@@ -355,9 +397,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
  * @wait: If true, wait (atomically) until function has completed
  *        on other CPUs.
  *
- * If @wait is true, then returns once @func has returned. Note that @wait
- * will be implicitly turned on in case of allocation failures, since
- * we fall back to on-stack allocation.
+ * If @wait is true, then returns once @func has returned.
  *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler. Preemption
@@ -408,14 +448,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	cpumask_clear_cpu(this_cpu, data->cpumask);
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-	spin_lock_irqsave(&call_function.lock, flags);
+	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
 	 * observing the entry in generic_smp_call_function_interrupt()
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
-	spin_unlock_irqrestore(&call_function.lock, flags);
+	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
 	 * Make the list addition visible before sending the ipi.
@@ -443,8 +483,7 @@ EXPORT_SYMBOL(smp_call_function_many);
  * Returns 0.
  *
  * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func. In case of allocation
- * failure, @wait will be implicitly turned on.
+ * it returns just before the target cpu calls @func.
  *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
@@ -461,20 +500,20 @@ EXPORT_SYMBOL(smp_call_function);
 
 void ipi_call_lock(void)
 {
-	spin_lock(&call_function.lock);
+	raw_spin_lock(&call_function.lock);
 }
 
 void ipi_call_unlock(void)
 {
-	spin_unlock(&call_function.lock);
+	raw_spin_unlock(&call_function.lock);
 }
 
 void ipi_call_lock_irq(void)
 {
-	spin_lock_irq(&call_function.lock);
+	raw_spin_lock_irq(&call_function.lock);
 }
 
 void ipi_call_unlock_irq(void)
 {
-	spin_unlock_irq(&call_function.lock);
+	raw_spin_unlock_irq(&call_function.lock);
 }
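
The recurring change in this patch is the conversion of spinlock_t to raw_spinlock_t in the IPI paths. Below is a minimal sketch of the same pattern under stated assumptions: the demo_* names are hypothetical and not part of this patch, and the usual motivation for the conversion is that raw_spinlock_t keeps true busy-wait semantics even in configurations (such as PREEMPT_RT) where plain spinlock_t can become a sleeping lock, which would not be safe in the hard-interrupt contexts smp.c runs in.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical lock/queue pair illustrating the pattern: a
 * raw_spinlock_t always busy-waits, so it remains safe to take
 * from hard-IRQ handlers like the IPI paths converted above. */
static DEFINE_RAW_SPINLOCK(demo_lock);
static LIST_HEAD(demo_queue);

static void demo_enqueue(struct list_head *entry)
{
	unsigned long flags;

	/* Mirrors the lock/unlock pairing in generic_exec_single(). */
	raw_spin_lock_irqsave(&demo_lock, flags);
	list_add_tail(entry, &demo_queue);
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}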
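
The newly exported smp_call_function_any() can be driven as in the sketch below; the callback, mask, and demo_* names are hypothetical. Since the callback may run from the call-function IPI on the chosen CPU, it must be fast, non-blocking, and IRQ-safe.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/types.h>

/* Hypothetical callback: runs on the selected CPU, possibly with
 * interrupts disabled, so it must not sleep or take long. */
static void demo_read_counter(void *info)
{
	*(u64 *)info = smp_processor_id();	/* stand-in for a per-CPU read */
}

static int demo_query(const struct cpumask *candidates, u64 *val)
{
	/*
	 * Per the kerneldoc above, the current CPU is preferred if it
	 * is in @candidates, then a CPU on the current node, then any
	 * online CPU in the mask. Passing wait=1 makes the call
	 * synchronous, so *val is valid once this returns 0.
	 */
	return smp_call_function_any(candidates, demo_read_counter, val, 1);
}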