 kernel/smp.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index a8c76069cf50..6e7c7fdcd9b4 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
 
 static struct {
 	struct list_head	queue;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 } call_function __cacheline_aligned_in_smp =
 	{
 		.queue		= LIST_HEAD_INIT(call_function.queue),
-		.lock		= __SPIN_LOCK_UNLOCKED(call_function.lock),
+		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
 	};
 
 enum {
@@ -35,7 +35,7 @@ struct call_function_data {
 
 struct call_single_queue {
 	struct list_head	list;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 };
 
 static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
 	for_each_possible_cpu(i) {
 		struct call_single_queue *q = &per_cpu(call_single_queue, i);
 
-		spin_lock_init(&q->lock);
+		raw_spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
 
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 	unsigned long flags;
 	int ipi;
 
-	spin_lock_irqsave(&dst->lock, flags);
+	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&data->list, &dst->list);
-	spin_unlock_irqrestore(&dst->lock, flags);
+	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
 	 * The list addition should be visible before sending the IPI
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 		if (!refs) {
-			spin_lock(&call_function.lock);
+			raw_spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
-			spin_unlock(&call_function.lock);
+			raw_spin_unlock(&call_function.lock);
 		}
 
 		if (refs)
@@ -230,9 +230,9 @@ void generic_smp_call_function_single_interrupt(void)
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	spin_lock(&q->lock);
+	raw_spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
-	spin_unlock(&q->lock);
+	raw_spin_unlock(&q->lock);
 
 	while (!list_empty(&list)) {
 		struct call_single_data *data;
@@ -449,14 +449,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	cpumask_clear_cpu(this_cpu, data->cpumask);
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-	spin_lock_irqsave(&call_function.lock, flags);
+	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
 	 * observing the entry in generic_smp_call_function_interrupt()
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
-	spin_unlock_irqrestore(&call_function.lock, flags);
+	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
 	 * Make the list addition visible before sending the ipi.
@@ -501,20 +501,20 @@ EXPORT_SYMBOL(smp_call_function);
 
 void ipi_call_lock(void)
 {
-	spin_lock(&call_function.lock);
+	raw_spin_lock(&call_function.lock);
 }
 
 void ipi_call_unlock(void)
 {
-	spin_unlock(&call_function.lock);
+	raw_spin_unlock(&call_function.lock);
 }
 
 void ipi_call_lock_irq(void)
 {
-	spin_lock_irq(&call_function.lock);
+	raw_spin_lock_irq(&call_function.lock);
 }
 
 void ipi_call_unlock_irq(void)
 {
-	spin_unlock_irq(&call_function.lock);
+	raw_spin_unlock_irq(&call_function.lock);
 }