| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2010-03-01 02:55:20 -0500 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2010-03-01 02:55:20 -0500 |
| commit | 35858adbfca13678af99fb31618ef4428d6dedb0 (patch) | |
| tree | 3336feaa61324486945816cb52c347733e7c0821 /kernel/smp.c | |
| parent | 197d4db752e67160d79fed09968c2140376a80a3 (diff) | |
| parent | 4b70858ba8d4537daf782defebe5f2ff80ccef2b (diff) | |
Merge branch 'next' into for-linus
Diffstat (limited to 'kernel/smp.c')
| -rw-r--r-- | kernel/smp.c | 37 |
1 file changed, 18 insertions(+), 19 deletions(-)
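The hunks below are a mostly mechanical conversion of the locks in kernel/smp.c from spinlock_t to raw_spinlock_t, plus two small cleanups visible in the diff: a get_cpu()/put_cpu() pair in the IPI handler is replaced with smp_processor_id(), and cpumask_of_node() is now passed cpu_to_node(cpu). As a reading aid, here is a minimal sketch of the raw spinlock pattern the code converts to; the names demo_queue and demo_enqueue are hypothetical and do not appear in the commit.

```c
/*
 * Illustrative sketch only: demo_queue/demo_enqueue are hypothetical names,
 * not part of kernel/smp.c. The pattern mirrors what the hunks below do.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_queue {
	struct list_head	list;
	raw_spinlock_t		lock;	/* was spinlock_t before the conversion */
};

static struct demo_queue demo_q = {
	.list = LIST_HEAD_INIT(demo_q.list),
	.lock = __RAW_SPIN_LOCK_UNLOCKED(demo_q.lock),
};

static void demo_enqueue(struct list_head *entry)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() behaves like the old spin_lock_irqsave()
	 * here, but stays a true spinning lock even on kernels where
	 * spinlock_t can become a sleeping lock (PREEMPT_RT).
	 */
	raw_spin_lock_irqsave(&demo_q.lock, flags);
	list_add_tail(entry, &demo_q.list);
	raw_spin_unlock_irqrestore(&demo_q.lock, flags);
}
```

On a mainline kernel of this vintage the raw_spin_* calls compile to the same code as the plain spin_* ones; the annotation matters for the -rt patch set, where spinlock_t becomes a sleeping lock and these IPI paths must keep a non-sleeping lock.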
diff --git a/kernel/smp.c b/kernel/smp.c
index a8c76069cf50..f10408422444 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
 
 static struct {
 	struct list_head	queue;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 } call_function __cacheline_aligned_in_smp =
 	{
 		.queue		= LIST_HEAD_INIT(call_function.queue),
-		.lock		= __SPIN_LOCK_UNLOCKED(call_function.lock),
+		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
 	};
 
 enum {
@@ -35,7 +35,7 @@ struct call_function_data {
 
 struct call_single_queue {
 	struct list_head	list;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 };
 
 static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
 	for_each_possible_cpu(i) {
 		struct call_single_queue *q = &per_cpu(call_single_queue, i);
 
-		spin_lock_init(&q->lock);
+		raw_spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
 
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 	unsigned long flags;
 	int ipi;
 
-	spin_lock_irqsave(&dst->lock, flags);
+	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&data->list, &dst->list);
-	spin_unlock_irqrestore(&dst->lock, flags);
+	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
 	 * The list addition should be visible before sending the IPI
@@ -171,7 +171,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 void generic_smp_call_function_interrupt(void)
 {
 	struct call_function_data *data;
-	int cpu = get_cpu();
+	int cpu = smp_processor_id();
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 		if (!refs) {
-			spin_lock(&call_function.lock);
+			raw_spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
-			spin_unlock(&call_function.lock);
+			raw_spin_unlock(&call_function.lock);
 		}
 
 		if (refs)
@@ -212,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
 		csd_unlock(&data->csd);
 	}
 
-	put_cpu();
 }
 
 /*
@@ -230,9 +229,9 @@ void generic_smp_call_function_single_interrupt(void)
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	spin_lock(&q->lock);
+	raw_spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
-	spin_unlock(&q->lock);
+	raw_spin_unlock(&q->lock);
 
 	while (!list_empty(&list)) {
 		struct call_single_data *data;
@@ -348,7 +347,7 @@ int smp_call_function_any(const struct cpumask *mask,
 		goto call;
 
 	/* Try for same node. */
-	nodemask = cpumask_of_node(cpu);
+	nodemask = cpumask_of_node(cpu_to_node(cpu));
 	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
 	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
 		if (cpu_online(cpu))
@@ -449,14 +448,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	cpumask_clear_cpu(this_cpu, data->cpumask);
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-	spin_lock_irqsave(&call_function.lock, flags);
+	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
 	 * observing the entry in generic_smp_call_function_interrupt()
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
-	spin_unlock_irqrestore(&call_function.lock, flags);
+	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
 	 * Make the list addition visible before sending the ipi.
@@ -501,20 +500,20 @@ EXPORT_SYMBOL(smp_call_function);
 
 void ipi_call_lock(void)
 {
-	spin_lock(&call_function.lock);
+	raw_spin_lock(&call_function.lock);
 }
 
 void ipi_call_unlock(void)
 {
-	spin_unlock(&call_function.lock);
+	raw_spin_unlock(&call_function.lock);
 }
 
 void ipi_call_lock_irq(void)
 {
-	spin_lock_irq(&call_function.lock);
+	raw_spin_lock_irq(&call_function.lock);
 }
 
 void ipi_call_unlock_irq(void)
 {
-	spin_unlock_irq(&call_function.lock);
+	raw_spin_unlock_irq(&call_function.lock);
 }
