Diffstat (limited to 'kernel/smp.c')

-rw-r--r--  kernel/smp.c | 68
1 file changed, 14 insertions(+), 54 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f94028838..ffee35bef179 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,17 +23,11 @@ enum {
 struct call_function_data {
 	struct call_single_data __percpu *csd;
 	cpumask_var_t cpumask;
-	cpumask_var_t cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
-struct call_single_queue {
-	struct list_head list;
-	raw_spinlock_t lock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
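
The per-CPU queue shrinks from a spinlock plus a doubly linked list head to a
single llist_head, one word updated with cmpxchg. The csd->llist member used
later in this patch presumably comes from a companion hunk in
include/linux/smp.h (outside this file-limited view) that swaps struct
call_single_data's list_head for an llist_node. For reference, the
before/after queue shapes (llist_head as defined in include/linux/llist.h):

	/* before: three words per CPU, pushes serialized by the lock */
	struct call_single_queue {
		struct list_head list;
		raw_spinlock_t lock;
	};

	/* after: one word per CPU, pushes serialized by cmpxchg */
	struct llist_head {
		struct llist_node *first;
	};
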
@@ -47,14 +41,8 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
-		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-				cpu_to_node(cpu))) {
-			free_cpumask_var(cfd->cpumask);
-			return notifier_from_errno(-ENOMEM);
-		}
 		cfd->csd = alloc_percpu(struct call_single_data);
 		if (!cfd->csd) {
-			free_cpumask_var(cfd->cpumask_ipi);
 			free_cpumask_var(cfd->cpumask);
 			return notifier_from_errno(-ENOMEM);
 		}
@@ -67,7 +55,6 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
-		free_cpumask_var(cfd->cpumask_ipi);
 		free_percpu(cfd->csd);
 		break;
 #endif
@@ -85,12 +72,8 @@ void __init call_function_init(void)
 	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
-	for_each_possible_cpu(i) {
-		struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
-		raw_spin_lock_init(&q->lock);
-		INIT_LIST_HEAD(&q->list);
-	}
+	for_each_possible_cpu(i)
+		init_llist_head(&per_cpu(call_single_queue, i));
 
 	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
 	register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +124,9 @@ static void csd_unlock(struct call_single_data *csd)
  */
 static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
-	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	unsigned long flags;
-	int ipi;
-
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
 
-	raw_spin_lock_irqsave(&dst->lock, flags);
-	ipi = list_empty(&dst->list);
-	list_add_tail(&csd->list, &dst->list);
-	raw_spin_unlock_irqrestore(&dst->lock, flags);
-
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
@@ -164,7 +138,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
-	if (ipi)
+	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
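
The point of this hunk: llist_add() returns true only when the node was pushed
onto a previously empty list, which folds the old "ipi = list_empty()" test
into the push itself, atomically. A minimal userspace sketch of that semantics
using C11 atomics; lnode, lhead, and lpush are hypothetical names, not the
kernel API (a driver for the sketches appears after the last hunk):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct lnode {
		struct lnode *next;
	};

	struct lhead {
		_Atomic(struct lnode *) first;
	};

	/* Push @node; report whether the list was empty beforehand, i.e.
	 * whether the consumer needs a kick (the IPI in the kernel case). */
	static bool lpush(struct lnode *node, struct lhead *head)
	{
		struct lnode *first = atomic_load(&head->first);

		do {
			/* on CAS failure, 'first' is reloaded for us */
			node->next = first;
		} while (!atomic_compare_exchange_weak(&head->first, &first, node));

		return first == NULL;
	}
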
@@ -177,27 +151,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	LIST_HEAD(list);
+	struct llist_node *entry, *next;
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	raw_spin_lock(&q->lock);
-	list_replace_init(&q->list, &list);
-	raw_spin_unlock(&q->lock);
+	entry = llist_del_all(&__get_cpu_var(call_single_queue));
+	entry = llist_reverse_order(entry);
 
-	while (!list_empty(&list)) {
+	while (entry) {
 		struct call_single_data *csd;
 
-		csd = list_entry(list.next, struct call_single_data, list);
-		list_del(&csd->list);
+		next = entry->next;
 
+		csd = llist_entry(entry, struct call_single_data, llist);
 		csd->func(csd->info);
-
 		csd_unlock(csd);
+
+		entry = next;
 	}
 }
 
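
On the consumer side, lock/splice/unlock becomes a single atomic exchange, and
the detached chain is reversed because the lockless push is LIFO while callers
were queued (and previously ran) in FIFO order. Continuing the userspace
sketch from above, mirroring llist_del_all() and llist_reverse_order():

	/* Detach the whole pending chain in one shot. */
	static struct lnode *lpop_all(struct lhead *head)
	{
		return atomic_exchange(&head->first, NULL);
	}

	/* Restore queue order; safe because the chain is now private. */
	static struct lnode *lreverse(struct lnode *node)
	{
		struct lnode *rev = NULL;

		while (node) {
			struct lnode *next = node->next;

			node->next = rev;
			rev = node;
			node = next;
		}
		return rev;
	}
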
@@ -402,30 +375,17 @@ void smp_call_function_many(const struct cpumask *mask,
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
-	/*
-	 * After we put an entry into the list, cfd->cpumask may be cleared
-	 * again when another CPU sends another IPI for a SMP function call, so
-	 * cfd->cpumask will be zero.
-	 */
-	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
-
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
-		struct call_single_queue *dst =
-				&per_cpu(call_single_queue, cpu);
-		unsigned long flags;
 
 		csd_lock(csd);
 		csd->func = func;
 		csd->info = info;
-
-		raw_spin_lock_irqsave(&dst->lock, flags);
-		list_add_tail(&csd->list, &dst->list);
-		raw_spin_unlock_irqrestore(&dst->lock, flags);
+		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
 	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+	arch_send_call_function_ipi_mask(cfd->cpumask);
 
 	if (wait) {
 		for_each_cpu(cpu, cfd->cpumask) {
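
In this many-CPU path the return value of llist_add() is deliberately ignored:
the sender IPIs every CPU in cfd->cpumask regardless, and since nothing in the
new code touches cfd->cpumask between filling it and the send, the cpumask_ipi
snapshot (and its comment, which apparently described an older design) can go.
A tiny single-threaded driver tying the sketches above together; hypothetical
names throughout, built with cc -std=c11 on the concatenated sketches:

	#include <stdio.h>

	struct item {
		struct lnode node;	/* first member, so a cast recovers it */
		int id;
	};

	int main(void)
	{
		struct lhead head = { .first = NULL };
		struct item items[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

		/* Only the first push reports "was empty" -- the lone IPI. */
		for (int i = 0; i < 3; i++)
			printf("push %d, was empty: %d\n", i,
			       lpush(&items[i].node, &head));

		/* Detach, reverse, run in queue order: 0, 1, 2. */
		for (struct lnode *n = lreverse(lpop_all(&head)); n; n = n->next)
			printf("run %d\n", ((struct item *)n)->id);

		return 0;
	}
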