Diffstat (limited to 'kernel/smp.c')
 -rw-r--r--   kernel/smp.c   51
 1 file changed, 13 insertions(+), 38 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f94028838..4ad913e7c253 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -28,12 +28,7 @@ struct call_function_data {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
-struct call_single_queue {
-        struct list_head list;
-        raw_spinlock_t lock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -85,12 +80,8 @@ void __init call_function_init(void)
         void *cpu = (void *)(long)smp_processor_id();
         int i;
 
-        for_each_possible_cpu(i) {
-                struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
-                raw_spin_lock_init(&q->lock);
-                INIT_LIST_HEAD(&q->list);
-        }
+        for_each_possible_cpu(i)
+                init_llist_head(&per_cpu(call_single_queue, i));
 
         hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
         register_cpu_notifier(&hotplug_cfd_notifier);
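
The first two hunks replace the per-CPU pair of raw_spinlock_t and list_head with a single llist_head, so per-CPU initialization shrinks to init_llist_head(). As a rough userspace analogue of the idea (my own sketch, not the code in <linux/llist.h>), the entire queue state becomes one atomic head pointer:

    #include <stdatomic.h>
    #include <stddef.h>

    /* Illustration only: stand-ins for struct llist_node / struct llist_head.
     * The queue is a singly linked list whose head is a single atomic pointer,
     * so producers never have to take a lock. */
    struct lnode {
        struct lnode *next;
    };

    struct lhead {
        _Atomic(struct lnode *) first;
    };

    /* Analogue of init_llist_head(): an empty queue is just a NULL head. */
    static void lhead_init(struct lhead *h)
    {
        atomic_init(&h->first, NULL);
    }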
@@ -141,18 +132,9 @@ static void csd_unlock(struct call_single_data *csd)
  */
 static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
-        struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-        unsigned long flags;
-        int ipi;
-
         if (wait)
                 csd->flags |= CSD_FLAG_WAIT;
 
-        raw_spin_lock_irqsave(&dst->lock, flags);
-        ipi = list_empty(&dst->list);
-        list_add_tail(&csd->list, &dst->list);
-        raw_spin_unlock_irqrestore(&dst->lock, flags);
-
         /*
          * The list addition should be visible before sending the IPI
          * handler locks the list to pull the entry off it because of
@@ -164,7 +146,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
          * locking and barrier primitives. Generic code isn't really
          * equipped to do the right thing...
          */
-        if (ipi)
+        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                 arch_send_call_function_single_ipi(cpu);
 
         if (wait)
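
The property the new generic_exec_single() relies on is that llist_add() returns true when the list was empty before the insertion, which subsumes the old lock-protected ipi = list_empty(&dst->list) test: only the producer that makes the queue non-empty sends the IPI. A hedged sketch of such a push, continuing the illustration above:

    #include <stdbool.h>

    /* Push 'node' and report whether the queue was empty beforehand, i.e.
     * whether the caller must notify the consumer (the IPI in the patch). */
    static bool lhead_add(struct lnode *node, struct lhead *h)
    {
        struct lnode *old = atomic_load(&h->first);

        do {
            node->next = old;
            /* Retry if another producer moved the head underneath us. */
        } while (!atomic_compare_exchange_weak(&h->first, &old, node));

        return old == NULL;
    }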
@@ -177,27 +159,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-        struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-        LIST_HEAD(list);
+        struct llist_node *entry, *next;
 
         /*
          * Shouldn't receive this interrupt on a cpu that is not yet online.
         */
         WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-        raw_spin_lock(&q->lock);
-        list_replace_init(&q->list, &list);
-        raw_spin_unlock(&q->lock);
+        entry = llist_del_all(&__get_cpu_var(call_single_queue));
+        entry = llist_reverse_order(entry);
 
-        while (!list_empty(&list)) {
+        while (entry) {
                 struct call_single_data *csd;
 
-                csd = list_entry(list.next, struct call_single_data, list);
-                list_del(&csd->list);
+                next = entry->next;
 
+                csd = llist_entry(entry, struct call_single_data, llist);
                 csd->func(csd->info);
-
                 csd_unlock(csd);
+
+                entry = next;
         }
 }
 
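
On the consumer side, llist_del_all() detaches the whole pending chain in one atomic exchange, and because producers push at the head the chain comes back newest-first; llist_reverse_order() restores the FIFO order the old list_add_tail()/list_del() loop provided. Note also that the handler saves entry->next before invoking the callback and csd_unlock(), since the entry may be reused as soon as it is unlocked. A corresponding sketch (same caveats as above):

    /* Analogue of llist_del_all(): grab the whole chain and leave the queue
     * empty, without blocking concurrent producers. */
    static struct lnode *lhead_del_all(struct lhead *h)
    {
        return atomic_exchange(&h->first, NULL);
    }

    /* Analogue of llist_reverse_order(): the detached chain is newest-first,
     * so reverse it to process requests in the order they were queued. */
    static struct lnode *lreverse(struct lnode *head)
    {
        struct lnode *rev = NULL;

        while (head) {
            struct lnode *next = head->next;

            head->next = rev;
            rev = head;
            head = next;
        }
        return rev;
    }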
@@ -411,17 +392,11 @@ void smp_call_function_many(const struct cpumask *mask,
 
         for_each_cpu(cpu, cfd->cpumask) {
                 struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
-                struct call_single_queue *dst =
-                                        &per_cpu(call_single_queue, cpu);
-                unsigned long flags;
 
                 csd_lock(csd);
                 csd->func = func;
                 csd->info = info;
-
-                raw_spin_lock_irqsave(&dst->lock, flags);
-                list_add_tail(&csd->list, &dst->list);
-                raw_spin_unlock_irqrestore(&dst->lock, flags);
+                llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
         }
 
         /* Send a message to all CPUs in the map */
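
smp_call_function_many() reuses the same push but ignores the return value, since IPIs are sent to the whole mask after the loop anyway (the "/* Send a message to all CPUs in the map */" step). Putting the sketches together, the pattern the patch ends up with looks roughly like this (hypothetical names, single-threaded demo only):

    #include <stdio.h>

    struct request {
        struct lnode node;  /* embedded the way csd->llist is embedded */
        int id;
    };

    int main(void)
    {
        struct lhead queue;
        struct request reqs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

        lhead_init(&queue);

        /* Producers: only the push that finds the queue empty needs to
         * notify the consumer (the IPI in the real code). */
        for (int i = 0; i < 3; i++)
            if (lhead_add(&reqs[i].node, &queue))
                printf("queue was empty, notify consumer\n");

        /* Consumer: drain everything at once, restore FIFO order, walk. */
        for (struct lnode *n = lreverse(lhead_del_all(&queue)); n; n = n->next) {
            struct request *r = (struct request *)n;  /* node is the first member */
            printf("handling request %d\n", r->id);
        }
        return 0;
    }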