Diffstat (limited to 'kernel/smp.c')
-rw-r--r--	kernel/smp.c	57
1 file changed, 49 insertions(+), 8 deletions(-)
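For orientation before reading the patch: it teaches kernel/smp.c to flush any pending smp-call-function callbacks on a CPU that is about to go offline (the CPU_DYING notifier step), by splitting the body of the IPI handler into a new flush_smp_call_function_queue() helper. The helper's core pattern, visible in the hunks below, is to detach the whole per-CPU llist in one shot (llist_del_all()), reverse it back into submission order (llist_reverse_order()), and then walk the nodes while fetching each successor before running the callback. The following is a minimal userspace sketch of that drain/reverse/run pattern, not kernel code; struct csd, csd_push() and csd_flush() are made-up names for illustration:

#include <stdio.h>
#include <stdlib.h>

struct csd {
	struct csd *next;
	void (*func)(void *info);
	void *info;
};

static struct csd *queue;	/* pending callbacks, newest first */

static void csd_push(struct csd *csd)
{
	csd->next = queue;	/* LIFO push, like llist_add() */
	queue = csd;
}

static void csd_flush(void)
{
	struct csd *entry, *rev = NULL, *csd, *next;

	entry = queue;
	queue = NULL;		/* detach everything at once, like llist_del_all() */

	while (entry) {		/* reverse to FIFO, like llist_reverse_order() */
		next = entry->next;
		entry->next = rev;
		rev = entry;
		entry = next;
	}

	for (csd = rev; csd; csd = next) {
		next = csd->next;	/* fetch the successor before this node is freed */
		csd->func(csd->info);
		free(csd);
	}
}

static void say(void *info)
{
	printf("callback: %s\n", (const char *)info);
}

int main(void)
{
	static const char *const msgs[] = { "first", "second", "third" };

	for (int i = 0; i < 3; i++) {
		struct csd *csd = malloc(sizeof(*csd));

		csd->func = say;
		csd->info = (void *)msgs[i];
		csd_push(csd);
	}

	csd_flush();	/* runs the callbacks in submission order */
	return 0;
}

In the kernel the push side and llist_del_all() are lockless, so other CPUs can keep queueing work while one CPU drains its queue; the sketch uses a plain pointer only because it is single-threaded.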
diff --git a/kernel/smp.c b/kernel/smp.c
index 306f8180b0d5..80c33f8de14f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -29,6 +29,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
+static void flush_smp_call_function_queue(bool warn_cpu_offline);
+
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
@@ -51,12 +53,27 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
+		/* Fall-through to the CPU_DEAD[_FROZEN] case. */
 
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
 		free_percpu(cfd->csd);
 		break;
+
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		/*
+		 * The IPIs for the smp-call-function callbacks queued by other
+		 * CPUs might arrive late, either due to hardware latencies or
+		 * because this CPU disabled interrupts (inside stop-machine)
+		 * before the IPIs were sent. So flush out any pending callbacks
+		 * explicitly (without waiting for the IPIs to arrive), to
+		 * ensure that the outgoing CPU doesn't go offline with work
+		 * still pending.
+		 */
+		flush_smp_call_function_queue(false);
+		break;
 #endif
 	};
 
@@ -177,23 +194,47 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 	return 0;
 }
 
-/*
- * Invoked by arch to handle an IPI for call function single. Must be
- * called from the arch with interrupts disabled.
+/**
+ * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
+ *
+ * Invoked by arch to handle an IPI for call function single.
+ * Must be called with interrupts disabled.
  */
 void generic_smp_call_function_single_interrupt(void)
 {
+	flush_smp_call_function_queue(true);
+}
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *
+ * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
+ *		      offline CPU. Skip this check if set to 'false'.
+ *
+ * Flush any pending smp-call-function callbacks queued on this CPU. This is
+ * invoked by the generic IPI handler, as well as by a CPU about to go offline,
+ * to ensure that all pending IPI callbacks are run before it goes completely
+ * offline.
+ *
+ * Loop through the call_single_queue and run all the queued callbacks.
+ * Must be called with interrupts disabled.
+ */
+static void flush_smp_call_function_queue(bool warn_cpu_offline)
+{
+	struct llist_head *head;
 	struct llist_node *entry;
 	struct call_single_data *csd, *csd_next;
 	static bool warned;
 
-	entry = llist_del_all(&__get_cpu_var(call_single_queue));
+	WARN_ON(!irqs_disabled());
+
+	head = &__get_cpu_var(call_single_queue);
+	entry = llist_del_all(head);
 	entry = llist_reverse_order(entry);
 
-	/*
-	 * Shouldn't receive this interrupt on a cpu that is not yet online.
-	 */
-	if (unlikely(!cpu_online(smp_processor_id()) && !warned)) {
+	/* There shouldn't be any pending callbacks on an offline CPU. */
+	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
+		     !warned && !llist_empty(head))) {
 		warned = true;
 		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 