Diffstat (limited to 'kernel/smp.c')
 -rw-r--r--  kernel/smp.c | 90
 1 file changed, 23 insertions(+), 67 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 7a0ce25829dc..f5308258891a 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,8 +23,7 @@ static struct {
 };
 
 enum {
-	CSD_FLAG_WAIT		= 0x01,
-	CSD_FLAG_LOCK		= 0x02,
+	CSD_FLAG_LOCK		= 0x01,
 };
 
 struct call_function_data {
@@ -95,41 +94,21 @@ static int __cpuinit init_call_single_data(void)
 early_initcall(init_call_single_data);
 
 /*
- * csd_wait/csd_complete are used for synchronous ipi calls
- */
-static void csd_wait_prepare(struct call_single_data *data)
-{
-	data->flags |= CSD_FLAG_WAIT;
-}
-
-static void csd_complete(struct call_single_data *data)
-{
-	if (data->flags & CSD_FLAG_WAIT) {
-		/*
-		 * ensure we're all done before saying we are
-		 */
-		smp_mb();
-		data->flags &= ~CSD_FLAG_WAIT;
-	}
-}
-
-static void csd_wait(struct call_single_data *data)
-{
-	while (data->flags & CSD_FLAG_WAIT)
-		cpu_relax();
-}
-
-/*
  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
  *
  * For non-synchronous ipi calls the csd can still be in use by the previous
  * function call. For multi-cpu calls its even more interesting as we'll have
  * to ensure no other cpu is observing our csd.
  */
-static void csd_lock(struct call_single_data *data)
+static void csd_lock_wait(struct call_single_data *data)
 {
 	while (data->flags & CSD_FLAG_LOCK)
 		cpu_relax();
+}
+
+static void csd_lock(struct call_single_data *data)
+{
+	csd_lock_wait(data);
 	data->flags = CSD_FLAG_LOCK;
 
 	/*
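The csd_lock()/csd_lock_wait() split in the hunk above is the crux of the patch: the same spin on CSD_FLAG_LOCK that serializes reuse of a csd now doubles as the completion wait for synchronous calls. As a rough userspace model in C11 atomics (illustrative only: the struct is reduced to its flags word, smp_mb() is approximated by a seq_cst fence, and the kernel's per-cpu context is assumed away):

#include <stdatomic.h>

#define CSD_FLAG_LOCK	0x01

struct csd {
	_Atomic unsigned int flags;	/* models call_single_data::flags */
};

/*
 * Spin until whoever holds the csd -- an earlier caller, or the IPI
 * handler still consuming the previous call -- drops CSD_FLAG_LOCK.
 */
static void csd_lock_wait(struct csd *data)
{
	while (atomic_load_explicit(&data->flags, memory_order_acquire)
	       & CSD_FLAG_LOCK)
		;			/* the kernel spins with cpu_relax() */
}

static void csd_lock(struct csd *data)
{
	csd_lock_wait(data);
	atomic_store_explicit(&data->flags, CSD_FLAG_LOCK,
			      memory_order_relaxed);
	/*
	 * The kernel follows the flag store with smp_mb() so the
	 * ->func/->info stores that come next cannot be reordered
	 * above it; a seq_cst fence is the nearest portable analogue.
	 */
	atomic_thread_fence(memory_order_seq_cst);
}

static void csd_unlock(struct csd *data)
{
	/*
	 * Release ordering: everything the callee wrote is visible
	 * before the flag clears, so dropping the lock is itself the
	 * completion signal a synchronous caller spins on.
	 */
	atomic_fetch_and_explicit(&data->flags, ~CSD_FLAG_LOCK,
				  memory_order_release);
}

No compare-and-swap is needed in csd_lock() because each csd is only ever locked from one context at a time (the owning CPU for the per-cpu csd, the local stack frame for a waited-on one); the spin merely waits out the previous use.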
@@ -155,11 +134,12 @@ static void csd_unlock(struct call_single_data *data)
  * Insert a previously allocated call_single_data element for execution
  * on the given CPU. data must already have ->func, ->info, and ->flags set.
  */
-static void generic_exec_single(int cpu, struct call_single_data *data)
+static
+void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 {
 	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	int wait = data->flags & CSD_FLAG_WAIT, ipi;
 	unsigned long flags;
+	int ipi;
 
 	spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
@@ -182,7 +162,7 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
-		csd_wait(data);
+		csd_lock_wait(data);
 }
 
 /*
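Since CSD_FLAG_WAIT no longer exists, generic_exec_single() cannot infer waiting from data->flags and takes wait as a parameter instead; the completion wait is simply csd_lock_wait() again. The round trip can be modeled end to end in userspace with two threads, where an atomic mailbox stands in for the call queue plus IPI (the names, the single-slot queue, and the polling loop are all invented for this sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define CSD_FLAG_LOCK	0x01

struct csd {
	_Atomic unsigned int flags;
	void (*func)(void *info);
	void *info;
};

/* Invented single-slot stand-in for call_single_queue + the IPI. */
static struct csd *_Atomic mailbox;

static void csd_lock_wait(struct csd *data)
{
	while (atomic_load_explicit(&data->flags, memory_order_acquire)
	       & CSD_FLAG_LOCK)
		;			/* kernel: cpu_relax() */
}

static void csd_unlock(struct csd *data)
{
	atomic_fetch_and_explicit(&data->flags, ~CSD_FLAG_LOCK,
				  memory_order_release);
}

/* Sender, shaped like the patched generic_exec_single(). */
static void exec_single(struct csd *data, int wait)
{
	atomic_store_explicit(&mailbox, data, memory_order_release);

	if (wait)
		csd_lock_wait(data);	/* completion == callee unlocked */
}

/* Receiver, shaped like generic_smp_call_function_single_interrupt(). */
static void *remote_cpu(void *unused)
{
	struct csd *data;

	(void)unused;
	while (!(data = atomic_exchange_explicit(&mailbox, NULL,
						 memory_order_acquire)))
		;			/* polling stands in for the IPI */

	data->func(data->info);
	csd_unlock(data);		/* one store: done *and* reusable */
	return NULL;
}

static void say(void *info)
{
	printf("remote: %s\n", (const char *)info);
}

int main(void)
{
	/*
	 * A waited-on call may use stack storage precisely because
	 * csd_lock_wait() does not return until the callee is done.
	 */
	struct csd d = { CSD_FLAG_LOCK, say, "hello" };
	pthread_t t;

	pthread_create(&t, NULL, remote_cpu, NULL);
	exec_single(&d, 1);
	puts("caller: csd released, stack frame may safely unwind");
	pthread_join(&t, NULL);
	return 0;
}

Built with cc -pthread, the sketch prints the remote message before the caller's line, demonstrating that the unlock is what lets the waiter proceed.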
@@ -232,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
 		if (refs)
 			continue;
 
-		csd_complete(&data->csd);
 		csd_unlock(&data->csd);
 	}
 
@@ -270,9 +249,6 @@ void generic_smp_call_function_single_interrupt(void)
 
 		data->func(data->info);
 
-		if (data_flags & CSD_FLAG_WAIT)
-			csd_complete(data);
-
 		/*
 		 * Unlocked CSDs are valid through generic_exec_single()
 		 */
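The surviving "Unlocked CSDs are valid through generic_exec_single()" comment encodes the handler-side rule: everything the handler needs must be taken from the csd before csd_unlock(), because clearing the flag hands the csd back, possibly to a stack frame that is about to unwind. A sketch of that discipline (the handler shape is assumed for illustration, not copied from the tree; struct csd is as in the earlier model):

#include <stdatomic.h>

#define CSD_FLAG_LOCK	0x01

struct csd {
	_Atomic unsigned int flags;
	void (*func)(void *info);
	void *info;
};

static void csd_unlock(struct csd *data)
{
	atomic_fetch_and_explicit(&data->flags, ~CSD_FLAG_LOCK,
				  memory_order_release);
}

/* Assumed handler shape, for illustration only. */
static void handle_one(struct csd *data)
{
	unsigned int data_flags = data->flags;	/* snapshot first */

	data->func(data->info);

	/*
	 * The moment the flag clears, the csd belongs to someone
	 * else: a stack frame about to unwind, or the per-cpu csd
	 * another caller is spinning in csd_lock() to reuse.
	 * Nothing may touch *data after csd_unlock().
	 */
	if (data_flags & CSD_FLAG_LOCK)
		csd_unlock(data);
}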
@@ -313,36 +289,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		func(info);
 		local_irq_restore(flags);
 	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-		struct call_single_data *data;
+		struct call_single_data *data = &d;
 
-		if (!wait) {
-			/*
-			 * We are calling a function on a single CPU
-			 * and we are not going to wait for it to finish.
-			 * We use a per cpu data to pass the information to
-			 * that CPU. Since all callers of this code will
-			 * use the same data, we must synchronize the
-			 * callers to prevent a new caller from corrupting
-			 * the data before the callee can access it.
-			 *
-			 * The CSD_FLAG_LOCK is used to let us know when
-			 * the IPI handler is done with the data.
-			 * The first caller will set it, and the callee
-			 * will clear it. The next caller must wait for
-			 * it to clear before we set it again. This
-			 * will make sure the callee is done with the
-			 * data before a new caller will use it.
-			 */
+		if (!wait)
 			data = &__get_cpu_var(csd_data);
-			csd_lock(data);
-		} else {
-			data = &d;
-			csd_wait_prepare(data);
-		}
+
+		csd_lock(data);
 
 		data->func = func;
 		data->info = info;
-		generic_exec_single(cpu, data);
+		generic_exec_single(cpu, data, wait);
 	} else {
 		err = -ENXIO;	/* CPU not online */
 	}
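After this rewrite, smp_call_function_single() treats both modes uniformly: every csd is csd_lock()ed, and the only remaining difference is where the csd lives. A condensed, self-contained model of that policy (the array stands in for per-cpu data, exec_single() is collapsed to an inline stand-in that runs the work and unlocks, and the local-CPU fast path plus error handling are omitted):

#include <stdatomic.h>
#include <stddef.h>

#define CSD_FLAG_LOCK	0x01
#define NR_CPUS		4

struct csd {
	_Atomic unsigned int flags;
	void (*func)(void *info);
	void *info;
};

/* Stand-in for the kernel's per-cpu csd_data. */
static struct csd csd_data[NR_CPUS];

static void csd_lock(struct csd *data)
{
	while (atomic_load_explicit(&data->flags, memory_order_acquire)
	       & CSD_FLAG_LOCK)
		;			/* kernel: cpu_relax() */
	atomic_store_explicit(&data->flags, CSD_FLAG_LOCK,
			      memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
}

/* Inline stand-in: the real code enqueues, IPIs, and optionally
 * csd_lock_wait()s; here the "remote" work runs immediately. */
static void exec_single(int cpu, struct csd *data, int wait)
{
	(void)cpu; (void)wait;
	data->func(data->info);
	atomic_fetch_and_explicit(&data->flags, ~CSD_FLAG_LOCK,
				  memory_order_release);
}

static void call_single(int this_cpu, int cpu,
			void (*func)(void *), void *info, int wait)
{
	struct csd d = { 0 };		/* on-stack csd: safe iff we wait */
	struct csd *data = &d;

	/*
	 * Fire-and-forget callers must not use the stack, since the
	 * frame may be gone before the IPI lands; they share the
	 * per-cpu csd, and csd_lock() throttles them until the
	 * previous asynchronous call has been consumed.
	 */
	if (!wait)
		data = &csd_data[this_cpu];

	csd_lock(data);
	data->func = func;
	data->info = info;
	exec_single(cpu, data, wait);
}

static void ping(void *info) { (void)info; }

int main(void)
{
	call_single(0, 1, ping, NULL, 1);	/* synchronous: stack csd */
	call_single(0, 1, ping, NULL, 0);	/* asynchronous: per-cpu csd */
	return 0;
}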
@@ -362,12 +318,15 @@ EXPORT_SYMBOL(smp_call_function_single);
  * instance.
  *
  */
-void __smp_call_function_single(int cpu, struct call_single_data *data)
+void __smp_call_function_single(int cpu, struct call_single_data *data,
+				int wait)
 {
+	csd_lock(data);
+
 	/* Can deadlock when called with interrupts disabled */
-	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());
+	WARN_ON(wait && irqs_disabled());
 
-	generic_exec_single(cpu, data);
+	generic_exec_single(cpu, data, wait);
 }
 
 /* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
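The reworked WARN_ON is worth dwelling on: with CSD_FLAG_WAIT gone, the deadlock check has to key off the wait argument. The hazard itself is unchanged, and can be modeled in userspace with invented stand-ins for irqs_disabled() and WARN_ON (both are assumptions of the sketch, not kernel code):

#include <stdio.h>

/* Invented stand-ins: a per-"CPU" interrupts-off flag and a WARN_ON. */
static _Thread_local int irqs_disabled_flag;

#define irqs_disabled()	irqs_disabled_flag
#define WARN_ON(cond) \
	do { if (cond) fprintf(stderr, "WARN: %s\n", #cond); } while (0)

/*
 * Why wait && irqs_disabled() is the dangerous combination: if CPU A
 * spins for CPU B's handler with its own interrupts off, while CPU B
 * is symmetrically spinning for CPU A, neither IPI is ever serviced
 * and both CPUs spin forever.
 */
static void check_wait_safety(int wait)
{
	WARN_ON(wait && irqs_disabled());
}

int main(void)
{
	irqs_disabled_flag = 1;		/* model local_irq_disable() */
	check_wait_safety(1);		/* fires: would risk deadlock */
	irqs_disabled_flag = 0;		/* model local_irq_enable() */
	check_wait_safety(1);		/* quiet: waiting is fine now */
	return 0;
}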
@@ -425,9 +384,6 @@ void smp_call_function_many(const struct cpumask *mask,
 	csd_lock(&data->csd);
 
 	spin_lock_irqsave(&data->lock, flags);
-	if (wait)
-		csd_wait_prepare(&data->csd);
-
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
@@ -456,7 +412,7 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/* optionally wait for the CPUs to complete */
 	if (wait)
-		csd_wait(&data->csd);
+		csd_lock_wait(&data->csd);
 }
 EXPORT_SYMBOL(smp_call_function_many);
 