author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-02-25 07:59:48 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-25 08:13:44 -0500
commit		6e2756376c706e4da3454a272947983f92e80a7e
tree		3a54c5dd97921ae869bb6122a69af42d9161ee3e
parent		8969a5ede0f9e17da4b943712429aef2c9bcd82b
generic-ipi: remove CSD_FLAG_WAIT
Oleg noticed that we don't strictly need CSD_FLAG_WAIT; rework
the code so that we can use CSD_FLAG_LOCK for both purposes.
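
For reference, the protocol this reduces to is small enough to sketch.
The following is an illustrative reduction of the patched kernel/smp.c,
not the literal kernel code; csd_unlock() is not visible in this diff,
so its barrier placement below is an assumption inferred from the
csd_lock() side of the pairing:

	/* One flag now means both "csd is in use" and "call not yet done". */
	static void csd_lock_wait(struct call_single_data *data)
	{
		/* Spin until the current owner (or the IPI handler) drops the flag. */
		while (data->flags & CSD_FLAG_LOCK)
			cpu_relax();
	}

	static void csd_lock(struct call_single_data *data)
	{
		csd_lock_wait(data);
		data->flags = CSD_FLAG_LOCK;
		smp_mb();	/* order the flag store before ->func/->info stores */
	}

	static void csd_unlock(struct call_single_data *data)
	{
		smp_mb();	/* make the called function's effects visible first */
		data->flags &= ~CSD_FLAG_LOCK;
	}

A synchronous caller csd_lock()s the csd before queueing it and spins
in csd_lock_wait() after sending the IPI; the handler's csd_unlock()
then doubles as the completion notification, which is what makes
CSD_FLAG_WAIT redundant.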
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 block/blk-softirq.c |  2 +-
 include/linux/smp.h |  3 ++-
 kernel/sched.c      |  2 +-
 kernel/smp.c        | 90 ++++++++++++---------------------------------
 kernel/softirq.c    |  2 +-
 5 files changed, 28 insertions(+), 71 deletions(-)
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ce0efc6b26dc..ee9c21602228 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -64,7 +64,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
 		data->info = rq;
 		data->flags = 0;
 
-		__smp_call_function_single(cpu, data);
+		__smp_call_function_single(cpu, data, 0);
 		return 0;
 	}
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 715196b09d67..00866d7fdf34 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
 	return 0;
 }
 
-void __smp_call_function_single(int cpuid, struct call_single_data *data);
+void __smp_call_function_single(int cpuid, struct call_single_data *data,
+				int wait);
 
 /*
  * Generic and arch helpers
diff --git a/kernel/sched.c b/kernel/sched.c
index 410eec404133..d4c2749a2998 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1093,7 +1093,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
 	if (rq == this_rq()) {
 		hrtimer_restart(timer);
 	} else if (!rq->hrtick_csd_pending) {
-		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
+		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
 		rq->hrtick_csd_pending = 1;
 	}
 }
diff --git a/kernel/smp.c b/kernel/smp.c
index 7a0ce25829dc..f5308258891a 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,8 +23,7 @@ static struct {
 };
 
 enum {
-	CSD_FLAG_WAIT		= 0x01,
-	CSD_FLAG_LOCK		= 0x02,
+	CSD_FLAG_LOCK		= 0x01,
 };
 
 struct call_function_data {
@@ -95,41 +94,21 @@ static int __cpuinit init_call_single_data(void)
 early_initcall(init_call_single_data);
 
 /*
- * csd_wait/csd_complete are used for synchronous ipi calls
- */
-static void csd_wait_prepare(struct call_single_data *data)
-{
-	data->flags |= CSD_FLAG_WAIT;
-}
-
-static void csd_complete(struct call_single_data *data)
-{
-	if (data->flags & CSD_FLAG_WAIT) {
-		/*
-		 * ensure we're all done before saying we are
-		 */
-		smp_mb();
-		data->flags &= ~CSD_FLAG_WAIT;
-	}
-}
-
-static void csd_wait(struct call_single_data *data)
-{
-	while (data->flags & CSD_FLAG_WAIT)
-		cpu_relax();
-}
-
-/*
  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
  *
  * For non-synchronous ipi calls the csd can still be in use by the previous
  * function call. For multi-cpu calls its even more interesting as we'll have
  * to ensure no other cpu is observing our csd.
 */
-static void csd_lock(struct call_single_data *data)
+static void csd_lock_wait(struct call_single_data *data)
 {
 	while (data->flags & CSD_FLAG_LOCK)
 		cpu_relax();
+}
+
+static void csd_lock(struct call_single_data *data)
+{
+	csd_lock_wait(data);
 	data->flags = CSD_FLAG_LOCK;
 
 	/*
@@ -155,11 +134,12 @@ static void csd_unlock(struct call_single_data *data)
  * Insert a previously allocated call_single_data element for execution
  * on the given CPU. data must already have ->func, ->info, and ->flags set.
  */
-static void generic_exec_single(int cpu, struct call_single_data *data)
+static
+void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 {
 	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	int wait = data->flags & CSD_FLAG_WAIT, ipi;
 	unsigned long flags;
+	int ipi;
 
 	spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
@@ -182,7 +162,7 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
-		csd_wait(data);
+		csd_lock_wait(data);
 }
 
 /*
@@ -232,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
 		if (refs)
 			continue;
 
-		csd_complete(&data->csd);
 		csd_unlock(&data->csd);
 	}
 
@@ -270,9 +249,6 @@ void generic_smp_call_function_single_interrupt(void)
 
 		data->func(data->info);
 
-		if (data_flags & CSD_FLAG_WAIT)
-			csd_complete(data);
-
 		/*
 		 * Unlocked CSDs are valid through generic_exec_single()
 		 */
@@ -313,36 +289,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		func(info);
 		local_irq_restore(flags);
 	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-		struct call_single_data *data;
+		struct call_single_data *data = &d;
 
-		if (!wait) {
-			/*
-			 * We are calling a function on a single CPU
-			 * and we are not going to wait for it to finish.
-			 * We use a per cpu data to pass the information to
-			 * that CPU. Since all callers of this code will
-			 * use the same data, we must synchronize the
-			 * callers to prevent a new caller from corrupting
-			 * the data before the callee can access it.
-			 *
-			 * The CSD_FLAG_LOCK is used to let us know when
-			 * the IPI handler is done with the data.
-			 * The first caller will set it, and the callee
-			 * will clear it. The next caller must wait for
-			 * it to clear before we set it again. This
-			 * will make sure the callee is done with the
-			 * data before a new caller will use it.
-			 */
+		if (!wait)
 			data = &__get_cpu_var(csd_data);
-			csd_lock(data);
-		} else {
-			data = &d;
-			csd_wait_prepare(data);
-		}
+
+		csd_lock(data);
 
 		data->func = func;
 		data->info = info;
-		generic_exec_single(cpu, data);
+		generic_exec_single(cpu, data, wait);
 	} else {
 		err = -ENXIO;	/* CPU not online */
 	}
@@ -362,12 +318,15 @@ EXPORT_SYMBOL(smp_call_function_single);
  * instance.
  *
  */
-void __smp_call_function_single(int cpu, struct call_single_data *data)
+void __smp_call_function_single(int cpu, struct call_single_data *data,
+				int wait)
 {
+	csd_lock(data);
+
 	/* Can deadlock when called with interrupts disabled */
-	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());
+	WARN_ON(wait && irqs_disabled());
 
-	generic_exec_single(cpu, data);
+	generic_exec_single(cpu, data, wait);
 }
 
 /* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
@@ -425,9 +384,6 @@ void smp_call_function_many(const struct cpumask *mask,
 	csd_lock(&data->csd);
 
 	spin_lock_irqsave(&data->lock, flags);
-	if (wait)
-		csd_wait_prepare(&data->csd);
-
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
@@ -456,7 +412,7 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/* optionally wait for the CPUs to complete */
 	if (wait)
-		csd_wait(&data->csd);
+		csd_lock_wait(&data->csd);
 }
 EXPORT_SYMBOL(smp_call_function_many);
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bdbe9de9cd8d..48c3d5d627a8 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -496,7 +496,7 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
 		cp->flags = 0;
 		cp->priv = softirq;
 
-		__smp_call_function_single(cpu, cp);
+		__smp_call_function_single(cpu, cp, 0);
 		return 0;
 	}
 	return 1;