author		Nick Piggin <npiggin@suse.de>	2009-02-25 00:22:45 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-25 06:27:08 -0500
commit		15d0d3b3371227f846b9f644547fde081c7e1c0c (patch)
tree		11170d41f38123923208c44fe957730350a4e31a /kernel
parent		f7e603ad8f78cd3b59e33fa72707da0cbabdf699 (diff)
generic IPI: simplify barriers and locking
Simplify the barriers in generic remote function call interrupt
code.
Firstly, just unconditionally take the lock and check the list
in the generic_smp_call_function_single_interrupt() IPI handler.
As we have just received an IPI here, the chances are fairly
high that there will be work on the list for us, so do the
locking unconditionally. This removes the tricky lockless
list_empty check and its dubious barriers. The change looks
bigger than it is because it is mostly just removing an outer
loop and reindenting its body; a condensed sketch of the
resulting handler follows.
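In condensed form (a sketch only, with the CSD_FLAG_* handling and
kfree() of the element elided; the full version is in the kernel/smp.c
hunk below), the handler after this change reads:

void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/* Unconditionally take the lock and grab whatever has been queued. */
	spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	spin_unlock(&q->lock);

	/* Process the detached entries; no lock held, no barriers needed. */
	while (!list_empty(&list)) {
		struct call_single_data *data =
			list_entry(list.next, struct call_single_data, list);

		list_del(&data->list);
		data->func(data->info);
		/* CSD_FLAG_WAIT/LOCK/ALLOC handling elided in this sketch */
	}
}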
Secondly, clarify the architecture-specific IPI locking rules.
Generic code has no tools to impose any sane ordering on IPIs if
they go outside normal cache coherency; ergo the arch code must
make them appear to obey cache coherency: a "memory operation"
to initiate an IPI, and a "memory operation" to receive one.
That way they can at least be reasoned about in generic code,
and smp_mb() can be used to provide ordering.
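To illustrate that contract, the initiator side has to look like an
ordinary sequence of memory operations to the rest of the system. The
sketch below is condensed from generic_exec_single() in the diff; the
helper name is made up for illustration, the primitives are the real
ones:

/* Hypothetical helper, condensed from generic_exec_single(). */
static void queue_csd_and_kick(int cpu, struct call_single_queue *dst,
			       struct call_single_data *data)
{
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * Order the list addition before the IPI.  This only works if the
	 * arch makes sending the IPI behave like a "memory operation".
	 */
	smp_mb();
	arch_send_call_function_single_ipi(cpu);
}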
The combination of these two changes means that explicit
barriers can be taken out of the queue handling for the single
case: the shared data is explicitly locked, and IPI ordering
must conform to that, so no barriers are needed. An extra
barrier is needed in the many handler, to ensure we load the
list element only after the IPI has been received.
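On the receive side of the many path, that extra barrier sits at the
top of the handler; a condensed sketch (the queue walk itself is
elided, see the generic_smp_call_function_interrupt() hunk below):

void generic_smp_call_function_interrupt(void)
{
	int cpu = get_cpu();

	/*
	 * Pairs with the initiator's smp_mb(): once the IPI has been
	 * received, the queued entry must be visible on
	 * call_function_queue, otherwise we could miss it and never get
	 * another IPI to process it.
	 */
	smp_mb();

	/* ... walk call_function_queue and invoke the queued functions ... */

	put_cpu();
}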
Does any architecture actually *need* these barriers? For the
initiator I could see it, but for the handler I would be
surprised. The other thing we could do for simplicity, rather
than requiring IPIs to match cache coherency, is simply to
require a full barrier before generating an IPI and another
after receiving one; in that case the smp_mb()s can go away.
But for now we'll stay on the safe side and keep the barriers
(they are on the slow path anyway).
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-arch@vger.kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/smp.c	83
1 file changed, 44 insertions, 39 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index bbedbb7efe32..6ecf4b9895d4 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -74,9 +74,16 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
 	spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
-	 * Make the list addition visible before sending the ipi.
+	 * The list addition should be visible before sending the IPI
+	 * handler locks the list to pull the entry off it because of
+	 * normal cache coherency rules implied by spinlocks.
+	 *
+	 * If IPIs can go out of order to the cache coherency protocol
+	 * in an architecture, sufficient synchronisation should be added
+	 * to arch code to make it appear to obey cache coherency WRT
+	 * locking and barrier primitives. Generic code isn't really equipped
+	 * to do the right thing...
 	 */
-	smp_mb();
 
 	if (ipi)
 		arch_send_call_function_single_ipi(cpu);
@@ -104,6 +111,14 @@ void generic_smp_call_function_interrupt(void)
 	int cpu = get_cpu();
 
 	/*
+	 * Ensure entry is visible on call_function_queue after we have
+	 * entered the IPI. See comment in smp_call_function_many.
+	 * If we don't have this, then we may miss an entry on the list
+	 * and never get another IPI to process it.
+	 */
+	smp_mb();
+
+	/*
 	 * It's ok to use list_for_each_rcu() here even though we may delete
 	 * 'pos', since list_del_rcu() doesn't clear ->next
 	 */
@@ -154,49 +169,37 @@ void generic_smp_call_function_single_interrupt(void)
 {
 	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
 	LIST_HEAD(list);
+	unsigned int data_flags;
 
-	/*
-	 * Need to see other stores to list head for checking whether
-	 * list is empty without holding q->lock
-	 */
-	smp_read_barrier_depends();
-	while (!list_empty(&q->list)) {
-		unsigned int data_flags;
-
-		spin_lock(&q->lock);
-		list_replace_init(&q->list, &list);
-		spin_unlock(&q->lock);
+	spin_lock(&q->lock);
+	list_replace_init(&q->list, &list);
+	spin_unlock(&q->lock);
 
-		while (!list_empty(&list)) {
-			struct call_single_data *data;
+	while (!list_empty(&list)) {
+		struct call_single_data *data;
 
-			data = list_entry(list.next, struct call_single_data,
-					  list);
-			list_del(&data->list);
+		data = list_entry(list.next, struct call_single_data,
+				  list);
+		list_del(&data->list);
 
-			/*
-			 * 'data' can be invalid after this call if
-			 * flags == 0 (when called through
-			 * generic_exec_single(), so save them away before
-			 * making the call.
-			 */
-			data_flags = data->flags;
-
-			data->func(data->info);
-
-			if (data_flags & CSD_FLAG_WAIT) {
-				smp_wmb();
-				data->flags &= ~CSD_FLAG_WAIT;
-			} else if (data_flags & CSD_FLAG_LOCK) {
-				smp_wmb();
-				data->flags &= ~CSD_FLAG_LOCK;
-			} else if (data_flags & CSD_FLAG_ALLOC)
-				kfree(data);
-		}
 		/*
-		 * See comment on outer loop
+		 * 'data' can be invalid after this call if
+		 * flags == 0 (when called through
+		 * generic_exec_single(), so save them away before
+		 * making the call.
 		 */
-		smp_read_barrier_depends();
+		data_flags = data->flags;
+
+		data->func(data->info);
+
+		if (data_flags & CSD_FLAG_WAIT) {
+			smp_wmb();
+			data->flags &= ~CSD_FLAG_WAIT;
+		} else if (data_flags & CSD_FLAG_LOCK) {
+			smp_wmb();
+			data->flags &= ~CSD_FLAG_LOCK;
+		} else if (data_flags & CSD_FLAG_ALLOC)
+			kfree(data);
 	}
 }
 
@@ -375,6 +378,8 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/*
 	 * Make the list addition visible before sending the ipi.
+	 * (IPIs must obey or appear to obey normal Linux cache coherency
+	 * rules -- see comment in generic_exec_single).
 	 */
 	smp_mb();
 