aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc64/lib
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2005-09-10 03:25:56 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2005-09-10 13:06:21 -0400
commitfb1c8f93d869b34cacb8b8932e2b83d96a19d720 (patch)
treea006d078aa02e421a7dc4793c335308204859d36 /arch/sparc64/lib
parent4327edf6b8a7ac7dce144313947995538842d8fd (diff)
[PATCH] spinlock consolidation
This patch (written by me and also containing many suggestions of Arjan van de Ven) does a major cleanup of the spinlock code. It does the following things: - consolidates and enhances the spinlock/rwlock debugging code - simplifies the asm/spinlock.h files - encapsulates the raw spinlock type and moves generic spinlock features (such as ->break_lock) into the generic code. - cleans up the spinlock code hierarchy to get rid of the spaghetti. Most notably there's now only a single variant of the debugging code, located in lib/spinlock_debug.c. (previously we had one SMP debugging variant per architecture, plus a separate generic one for UP builds) Also, i've enhanced the rwlock debugging facility, it will now track write-owners. There is new spinlock-owner/CPU-tracking on SMP builds too. All locks have lockup detection now, which will work for both soft and hard spin/rwlock lockups. The arch-level include files now only contain the minimally necessary subset of the spinlock code - all the rest that can be generalized now lives in the generic headers: include/asm-i386/spinlock_types.h | 16 include/asm-x86_64/spinlock_types.h | 16 I have also split up the various spinlock variants into separate files, making it easier to see which does what. The new layout is: SMP | UP ----------------------------|----------------------------------- asm/spinlock_types_smp.h | linux/spinlock_types_up.h linux/spinlock_types.h | linux/spinlock_types.h asm/spinlock_smp.h | linux/spinlock_up.h linux/spinlock_api_smp.h | linux/spinlock_api_up.h linux/spinlock.h | linux/spinlock.h /* * here's the role of the various spinlock/rwlock related include files: * * on SMP builds: * * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the * initializers * * linux/spinlock_types.h: * defines the generic type and initializers * * asm/spinlock.h: contains the __raw_spin_*()/etc. 
lowlevel * implementations, mostly inline assembly code * * (also included on UP-debug builds:) * * linux/spinlock_api_smp.h: * contains the prototypes for the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. * * on UP builds: * * linux/spinlock_type_up.h: * contains the generic, simplified UP spinlock type. * (which is an empty structure on non-debug builds) * * linux/spinlock_types.h: * defines the generic type and initializers * * linux/spinlock_up.h: * contains the __raw_spin_*()/etc. version of UP * builds. (which are NOPs on non-debug, non-preempt * builds) * * (included on UP-non-debug builds:) * * linux/spinlock_api_up.h: * builds the _spin_*() APIs. * * linux/spinlock.h: builds the final spin_*() APIs. */ All SMP and UP architectures are converted by this patch. arm, i386, ia64, ppc, ppc64, s390/s390x, x64 was build-tested via crosscompilers. m32r, mips, sh, sparc, have not been tested yet, but should be mostly fine. From: Grant Grundler <grundler@parisc-linux.org> Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU). Builds 32-bit SMP kernel (not booted or tested). I did not try to build non-SMP kernels. That should be trivial to fix up later if necessary. I converted bit ops atomic_hash lock to raw_spinlock_t. Doing so avoids some ugly nesting of linux/*.h and asm/*.h files. Those particular locks are well tested and contained entirely inside arch specific code. I do NOT expect any new issues to arise with them. If someone does ever need to use debug/metrics with them, then they will need to unravel this hairball between spinlocks, atomic ops, and bit ops that exist only because parisc has exactly one atomic instruction: LDCW (load and clear word). 
From: "Luck, Tony" <tony.luck@intel.com> ia64 fix Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Arjan van de Ven <arjanv@infradead.org> Signed-off-by: Grant Grundler <grundler@parisc-linux.org> Cc: Matthew Wilcox <willy@debian.org> Signed-off-by: Hirokazu Takata <takata@linux-m32r.org> Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se> Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/sparc64/lib')
-rw-r--r--arch/sparc64/lib/Makefile1
-rw-r--r--arch/sparc64/lib/debuglocks.c366
2 files changed, 0 insertions, 367 deletions
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 40dbeec7e5d..d968aebe83b 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,7 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
14 copy_in_user.o user_fixup.o memmove.o \ 14 copy_in_user.o user_fixup.o memmove.o \
15 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o 15 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
16 16
17lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
18lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o 17lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
19 18
20obj-y += iomap.o 19obj-y += iomap.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
deleted file mode 100644
index f5f0b5586f0..00000000000
--- a/arch/sparc64/lib/debuglocks.c
+++ /dev/null
@@ -1,366 +0,0 @@
1/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
2 * debuglocks.c: Debugging versions of SMP locking primitives.
3 *
4 * Copyright (C) 1998 David S. Miller (davem@redhat.com)
5 */
6
7#include <linux/config.h>
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/spinlock.h>
11#include <asm/system.h>
12
13#ifdef CONFIG_SMP
14
/* NOTE(review): diagnostic helper for a stuck spinlock. Prints which CPU is
 * spinning, the caller PC of the spin attempt, and the owner PC/CPU that the
 * debug spinlock recorded at acquisition time. */
15static inline void show (char *str, spinlock_t *lock, unsigned long caller)
16{
17	int cpu = smp_processor_id();
18
19	printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
20	       str, lock, cpu, (unsigned int) caller,
21	       lock->owner_pc, lock->owner_cpu);
22}
23
/* NOTE(review): diagnostic helper for a stuck read-lock attempt. Reports the
 * spinning CPU/PC and the writer PC/CPU currently recorded in the rwlock. */
24static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
25{
26	int cpu = smp_processor_id();
27
28	printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
29	       str, lock, cpu, (unsigned int) caller,
30	       lock->writer_pc, lock->writer_cpu);
31}
32
/* NOTE(review): diagnostic helper for a stuck write-lock attempt. Besides the
 * recorded writer, it walks reader_pc[] and prints every CPU that still holds
 * (or last held) a read reference, so lockups can be attributed to readers. */
33static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
34{
35	int cpu = smp_processor_id();
36	int i;
37
38	printk("%s(%p) CPU#%d stuck at %08x\n",
39	       str, lock, cpu, (unsigned int) caller);
40	printk("Writer: PC(%08x):CPU(%x)\n",
41	       lock->writer_pc, lock->writer_cpu);
42	printk("Readers:");
43	for (i = 0; i < NR_CPUS; i++)
44		if (lock->reader_pc[i])
45			printk(" %d[%08x]", i, lock->reader_pc[i]);
46	printk("\n");
47}
48
49#undef INIT_STUCK
50#define INIT_STUCK 100000000
51
/* NOTE(review): debug spin_lock. Test-and-set via sparc64 `ldstub` (atomically
 * loads the lock byte and stores 0xff). On contention it spins reading
 * lock->lock with a bounded `stuck` countdown; every INIT_STUCK iterations it
 * prints a lockup report (at most 3 times, via `shown`). On success it records
 * owner PC/CPU for later diagnostics. get_cpu()/put_cpu() pin preemption for
 * the duration. */
52void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
53{
54	unsigned long val;
55	int stuck = INIT_STUCK;
56	int cpu = get_cpu();
57	int shown = 0;
58
59again:
60	__asm__ __volatile__("ldstub [%1], %0"
61			     : "=r" (val)
62			     : "r" (&(lock->lock))
63			     : "memory");
	/* Order the ldstub store before subsequent loads/stores (acquire). */
64	membar_storeload_storestore();
65	if (val) {
		/* Lock was held: spin on a plain read until it looks free,
		 * then retry the atomic ldstub from the top. */
66		while (lock->lock) {
67			if (!--stuck) {
68				if (shown++ <= 2)
69					show(str, lock, caller);
70				stuck = INIT_STUCK;
71			}
72			rmb();
73		}
74		goto again;
75	}
76	lock->owner_pc = ((unsigned int)caller);
77	lock->owner_cpu = cpu;
78	current->thread.smp_lock_count++;
79	current->thread.smp_lock_pc = ((unsigned int)caller);
80
81	put_cpu();
82}
83
/* NOTE(review): debug spin_trylock. Single `ldstub` attempt, no spinning.
 * Records ownership info only on success. Returns 1 if the lock was taken
 * (ldstub read back 0), 0 otherwise. */
84int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
85{
86	unsigned long val;
87	int cpu = get_cpu();
88
89	__asm__ __volatile__("ldstub [%1], %0"
90			     : "=r" (val)
91			     : "r" (&(lock->lock))
92			     : "memory");
	/* Acquire ordering after the atomic test-and-set. */
93	membar_storeload_storestore();
94	if (!val) {
95		lock->owner_pc = ((unsigned int)caller);
96		lock->owner_cpu = cpu;
97		current->thread.smp_lock_count++;
98		current->thread.smp_lock_pc = ((unsigned int)caller);
99	}
100
101	put_cpu();
102
103	return val == 0;
104}
105
/* NOTE(review): debug spin_unlock. Clears the ownership record first, then
 * issues a release membar so those stores (and the critical section) are
 * ordered before the store that actually frees the lock byte. */
106void _do_spin_unlock(spinlock_t *lock)
107{
108	lock->owner_pc = 0;
109	lock->owner_cpu = NO_PROC_ID;
110	membar_storestore_loadstore();
111	lock->lock = 0;
112	current->thread.smp_lock_count--;
113}
114
115/* Keep INIT_STUCK the same... */
116
/* NOTE(review): debug read_lock. rw->lock layout: bit 63 = writer bit,
 * low bits = reader count, so "< 0 as long" means a writer holds it. After
 * waiting for any writer, a casx-based compare-and-swap tries once to bump
 * the reader count; the asm sets val != 0 either when a writer appeared
 * (brlz short-circuits, val = 1) or when the casx lost a race (g1 != g7),
 * in which case we loop back and wait again. On success, records this CPU's
 * caller PC in reader_pc[] for lockup reports. */
117void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
118{
119	unsigned long val;
120	int stuck = INIT_STUCK;
121	int cpu = get_cpu();
122	int shown = 0;
123
124wlock_again:
125	/* Wait for any writer to go away. */
126	while (((long)(rw->lock)) < 0) {
127		if (!--stuck) {
128			if (shown++ <= 2)
129				show_read(str, rw, caller);
130			stuck = INIT_STUCK;
131		}
132		rmb();
133	}
134	/* Try once to increment the counter. */
135	__asm__ __volatile__(
136"	ldx	[%0], %%g1\n"
137"	brlz,a,pn	%%g1, 2f\n"
138"	 mov	1, %0\n"
139"	add	%%g1, 1, %%g7\n"
140"	casx	[%0], %%g1, %%g7\n"
141"	sub	%%g1, %%g7, %0\n"
142"2:"	: "=r" (val)
143	: "0" (&(rw->lock))
144	: "g1", "g7", "memory");
	/* Acquire ordering after a successful count bump. */
145	membar_storeload_storestore();
146	if (val)
147		goto wlock_again;
148	rw->reader_pc[cpu] = ((unsigned int)caller);
149	current->thread.smp_lock_count++;
150	current->thread.smp_lock_pc = ((unsigned int)caller);
151
152	put_cpu();
153}
154
/* NOTE(review): debug read_unlock. Clears this CPU's reader_pc[] entry before
 * releasing, then loops a casx decrement of the reader count until it wins the
 * race (val == 0 when the casx succeeded, i.e. g1 == g7 afterwards). The
 * leading membar gives release ordering; the trailing one orders the casx
 * against later accesses. The stuck/shown machinery reports a lockup if the
 * casx keeps losing. */
155void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
156{
157	unsigned long val;
158	int stuck = INIT_STUCK;
159	int cpu = get_cpu();
160	int shown = 0;
161
162	/* Drop our identity _first_. */
163	rw->reader_pc[cpu] = 0;
164	current->thread.smp_lock_count--;
165runlock_again:
166	/* Spin trying to decrement the counter using casx. */
167	__asm__ __volatile__(
168"	membar	#StoreLoad | #LoadLoad\n"
169"	ldx	[%0], %%g1\n"
170"	sub	%%g1, 1, %%g7\n"
171"	casx	[%0], %%g1, %%g7\n"
172"	membar	#StoreLoad | #StoreStore\n"
173"	sub	%%g1, %%g7, %0\n"
174	: "=r" (val)
175	: "0" (&(rw->lock))
176	: "g1", "g7", "memory");
177	if (val) {
178		if (!--stuck) {
179			if (shown++ <= 2)
180				show_read(str, rw, caller);
181			stuck = INIT_STUCK;
182		}
183		goto runlock_again;
184	}
185
186	put_cpu();
187}
188
/* NOTE(review): debug write_lock, two phases. Phase 1: wait out any existing
 * writer (lock < 0), then casx-set bit 63 (the write bit); val != 0 means a
 * writer beat us (brlz path) or the casx lost, so retry. Phase 2: with the
 * write bit held, check for remaining readers in the low 62 bits. If readers
 * remain, drop the write bit (the casx/andn loop), spin until the lock fully
 * drains, and start over — so writers do NOT block new readers while waiting,
 * which avoids deadlock at the cost of potential writer starvation. Records
 * writer PC/CPU on success. */
189void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
190{
191	unsigned long val;
192	int stuck = INIT_STUCK;
193	int cpu = get_cpu();
194	int shown = 0;
195
196wlock_again:
197	/* Spin while there is another writer. */
198	while (((long)rw->lock) < 0) {
199		if (!--stuck) {
200			if (shown++ <= 2)
201				show_write(str, rw, caller);
202			stuck = INIT_STUCK;
203		}
204		rmb();
205	}
206
207	/* Try to acquire the write bit. */
208	__asm__ __volatile__(
209"	mov	1, %%g3\n"
210"	sllx	%%g3, 63, %%g3\n"
211"	ldx	[%0], %%g1\n"
212"	brlz,pn	%%g1, 1f\n"
213"	 or	%%g1, %%g3, %%g7\n"
214"	casx	[%0], %%g1, %%g7\n"
215"	membar	#StoreLoad | #StoreStore\n"
216"	ba,pt	%%xcc, 2f\n"
217"	 sub	%%g1, %%g7, %0\n"
218"1:	mov	1, %0\n"
219"2:"	: "=r" (val)
220	: "0" (&(rw->lock))
221	: "g3", "g1", "g7", "memory");
222	if (val) {
223		/* We couldn't get the write bit. */
224		if (!--stuck) {
225			if (shown++ <= 2)
226				show_write(str, rw, caller);
227			stuck = INIT_STUCK;
228		}
229		goto wlock_again;
230	}
231	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
232		/* Readers still around, drop the write
233		 * lock, spin, and try again.
234		 */
235		if (!--stuck) {
236			if (shown++ <= 2)
237				show_write(str, rw, caller);
238			stuck = INIT_STUCK;
239		}
		/* Atomically clear bit 63, retrying until the casx wins. */
240		__asm__ __volatile__(
241"	mov	1, %%g3\n"
242"	sllx	%%g3, 63, %%g3\n"
243"1:	ldx	[%0], %%g1\n"
244"	andn	%%g1, %%g3, %%g7\n"
245"	casx	[%0], %%g1, %%g7\n"
246"	cmp	%%g1, %%g7\n"
247"	membar	#StoreLoad | #StoreStore\n"
248"	bne,pn	%%xcc, 1b\n"
249"	 nop"
250		: /* no outputs */
251		: "r" (&(rw->lock))
252		: "g3", "g1", "g7", "cc", "memory");
		/* Wait until both readers and writers are gone. */
253		while(rw->lock != 0) {
254			if (!--stuck) {
255				if (shown++ <= 2)
256					show_write(str, rw, caller);
257				stuck = INIT_STUCK;
258			}
259			rmb();
260		}
261		goto wlock_again;
262	}
263
264	/* We have it, say who we are. */
265	rw->writer_pc = ((unsigned int)caller);
266	rw->writer_cpu = cpu;
267	current->thread.smp_lock_count++;
268	current->thread.smp_lock_pc = ((unsigned int)caller);
269
270	put_cpu();
271}
272
/* NOTE(review): debug write_unlock. Clears the writer identity first, then
 * casx-clears bit 63 (andn with the write-bit mask), retrying until the casx
 * wins; val == 0 signals success. The leading membar provides release
 * ordering for the critical section. Note: no get_cpu()/put_cpu() pair here —
 * the function never touches per-CPU state, unlike the lock paths. */
273void _do_write_unlock(rwlock_t *rw, unsigned long caller)
274{
275	unsigned long val;
276	int stuck = INIT_STUCK;
277	int shown = 0;
278
279	/* Drop our identity _first_ */
280	rw->writer_pc = 0;
281	rw->writer_cpu = NO_PROC_ID;
282	current->thread.smp_lock_count--;
283wlock_again:
284	__asm__ __volatile__(
285"	membar	#StoreLoad | #LoadLoad\n"
286"	mov	1, %%g3\n"
287"	sllx	%%g3, 63, %%g3\n"
288"	ldx	[%0], %%g1\n"
289"	andn	%%g1, %%g3, %%g7\n"
290"	casx	[%0], %%g1, %%g7\n"
291"	membar	#StoreLoad | #StoreStore\n"
292"	sub	%%g1, %%g7, %0\n"
293	: "=r" (val)
294	: "0" (&(rw->lock))
295	: "g3", "g1", "g7", "memory");
296	if (val) {
297		if (!--stuck) {
298			if (shown++ <= 2)
299				show_write("write_unlock", rw, caller);
300			stuck = INIT_STUCK;
301		}
302		goto wlock_again;
303	}
304}
305
/* NOTE(review): debug write_trylock. One casx attempt at setting bit 63 (same
 * asm sequence as _do_write_lock phase 1). On failure, returns 0. If the bit
 * was taken but readers remain in the low bits, the write bit is atomically
 * dropped again and 0 is returned — trylock never waits for readers to drain.
 * Returns 1 with writer PC/CPU recorded only when the lock is fully owned.
 * NOTE(review): `str` is accepted but unused — presumably kept for signature
 * symmetry with the other _do_*_lock entry points. */
306int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
307{
308	unsigned long val;
309	int cpu = get_cpu();
310
311	/* Try to acquire the write bit. */
312	__asm__ __volatile__(
313"	mov	1, %%g3\n"
314"	sllx	%%g3, 63, %%g3\n"
315"	ldx	[%0], %%g1\n"
316"	brlz,pn	%%g1, 1f\n"
317"	 or	%%g1, %%g3, %%g7\n"
318"	casx	[%0], %%g1, %%g7\n"
319"	membar	#StoreLoad | #StoreStore\n"
320"	ba,pt	%%xcc, 2f\n"
321"	 sub	%%g1, %%g7, %0\n"
322"1:	mov	1, %0\n"
323"2:"	: "=r" (val)
324	: "0" (&(rw->lock))
325	: "g3", "g1", "g7", "memory");
326
327	if (val) {
328		put_cpu();
329		return 0;
330	}
331
332	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
333		/* Readers still around, drop the write
334		 * lock, return failure.
335		 */
336		__asm__ __volatile__(
337"	mov	1, %%g3\n"
338"	sllx	%%g3, 63, %%g3\n"
339"1:	ldx	[%0], %%g1\n"
340"	andn	%%g1, %%g3, %%g7\n"
341"	casx	[%0], %%g1, %%g7\n"
342"	cmp	%%g1, %%g7\n"
343"	membar	#StoreLoad | #StoreStore\n"
344"	bne,pn	%%xcc, 1b\n"
345"	 nop"
346		: /* no outputs */
347		: "r" (&(rw->lock))
348		: "g3", "g1", "g7", "cc", "memory");
349
350		put_cpu();
351
352		return 0;
353	}
354
355	/* We have it, say who we are. */
356	rw->writer_pc = ((unsigned int)caller);
357	rw->writer_cpu = cpu;
358	current->thread.smp_lock_count++;
359	current->thread.smp_lock_pc = ((unsigned int)caller);
360
361	put_cpu();
362
363	return 1;
364}
365
366#endif /* CONFIG_SMP */