path: root/include/asm-mips
author	Ingo Molnar <mingo@elte.hu>	2005-09-10 03:25:56 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-10 13:06:21 -0400
commit	fb1c8f93d869b34cacb8b8932e2b83d96a19d720 (patch)
tree	a006d078aa02e421a7dc4793c335308204859d36 /include/asm-mips
parent	4327edf6b8a7ac7dce144313947995538842d8fd (diff)
[PATCH] spinlock consolidation
This patch (written by me and also containing many suggestions of Arjan van
de Ven) does a major cleanup of the spinlock code.  It does the following
things:

 - consolidates and enhances the spinlock/rwlock debugging code

 - simplifies the asm/spinlock.h files

 - encapsulates the raw spinlock type and moves generic spinlock
   features (such as ->break_lock) into the generic code.

 - cleans up the spinlock code hierarchy to get rid of the spaghetti.

Most notably there's now only a single variant of the debugging code,
located in lib/spinlock_debug.c.  (Previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds.)

Also, I've enhanced the rwlock debugging facility: it will now track
write-owners.  There is new spinlock-owner/CPU-tracking on SMP builds too.
All locks have lockup detection now, which will work for both soft and hard
spin/rwlock lockups.

The arch-level include files now only contain the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:

 include/asm-i386/spinlock_types.h    |   16
 include/asm-x86_64/spinlock_types.h  |   16

I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:

   SMP                         |  UP
   ----------------------------|-----------------------------------
   asm/spinlock_types_smp.h    |  linux/spinlock_types_up.h
   linux/spinlock_types.h      |  linux/spinlock_types.h
   asm/spinlock_smp.h          |  linux/spinlock_up.h
   linux/spinlock_api_smp.h    |  linux/spinlock_api_up.h
   linux/spinlock.h            |  linux/spinlock.h

/*
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

All SMP and UP architectures are converted by this patch.

arm, i386, ia64, ppc, ppc64, s390/s390x, x64 were build-tested via
crosscompilers.  m32r, mips, sh, sparc have not been tested yet, but
should be mostly fine.

From: Grant Grundler <grundler@parisc-linux.org>

  Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
  Builds 32-bit SMP kernel (not booted or tested).  I did not try to build
  non-SMP kernels.  That should be trivial to fix up later if necessary.

  I converted bit ops atomic_hash lock to raw_spinlock_t.  Doing so avoids
  some ugly nesting of linux/*.h and asm/*.h files.  Those particular locks
  are well tested and contained entirely inside arch specific code.  I do
  NOT expect any new issues to arise with them.

  If someone does ever need to use debug/metrics with them, then they will
  need to unravel this hairball between spinlocks, atomic ops, and bit ops
  that exist only because parisc has exactly one atomic instruction: LDCW
  (load and clear word).

From: "Luck, Tony" <tony.luck@intel.com>

   ia64 fix

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
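To make the layering described above concrete, here is a minimal C sketch of
how the generic spinlock_t is assumed to wrap the per-architecture raw type
after this patch (simplified: the real linux/spinlock_types.h also carries
debug fields and initializer macros that are omitted here):

	/* asm/spinlock_types.h: the per-architecture raw type
	 * (the MIPS version added by this patch is shown in full below) */
	typedef struct {
		volatile unsigned int lock;
	} raw_spinlock_t;

	/* linux/spinlock_types.h: generic wrapper, sketch only */
	typedef struct {
		raw_spinlock_t raw_lock;
	#ifdef CONFIG_PREEMPT
		unsigned int break_lock;	/* generic now, no longer per-arch */
	#endif
	} spinlock_t;

The architecture header only has to provide the __raw_spin_*() operations on
raw_spinlock_t; spin_lock() and friends, the debugging code and the
->break_lock handling are built on top of that in the generic
linux/spinlock*.h headers.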
Diffstat (limited to 'include/asm-mips')
-rw-r--r--	include/asm-mips/spinlock.h		75
-rw-r--r--	include/asm-mips/spinlock_types.h	20
2 files changed, 47 insertions, 48 deletions
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index 114d3eb98a6a..4d0135b11156 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -16,20 +16,10 @@
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-
-#define spin_lock_init(x)	do { (x)->lock = 0; } while(0)
-
-#define spin_is_locked(x)	((x)->lock != 0)
-#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)	((x)->lock != 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+	do { cpu_relax(); } while ((x)->lock)
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -38,13 +28,13 @@ typedef struct {
  * We make no fairness assumptions.  They have a cost.
  */
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# _raw_spin_lock	\n"
+		"	.set	noreorder	# __raw_spin_lock	\n"
 		"1:	ll	%1, %2					\n"
 		"	bnez	%1, 1b					\n"
 		"	 li	%1, 1					\n"
@@ -58,7 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
58 : "memory"); 48 : "memory");
59 } else { 49 } else {
60 __asm__ __volatile__( 50 __asm__ __volatile__(
61 " .set noreorder # _raw_spin_lock \n" 51 " .set noreorder # __raw_spin_lock \n"
62 "1: ll %1, %2 \n" 52 "1: ll %1, %2 \n"
63 " bnez %1, 1b \n" 53 " bnez %1, 1b \n"
64 " li %1, 1 \n" 54 " li %1, 1 \n"
@@ -72,10 +62,10 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 	}
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__asm__ __volatile__(
-	"	.set	noreorder	# _raw_spin_unlock	\n"
+	"	.set	noreorder	# __raw_spin_unlock	\n"
 	"	sync						\n"
 	"	sw	$0, %0					\n"
 	"	.set\treorder					\n"
@@ -84,13 +74,13 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
84 : "memory"); 74 : "memory");
85} 75}
86 76
87static inline unsigned int _raw_spin_trylock(spinlock_t *lock) 77static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
88{ 78{
89 unsigned int temp, res; 79 unsigned int temp, res;
90 80
91 if (R10000_LLSC_WAR) { 81 if (R10000_LLSC_WAR) {
92 __asm__ __volatile__( 82 __asm__ __volatile__(
93 " .set noreorder # _raw_spin_trylock \n" 83 " .set noreorder # __raw_spin_trylock \n"
94 "1: ll %0, %3 \n" 84 "1: ll %0, %3 \n"
95 " ori %2, %0, 1 \n" 85 " ori %2, %0, 1 \n"
96 " sc %2, %1 \n" 86 " sc %2, %1 \n"
@@ -104,7 +94,7 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
104 : "memory"); 94 : "memory");
105 } else { 95 } else {
106 __asm__ __volatile__( 96 __asm__ __volatile__(
107 " .set noreorder # _raw_spin_trylock \n" 97 " .set noreorder # __raw_spin_trylock \n"
108 "1: ll %0, %3 \n" 98 "1: ll %0, %3 \n"
109 " ori %2, %0, 1 \n" 99 " ori %2, %0, 1 \n"
110 " sc %2, %1 \n" 100 " sc %2, %1 \n"
@@ -129,24 +119,13 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
  * read-locks.
  */
 
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-
-#define rwlock_init(x)  do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# _raw_read_lock	\n"
+		"	.set	noreorder	# __raw_read_lock	\n"
 		"1:	ll	%1, %2					\n"
 		"	bltz	%1, 1b					\n"
 		"	 addu	%1, 1					\n"
@@ -160,7 +139,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
160 : "memory"); 139 : "memory");
161 } else { 140 } else {
162 __asm__ __volatile__( 141 __asm__ __volatile__(
163 " .set noreorder # _raw_read_lock \n" 142 " .set noreorder # __raw_read_lock \n"
164 "1: ll %1, %2 \n" 143 "1: ll %1, %2 \n"
165 " bltz %1, 1b \n" 144 " bltz %1, 1b \n"
166 " addu %1, 1 \n" 145 " addu %1, 1 \n"
@@ -177,13 +156,13 @@ static inline void _raw_read_lock(rwlock_t *rw)
 /* Note the use of sub, not subu which will make the kernel die with an
    overflow exception if we ever try to unlock an rwlock that is already
    unlocked or is being held by a writer. */
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"1:	ll	%1, %2		# _raw_read_unlock	\n"
+		"1:	ll	%1, %2		# __raw_read_unlock	\n"
 		"	sub	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
@@ -193,7 +172,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
193 : "memory"); 172 : "memory");
194 } else { 173 } else {
195 __asm__ __volatile__( 174 __asm__ __volatile__(
196 " .set noreorder # _raw_read_unlock \n" 175 " .set noreorder # __raw_read_unlock \n"
197 "1: ll %1, %2 \n" 176 "1: ll %1, %2 \n"
198 " sub %1, 1 \n" 177 " sub %1, 1 \n"
199 " sc %1, %0 \n" 178 " sc %1, %0 \n"
@@ -206,13 +185,13 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 	}
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# _raw_write_lock	\n"
+		"	.set	noreorder	# __raw_write_lock	\n"
 		"1:	ll	%1, %2					\n"
 		"	bnez	%1, 1b					\n"
 		"	 lui	%1, 0x8000				\n"
@@ -226,7 +205,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
226 : "memory"); 205 : "memory");
227 } else { 206 } else {
228 __asm__ __volatile__( 207 __asm__ __volatile__(
229 " .set noreorder # _raw_write_lock \n" 208 " .set noreorder # __raw_write_lock \n"
230 "1: ll %1, %2 \n" 209 "1: ll %1, %2 \n"
231 " bnez %1, 1b \n" 210 " bnez %1, 1b \n"
232 " lui %1, 0x8000 \n" 211 " lui %1, 0x8000 \n"
@@ -241,26 +220,26 @@ static inline void _raw_write_lock(rwlock_t *rw)
 	}
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	__asm__ __volatile__(
-	"	sync			# _raw_write_unlock	\n"
+	"	sync			# __raw_write_unlock	\n"
 	"	sw	$0, %0					\n"
 	: "=m" (rw->lock)
 	: "m" (rw->lock)
 	: "memory");
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
 	unsigned int tmp;
 	int ret;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# _raw_write_trylock	\n"
+		"	.set	noreorder	# __raw_write_trylock	\n"
 		"	li	%2, 0					\n"
 		"1:	ll	%1, %3					\n"
 		"	bnez	%1, 2f					\n"
@@ -277,7 +256,7 @@ static inline int _raw_write_trylock(rwlock_t *rw)
277 : "memory"); 256 : "memory");
278 } else { 257 } else {
279 __asm__ __volatile__( 258 __asm__ __volatile__(
280 " .set noreorder # _raw_write_trylock \n" 259 " .set noreorder # __raw_write_trylock \n"
281 " li %2, 0 \n" 260 " li %2, 0 \n"
282 "1: ll %1, %3 \n" 261 "1: ll %1, %3 \n"
283 " bnez %1, 2f \n" 262 " bnez %1, 2f \n"
diff --git a/include/asm-mips/spinlock_types.h b/include/asm-mips/spinlock_types.h
new file mode 100644
index 000000000000..ce26c5048b15
--- /dev/null
+++ b/include/asm-mips/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
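A note on the rw-lock word that spinlock.h manipulates: readers count up from
0 in the low bits, while a writer claims the whole word by setting bit 31 (the
value produced by "lui %1, 0x8000"), so a negative word means write-locked.
The bltz test in __raw_read_lock() and the bnez test in __raw_write_lock()
spin until their condition clears.  A small C sketch of that encoding
(hypothetical helper names, for illustration only):

	#define RW_WORD_WRITE_BIT	0x80000000u	/* set while a writer holds the lock */

	/* a new reader may enter when no writer holds the word */
	static inline int rw_word_can_read(unsigned int w)
	{
		return (w & RW_WORD_WRITE_BIT) == 0;
	}

	/* a writer may enter only when there are no readers and no writer */
	static inline int rw_word_can_write(unsigned int w)
	{
		return w == 0;
	}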