author:    Ingo Molnar <mingo@elte.hu>            2005-09-10 03:25:56 -0400
committer: Linus Torvalds <torvalds@g5.osdl.org>  2005-09-10 13:06:21 -0400
commit:    fb1c8f93d869b34cacb8b8932e2b83d96a19d720
tree:      a006d078aa02e421a7dc4793c335308204859d36 /include/asm-ia64/spinlock.h
parent:    4327edf6b8a7ac7dce144313947995538842d8fd
[PATCH] spinlock consolidation
This patch (written by me and also containing many suggestions of Arjan van
de Ven) does a major cleanup of the spinlock code. It does the following
things:
- consolidates and enhances the spinlock/rwlock debugging code
- simplifies the asm/spinlock.h files
- encapsulates the raw spinlock type and moves generic spinlock
features (such as ->break_lock) into the generic code (a sketch of
the resulting type follows this list).
- cleans up the spinlock code hierarchy to get rid of the spaghetti.
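Concretely, the encapsulation in the second item means the generic spinlock_t
becomes a thin wrapper around the arch-provided raw type. A minimal sketch of
the resulting shape (the exact config conditions and the debug fields are
approximate; see include/linux/spinlock_types.h for the real definition):

typedef struct {
	raw_spinlock_t raw_lock;	/* arch-provided, from asm/spinlock_types.h */
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;	/* now lives in generic code, not per-arch */
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;	/* owner/CPU tracking used by the debug code */
	void *owner;
#endif
} spinlock_t;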
Most notably, there is now only a single variant of the debugging code,
located in lib/spinlock_debug.c. (Previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds.)
Also, I've enhanced the rwlock debugging facility: it will now track
write-owners. There is new spinlock-owner/CPU-tracking on SMP builds too.
All locks have lockup detection now, which will work for both soft and hard
spin/rwlock lockups.
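The detection itself is just bounded spinning plus a report. A rough sketch of
the idea (illustrative only, not the exact lib/spinlock_debug.c code; the
timeout heuristic and the printed fields are approximations):

static void __spin_lock_debug(spinlock_t *lock)
{
	int print_once = 1;
	u64 i;

	for (;;) {
		/* one bounded round of polite spinning on the raw lock */
		for (i = 0; i < loops_per_jiffy * HZ; i++) {
			if (__raw_spin_trylock(&lock->raw_lock))
				return;
			cpu_relax();
		}
		/* still not acquired: report a suspected lockup, with owner info */
		if (print_once) {
			print_once = 0;
			printk("BUG: spinlock lockup on CPU#%d, owner CPU#%d\n",
			       raw_smp_processor_id(), lock->owner_cpu);
			dump_stack();
		}
	}
}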
The arch-level include files now only contain the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:
include/asm-i386/spinlock_types.h | 16
include/asm-x86_64/spinlock_types.h | 16
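For illustration, the raw type an architecture now has to provide is tiny. A
sketch along the lines of the i386 version (field names and initializer values
may differ per architecture):

#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H

typedef struct {
	volatile unsigned int slock;	/* 1 = unlocked on i386 */
} raw_spinlock_t;

#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }

typedef struct {
	volatile unsigned int lock;
} raw_rwlock_t;

#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }

#endif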
I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:
   SMP                          |  UP
   -----------------------------|----------------------------------
   asm/spinlock_types_smp.h     |  linux/spinlock_types_up.h
   linux/spinlock_types.h       |  linux/spinlock_types.h
   asm/spinlock_smp.h           |  linux/spinlock_up.h
   linux/spinlock_api_smp.h     |  linux/spinlock_api_up.h
   linux/spinlock.h             |  linux/spinlock.h
/*
* here's the role of the various spinlock/rwlock related include files:
*
* on SMP builds:
*
* asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
*
* linux/spinlock_api_smp.h:
* contains the prototypes for the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*
* on UP builds:
*
* linux/spinlock_types_up.h:
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
* linux/spinlock_up.h:
* contains the __raw_spin_*()/etc. versions for UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
* (included on UP-non-debug builds:)
*
* linux/spinlock_api_up.h:
* builds the _spin_*() APIs.
*
* linux/spinlock.h: builds the final spin_*() APIs.
*/
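In other words, linux/spinlock.h mostly just stitches these pieces together.
A simplified sketch of the selection logic (the real header handles a couple
more cases than shown here):

#include <linux/spinlock_types.h>	/* generic spinlock_t/rwlock_t + initializers */

#ifdef CONFIG_SMP
# include <asm/spinlock.h>		/* arch __raw_spin_*() implementations */
#else
# include <linux/spinlock_up.h>		/* UP __raw_spin_*() (NOPs unless debug/preempt) */
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>	/* prototypes for out-of-line _spin_*() APIs */
#else
# include <linux/spinlock_api_up.h>	/* _spin_*() built directly inline */
#endif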
All SMP and UP architectures are converted by this patch.
arm, i386, ia64, ppc, ppc64, s390/s390x, and x64 were build-tested via
cross-compilers. m32r, mips, sh, and sparc have not been tested yet, but
should be mostly fine.
From: Grant Grundler <grundler@parisc-linux.org>
Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
Builds a 32-bit SMP kernel (not booted or tested). I did not try to build
non-SMP kernels; that should be trivial to fix up later if necessary.
I converted the bit-ops atomic_hash lock to raw_spinlock_t. Doing so avoids
some ugly nesting of linux/*.h and asm/*.h files. Those particular locks
are well tested and contained entirely inside arch-specific code. I do NOT
expect any new issues to arise with them.
If someone does ever need to use debug/metrics with them, they will need to
unravel this hairball between spinlocks, atomic ops, and bit ops that exists
only because parisc has exactly one atomic instruction: LDCW (load and clear
word).
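For context, the parisc scheme referred to above hashes the operand's address
onto a small array of raw spinlocks, since LDCW is the only atomic primitive.
A rough sketch (the constants and the example accessor are illustrative, not
the exact arch/parisc code):

#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a) \
	(&__atomic_hash[(((unsigned long)(a)) >> 4) & (ATOMIC_HASH_SIZE - 1)])

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];	/* now raw_spinlock_t */

/* illustrative accessor: every atomic op takes the hashed raw lock */
static inline void atomic_set_sketch(atomic_t *v, int i)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(ATOMIC_HASH(v));
	v->counter = i;
	__raw_spin_unlock(ATOMIC_HASH(v));
	local_irq_restore(flags);
}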
From: "Luck, Tony" <tony.luck@intel.com>
ia64 fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-ia64/spinlock.h')
-rw-r--r--   include/asm-ia64/spinlock.h   69
1 file changed, 26 insertions, 43 deletions
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index d2430aa0d49d..5b78611411c3 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -17,28 +17,20 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED		(spinlock_t) { 0 }
-#define spin_lock_init(x)		((x)->lock = 0)
+#define __raw_spin_lock_init(x)		((x)->lock = 0)
 
 #ifdef ASM_SUPPORTED
 /*
  * Try to get the lock.  If we fail to get the lock, make a non-standard call to
  * ia64_spinlock_contention().  We do not use a normal call because that would force all
- * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ * callers of __raw_spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
  */
 
 #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
 
 static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
 {
 	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
 
@@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
 #endif
 }
 
-#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 
 /* Unlock by doing an ordered store and releasing the cacheline with nta */
-static inline void _raw_spin_unlock(spinlock_t *x) {
+static inline void __raw_spin_unlock(raw_spinlock_t *x) {
 	barrier();
 	asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
 }
 
 #else /* !ASM_SUPPORTED */
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-# define _raw_spin_lock(x)							\
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+# define __raw_spin_lock(x)							\
 do {										\
 	__u32 *ia64_spinlock_ptr = (__u32 *) (x);				\
 	__u64 ia64_spinlock_val;						\
@@ -117,29 +109,20 @@ do { \
 	} while (ia64_spinlock_val);						\
 	}									\
 } while (0)
-#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define __raw_spin_unlock(x)	do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
 #endif /* !ASM_SUPPORTED */
 
-#define spin_is_locked(x)	((x)->lock != 0)
-#define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
-
-typedef struct {
-	volatile unsigned int read_counter	: 24;
-	volatile unsigned int write_lock	:  8;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+#define __raw_spin_is_locked(x)		((x)->lock != 0)
+#define __raw_spin_trylock(x)		(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
+#define __raw_spin_unlock_wait(lock) \
+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
-#define write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
+#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
 
-#define _raw_read_lock(rw)							\
+#define __raw_read_lock(rw)							\
 do {										\
-	rwlock_t *__read_lock_ptr = (rw);					\
+	raw_rwlock_t *__read_lock_ptr = (rw);					\
 										\
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
@@ -148,14 +131,14 @@ do { \
 	}									\
 } while (0)
 
-#define _raw_read_unlock(rw)					\
+#define __raw_read_unlock(rw)					\
 do {								\
-	rwlock_t *__read_lock_ptr = (rw);			\
+	raw_rwlock_t *__read_lock_ptr = (rw);			\
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define _raw_write_lock(rw)						\
+#define __raw_write_lock(rw)						\
 do {									\
 	__asm__ __volatile__ (						\
 		"mov ar.ccv = r0\n"					\
@@ -170,7 +153,7 @@ do { \
 		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");	\
 } while(0)
 
-#define _raw_write_trylock(rw)						\
+#define __raw_write_trylock(rw)						\
 ({									\
 	register long result;						\
 									\
@@ -182,7 +165,7 @@ do { \
 	(result == 0);							\
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
@@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define _raw_write_lock(l)						\
+#define __raw_write_lock(l)						\
 ({									\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);	\
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l);			\
@@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
 	} while (ia64_val);						\
 })
 
-#define _raw_write_trylock(rw)						\
+#define __raw_write_trylock(rw)						\
 ({									\
 	__u64 ia64_val;							\
 	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);			\
@@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
 	(ia64_val == 0);						\
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
 	barrier();
 	x->write_lock = 0;
@@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #endif /* _ASM_IA64_SPINLOCK_H */