| author | Ingo Molnar <mingo@elte.hu> | 2005-09-10 03:25:56 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-09-10 13:06:21 -0400 |
| commit | fb1c8f93d869b34cacb8b8932e2b83d96a19d720 | |
| tree | a006d078aa02e421a7dc4793c335308204859d36 /include/linux/spinlock.h | |
| parent | 4327edf6b8a7ac7dce144313947995538842d8fd | |
[PATCH] spinlock consolidation
This patch (written by me, with many suggestions from Arjan van
de Ven) does a major cleanup of the spinlock code. It does the following
things:
- consolidates and enhances the spinlock/rwlock debugging code
- simplifies the asm/spinlock.h files
- encapsulates the raw spinlock type and moves generic spinlock
features (such as ->break_lock) into the generic code.
- cleans up the spinlock code hierarchy to get rid of the spaghetti.
Most notably, there is now only a single variant of the debugging code,
located in lib/spinlock_debug.c. (Previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds.)
Also, I've enhanced the rwlock debugging facility: it now tracks
write-owners. There is new spinlock-owner/CPU tracking on SMP builds too.
All locks now have lockup detection, which works for both soft and hard
spin/rwlock lockups.
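
For reference, the consolidated lock type (introduced in linux/spinlock_types.h) can be sketched as below. Treat the exact field layout as an illustration of the design, not an authoritative copy of the header:

/*
 * Rough sketch of the consolidated spinlock_t: the arch-specific raw
 * lock word is wrapped by a generic structure that optionally carries
 * the generic ->break_lock flag and the new debugging/owner state.
 */
typedef struct {
        raw_spinlock_t raw_lock;        /* arch part, from asm/spinlock_types.h */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
        unsigned int break_lock;        /* generic lock-break support */
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
        unsigned int magic, owner_cpu;  /* corruption check + owning CPU */
        void *owner;                    /* owning task, for lockup reports */
#endif
} spinlock_t;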
The arch-level include files now only contain the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:
include/asm-i386/spinlock_types.h | 16
include/asm-x86_64/spinlock_types.h | 16
I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:
             SMP              |              UP
 -----------------------------|-----------------------------------
 asm/spinlock_types_smp.h     |  linux/spinlock_types_up.h
 linux/spinlock_types.h       |  linux/spinlock_types.h
 asm/spinlock_smp.h           |  linux/spinlock_up.h
 linux/spinlock_api_smp.h     |  linux/spinlock_api_up.h
 linux/spinlock.h             |  linux/spinlock.h
/*
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
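
To make the final layer concrete, here is a hedged usage sketch of the spin_*() APIs that linux/spinlock.h builds. The lock and function names are illustrative, not taken from the patch; the point is that the same source works unchanged on SMP, UP and debug builds:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* statically initialized lock */
static int example_count;

static void example_increment(void)
{
        unsigned long flags;

        /* identical spin_*() surface everywhere; only the layers
         * underneath (asm/spinlock.h vs. linux/spinlock_up.h) differ */
        spin_lock_irqsave(&example_lock, flags);
        example_count++;
        spin_unlock_irqrestore(&example_lock, flags);
}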
All SMP and UP architectures are converted by this patch.
arm, i386, ia64, ppc, ppc64, s390/s390x and x64 were build-tested via
cross-compilers. m32r, mips, sh and sparc have not been tested yet, but
should be mostly fine.
From: Grant Grundler <grundler@parisc-linux.org>
Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
Builds 32-bit SMP kernel (not booted or tested). I did not try to build
non-SMP kernels. That should be trivial to fix up later if necessary.
I converted the bit-ops atomic_hash lock to raw_spinlock_t. Doing so avoids
some ugly nesting of linux/*.h and asm/*.h files. Those particular locks
are well tested and contained entirely inside arch-specific code. I do NOT
expect any new issues to arise with them.
If someone does ever need to use debug/metrics with them, then they will
need to unravel this hairball between spinlocks, atomic ops, and bit ops
that exists only because parisc has exactly one atomic instruction: LDCW
(load and clear word).
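
To see why that hairball exists, here is a hedged sketch of an LDCW-style lock. load_and_clear_word() is a hypothetical stand-in for the single atomic primitive (implemented with a GCC builtin here, not parisc asm): it atomically fetches a word and zeroes it, and by convention a nonzero word means unlocked:

/* hypothetical stand-in for parisc's LDCW primitive */
static unsigned long load_and_clear_word(volatile unsigned long *addr)
{
        return __atomic_exchange_n(addr, 0UL, __ATOMIC_ACQUIRE);
}

static void ldcw_style_lock(volatile unsigned long *lock_word)
{
        /* reading nonzero means the word was free and we just claimed it */
        while (load_and_clear_word(lock_word) == 0)
                while (*lock_word == 0)
                        ;       /* spin on a plain read, not the atomic op */
}

static void ldcw_style_unlock(volatile unsigned long *lock_word)
{
        *lock_word = 1;         /* store a nonzero word: unlocked again */
}

Because parisc builds atomic_t and the bit ops out of an address-hashed array of exactly such locks (the atomic_hash mentioned above), the lock type must be visible to the lowest-level asm headers, which is the include nesting Grant refers to.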
From: "Luck, Tony" <tony.luck@intel.com>
ia64 fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--  include/linux/spinlock.h | 627
1 file changed, 125 insertions(+), 502 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d6ba068719b6..cdc99a27840d 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -2,7 +2,48 @@
 #define __LINUX_SPINLOCK_H
 
 /*
- * include/linux/spinlock.h - generic locking declarations
+ * include/linux/spinlock.h - generic spinlock/rwlock declarations
+ *
+ * here's the role of the various spinlock/rwlock related include files:
+ *
+ * on SMP builds:
+ *
+ *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ *                        initializers
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
+ *                        implementations, mostly inline assembly code
+ *
+ *   (also included on UP-debug builds:)
+ *
+ *  linux/spinlock_api_smp.h:
+ *                        contains the prototypes for the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
+ *
+ * on UP builds:
+ *
+ *  linux/spinlock_type_up.h:
+ *                        contains the generic, simplified UP spinlock type.
+ *                        (which is an empty structure on non-debug builds)
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  linux/spinlock_up.h:
+ *                        contains the __raw_spin_*()/etc. version of UP
+ *                        builds. (which are NOPs on non-debug, non-preempt
+ *                        builds)
+ *
+ *   (included on UP-non-debug builds:)
+ *
+ *  linux/spinlock_api_up.h:
+ *                        builds the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
  */
 
 #include <linux/config.h>
@@ -13,7 +54,6 @@
 #include <linux/kernel.h>
 #include <linux/stringify.h>
 
-#include <asm/processor.h>      /* for cpu relax */
 #include <asm/system.h>
 
 /*
@@ -35,423 +75,84 @@
 #define __lockfunc fastcall __attribute__((section(".spinlock.text")))
 
 /*
- * If CONFIG_SMP is set, pull in the _raw_* definitions
+ * Pull the raw_spinlock_t and raw_rwlock_t definitions:
  */
-#ifdef CONFIG_SMP
-
-#define assert_spin_locked(x)   BUG_ON(!spin_is_locked(x))
-#include <asm/spinlock.h>
-
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-
-void __lockfunc _spin_lock(spinlock_t *lock)    __acquires(spinlock_t);
-void __lockfunc _read_lock(rwlock_t *lock)      __acquires(rwlock_t);
-void __lockfunc _write_lock(rwlock_t *lock)     __acquires(rwlock_t);
-
-void __lockfunc _spin_unlock(spinlock_t *lock)  __releases(spinlock_t);
-void __lockfunc _read_unlock(rwlock_t *lock)    __releases(rwlock_t);
-void __lockfunc _write_unlock(rwlock_t *lock)   __releases(rwlock_t);
-
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)   __acquires(spinlock_t);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)     __acquires(rwlock_t);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)    __acquires(rwlock_t);
-
-void __lockfunc _spin_lock_irq(spinlock_t *lock)        __acquires(spinlock_t);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)         __acquires(spinlock_t);
-void __lockfunc _read_lock_irq(rwlock_t *lock)          __acquires(rwlock_t);
-void __lockfunc _read_lock_bh(rwlock_t *lock)           __acquires(rwlock_t);
-void __lockfunc _write_lock_irq(rwlock_t *lock)         __acquires(rwlock_t);
-void __lockfunc _write_lock_bh(rwlock_t *lock)          __acquires(rwlock_t);
-
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)  __releases(spinlock_t);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)      __releases(spinlock_t);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)       __releases(spinlock_t);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)    __releases(rwlock_t);
-void __lockfunc _read_unlock_irq(rwlock_t *lock)        __releases(rwlock_t);
-void __lockfunc _read_unlock_bh(rwlock_t *lock)         __releases(rwlock_t);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)   __releases(rwlock_t);
-void __lockfunc _write_unlock_irq(rwlock_t *lock)       __releases(rwlock_t);
-void __lockfunc _write_unlock_bh(rwlock_t *lock)        __releases(rwlock_t);
-
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-int __lockfunc generic_raw_read_trylock(rwlock_t *lock);
-int in_lock_functions(unsigned long addr);
-
-#else
+#include <linux/spinlock_types.h>
 
-#define in_lock_functions(ADDR) 0
+extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 
-#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
-# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
-# define ATOMIC_DEC_AND_LOCK
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-
-#define SPINLOCK_MAGIC  0x1D244B3C
-typedef struct {
-        unsigned long magic;
-        volatile unsigned long lock;
-        volatile unsigned int babble;
-        const char *module;
-        char *owner;
-        int oline;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0}
-
-#define spin_lock_init(x) \
-        do { \
-                (x)->magic = SPINLOCK_MAGIC; \
-                (x)->lock = 0; \
-                (x)->babble = 5; \
-                (x)->module = __FILE__; \
-                (x)->owner = NULL; \
-                (x)->oline = 0; \
-        } while (0)
-
-#define CHECK_LOCK(x) \
-        do { \
-                if ((x)->magic != SPINLOCK_MAGIC) { \
-                        printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
-                                        __FILE__, __LINE__, (x)); \
-                } \
-        } while(0)
-
-#define _raw_spin_lock(x) \
-        do { \
-                CHECK_LOCK(x); \
-                if ((x)->lock&&(x)->babble) { \
-                        (x)->babble--; \
-                        printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
-                                        __FILE__,__LINE__, (x)->module, \
-                                        (x), (x)->owner, (x)->oline); \
-                } \
-                (x)->lock = 1; \
-                (x)->owner = __FILE__; \
-                (x)->oline = __LINE__; \
-        } while (0)
-
-/* without debugging, spin_is_locked on UP always says
- * FALSE. --> printk if already locked. */
-#define spin_is_locked(x) \
-        ({ \
-                CHECK_LOCK(x); \
-                if ((x)->lock&&(x)->babble) { \
-                        (x)->babble--; \
-                        printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
-                                        __FILE__,__LINE__, (x)->module, \
-                                        (x), (x)->owner, (x)->oline); \
-                } \
-                0; \
-        })
-
-/* with debugging, assert_spin_locked() on UP does check
- * the lock value properly */
-#define assert_spin_locked(x) \
-        ({ \
-                CHECK_LOCK(x); \
-                BUG_ON(!(x)->lock); \
-        })
-
-/* without debugging, spin_trylock on UP always says
- * TRUE. --> printk if already locked. */
-#define _raw_spin_trylock(x) \
-        ({ \
-                CHECK_LOCK(x); \
-                if ((x)->lock&&(x)->babble) { \
-                        (x)->babble--; \
-                        printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
-                                        __FILE__,__LINE__, (x)->module, \
-                                        (x), (x)->owner, (x)->oline); \
-                } \
-                (x)->lock = 1; \
-                (x)->owner = __FILE__; \
-                (x)->oline = __LINE__; \
-                1; \
-        })
-
-#define spin_unlock_wait(x) \
-        do { \
-                CHECK_LOCK(x); \
-                if ((x)->lock&&(x)->babble) { \
-                        (x)->babble--; \
-                        printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
-                                        __FILE__,__LINE__, (x)->module, (x), \
-                                        (x)->owner, (x)->oline); \
-                }\
-        } while (0)
-
-#define _raw_spin_unlock(x) \
-        do { \
-                CHECK_LOCK(x); \
-                if (!(x)->lock&&(x)->babble) { \
-                        (x)->babble--; \
-                        printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
-                                        __FILE__,__LINE__, (x)->module, (x));\
-                } \
-                (x)->lock = 0; \
-        } while (0)
-#else
 /*
- * gcc versions before ~2.95 have a nasty bug with empty initializers.
+ * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
  */
-#if (__GNUC__ > 2)
-typedef struct { } spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#if defined(CONFIG_SMP)
+# include <asm/spinlock.h>
 #else
-typedef struct { int gcc_is_buggy; } spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+# include <linux/spinlock_up.h>
 #endif
 
+#define spin_lock_init(lock)    do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(lock)       do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+
+#define spin_is_locked(lock)    __raw_spin_is_locked(&(lock)->raw_lock)
+
+/**
+ * spin_unlock_wait - wait until the spinlock gets unlocked
+ * @lock: the spinlock in question.
+ */
+#define spin_unlock_wait(lock)  __raw_spin_unlock_wait(&(lock)->raw_lock)
+
 /*
- * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  */
-#define spin_lock_init(lock)    do { (void)(lock); } while(0)
-#define _raw_spin_lock(lock)    do { (void)(lock); } while(0)
-#define spin_is_locked(lock)    ((void)(lock), 0)
-#define assert_spin_locked(lock)        do { (void)(lock); } while(0)
-#define _raw_spin_trylock(lock) (((void)(lock), 1))
-#define spin_unlock_wait(lock)  (void)(lock)
-#define _raw_spin_unlock(lock)  do { (void)(lock); } while(0)
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-/* RW spinlocks: No debug version */
-
-#if (__GNUC__ > 2)
-typedef struct { } rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { }
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
 #else
-typedef struct { int gcc_is_buggy; } rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+# include <linux/spinlock_api_up.h>
 #endif
 
-#define rwlock_init(lock)       do { (void)(lock); } while(0)
-#define _raw_read_lock(lock)    do { (void)(lock); } while(0)
-#define _raw_read_unlock(lock)  do { (void)(lock); } while(0)
-#define _raw_write_lock(lock)   do { (void)(lock); } while(0)
-#define _raw_write_unlock(lock) do { (void)(lock); } while(0)
-#define read_can_lock(lock)     (((void)(lock), 1))
-#define write_can_lock(lock)    (((void)(lock), 1))
-#define _raw_read_trylock(lock) ({ (void)(lock); (1); })
-#define _raw_write_trylock(lock) ({ (void)(lock); (1); })
-
-#define _spin_trylock(lock)     ({preempt_disable(); _raw_spin_trylock(lock) ? \
-                                1 : ({preempt_enable(); 0;});})
-
-#define _read_trylock(lock)     ({preempt_disable();_raw_read_trylock(lock) ? \
-                                1 : ({preempt_enable(); 0;});})
-
-#define _write_trylock(lock)    ({preempt_disable(); _raw_write_trylock(lock) ? \
-                                1 : ({preempt_enable(); 0;});})
-
-#define _spin_trylock_bh(lock)  ({preempt_disable(); local_bh_disable(); \
-                                _raw_spin_trylock(lock) ? \
-                                1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});})
-
-#define _spin_lock(lock) \
-do { \
-        preempt_disable(); \
-        _raw_spin_lock(lock); \
-        __acquire(lock); \
-} while(0)
-
-#define _write_lock(lock) \
-do { \
-        preempt_disable(); \
-        _raw_write_lock(lock); \
-        __acquire(lock); \
-} while(0)
-
-#define _read_lock(lock) \
-do { \
-        preempt_disable(); \
-        _raw_read_lock(lock); \
-        __acquire(lock); \
-} while(0)
-
-#define _spin_unlock(lock) \
-do { \
-        _raw_spin_unlock(lock); \
-        preempt_enable(); \
-        __release(lock); \
-} while (0)
-
-#define _write_unlock(lock) \
-do { \
-        _raw_write_unlock(lock); \
-        preempt_enable(); \
-        __release(lock); \
-} while(0)
-
-#define _read_unlock(lock) \
-do { \
-        _raw_read_unlock(lock); \
-        preempt_enable(); \
-        __release(lock); \
-} while(0)
-
-#define _spin_lock_irqsave(lock, flags) \
-do { \
-        local_irq_save(flags); \
-        preempt_disable(); \
-        _raw_spin_lock(lock); \
-        __acquire(lock); \
-} while (0)
-
-#define _spin_lock_irq(lock) \
-do { \
-        local_irq_disable(); \
-        preempt_disable(); \
-        _raw_spin_lock(lock); \
-        __acquire(lock); \
-} while (0)
-
-#define _spin_lock_bh(lock) \
-do { \
-        local_bh_disable(); \
-        preempt_disable(); \
-        _raw_spin_lock(lock); \
-        __acquire(lock); \
-} while (0)
-
-#define _read_lock_irqsave(lock, flags) \
-do { \
-        local_irq_save(flags); \
-        preempt_disable(); \
-        _raw_read_lock(lock); \
-        __acquire(lock); \
-} while (0)
-
-#define _read_lock_irq(lock) \
-do { \
-        local_irq_disable(); \
-        preempt_disable(); \
-        _raw_read_lock(lock); \
-        __acquire(lock); \
-} while (0)
-
-#define _read_lock_bh(lock) \
-do { \
-        local_bh_disable(); \
-        preempt_disable(); \
-        _raw_read_lock(lock); \
-        __acquire(lock); \
-} while (0)
-
-#define _write_lock_irqsave(lock, flags) \
-do { \
-        local_irq_save(flags); \
-        preempt_disable(); \
-        _raw_write_lock(lock); \
-        __acquire(lock); \
-} while (0)
-
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void _raw_spin_lock(spinlock_t *lock);
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+ extern int _raw_spin_trylock(spinlock_t *lock);
+ extern void _raw_spin_unlock(spinlock_t *lock);
+
+ extern void _raw_read_lock(rwlock_t *lock);
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+#else
+# define _raw_spin_unlock(lock)         __raw_spin_unlock(&(lock)->raw_lock)
+# define _raw_spin_trylock(lock)        __raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_lock(lock)           __raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock_flags(lock, flags) \
+                __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_lock(rwlock)         __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)        __raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)       __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)      __raw_write_unlock(&(rwlock)->raw_lock)
+# define _raw_read_trylock(rwlock)      __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_write_trylock(rwlock)     __raw_write_trylock(&(rwlock)->raw_lock)
+#endif
 
-#define _write_lock_irq(lock) \
-do { \
-        local_irq_disable(); \
-        preempt_disable(); \
-        _raw_write_lock(lock); \
-        __acquire(lock); \
-} while (0)
-
-#define _write_lock_bh(lock) \
-do { \
-        local_bh_disable(); \
-        preempt_disable(); \
-        _raw_write_lock(lock); \
-        __acquire(lock); \
-} while (0)
-
-#define _spin_unlock_irqrestore(lock, flags) \
-do { \
-        _raw_spin_unlock(lock); \
-        local_irq_restore(flags); \
-        preempt_enable(); \
-        __release(lock); \
-} while (0)
-
-#define _spin_unlock_irq(lock) \
-do { \
-        _raw_spin_unlock(lock); \
-        local_irq_enable(); \
-        preempt_enable(); \
-        __release(lock); \
-} while (0)
-
-#define _spin_unlock_bh(lock) \
-do { \
-        _raw_spin_unlock(lock); \
-        preempt_enable_no_resched(); \
-        local_bh_enable(); \
-        __release(lock); \
-} while (0)
-
-#define _write_unlock_bh(lock) \
-do { \
-        _raw_write_unlock(lock); \
-        preempt_enable_no_resched(); \
-        local_bh_enable(); \
-        __release(lock); \
-} while (0)
-
-#define _read_unlock_irqrestore(lock, flags) \
-do { \
-        _raw_read_unlock(lock); \
-        local_irq_restore(flags); \
-        preempt_enable(); \
-        __release(lock); \
-} while (0)
-
-#define _write_unlock_irqrestore(lock, flags) \
-do { \
-        _raw_write_unlock(lock); \
-        local_irq_restore(flags); \
-        preempt_enable(); \
-        __release(lock); \
-} while (0)
-
-#define _read_unlock_irq(lock) \
-do { \
-        _raw_read_unlock(lock); \
-        local_irq_enable(); \
-        preempt_enable(); \
-        __release(lock); \
-} while (0)
-
-#define _read_unlock_bh(lock) \
-do { \
-        _raw_read_unlock(lock); \
-        preempt_enable_no_resched(); \
-        local_bh_enable(); \
-        __release(lock); \
-} while (0)
-
-#define _write_unlock_irq(lock) \
-do { \
-        _raw_write_unlock(lock); \
-        local_irq_enable(); \
-        preempt_enable(); \
-        __release(lock); \
-} while (0)
-
-#endif /* !SMP */
+#define read_can_lock(rwlock)           __raw_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)          __raw_write_can_lock(&(rwlock)->raw_lock)
 
 /*
  * Define the various spin_lock and rw_lock methods. Note we define these
  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
  * methods are defined as nops in the case they are not required.
  */
 #define spin_trylock(lock)      __cond_lock(_spin_trylock(lock))
 #define read_trylock(lock)      __cond_lock(_read_trylock(lock))
 #define write_trylock(lock)     __cond_lock(_write_trylock(lock))
 
 #define spin_lock(lock)         _spin_lock(lock)
 #define write_lock(lock)        _write_lock(lock)
 #define read_lock(lock)         _read_lock(lock)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 #define spin_lock_irqsave(lock, flags)  flags = _spin_lock_irqsave(lock)
 #define read_lock_irqsave(lock, flags)  flags = _read_lock_irqsave(lock)
 #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock)
@@ -470,137 +171,59 @@ do { \
 #define write_lock_irq(lock)            _write_lock_irq(lock)
 #define write_lock_bh(lock)             _write_lock_bh(lock)
 
 #define spin_unlock(lock)               _spin_unlock(lock)
 #define write_unlock(lock)              _write_unlock(lock)
 #define read_unlock(lock)               _read_unlock(lock)
 
-#define spin_unlock_irqrestore(lock, flags)     _spin_unlock_irqrestore(lock, flags)
+#define spin_unlock_irqrestore(lock, flags) \
+                                        _spin_unlock_irqrestore(lock, flags)
 #define spin_unlock_irq(lock)           _spin_unlock_irq(lock)
 #define spin_unlock_bh(lock)            _spin_unlock_bh(lock)
 
-#define read_unlock_irqrestore(lock, flags)     _read_unlock_irqrestore(lock, flags)
-#define read_unlock_irq(lock)           _read_unlock_irq(lock)
-#define read_unlock_bh(lock)            _read_unlock_bh(lock)
+#define read_unlock_irqrestore(lock, flags) \
+                                        _read_unlock_irqrestore(lock, flags)
+#define read_unlock_irq(lock)           _read_unlock_irq(lock)
+#define read_unlock_bh(lock)            _read_unlock_bh(lock)
 
-#define write_unlock_irqrestore(lock, flags)    _write_unlock_irqrestore(lock, flags)
-#define write_unlock_irq(lock)          _write_unlock_irq(lock)
-#define write_unlock_bh(lock)           _write_unlock_bh(lock)
+#define write_unlock_irqrestore(lock, flags) \
+                                        _write_unlock_irqrestore(lock, flags)
+#define write_unlock_irq(lock)          _write_unlock_irq(lock)
+#define write_unlock_bh(lock)           _write_unlock_bh(lock)
 
 #define spin_trylock_bh(lock)           __cond_lock(_spin_trylock_bh(lock))
 
 #define spin_trylock_irq(lock) \
 ({ \
         local_irq_disable(); \
         _spin_trylock(lock) ? \
-        1 : ({local_irq_enable(); 0; }); \
+        1 : ({ local_irq_enable(); 0; }); \
 })
 
 #define spin_trylock_irqsave(lock, flags) \
 ({ \
         local_irq_save(flags); \
         _spin_trylock(lock) ? \
-        1 : ({local_irq_restore(flags); 0;}); \
+        1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-#ifdef CONFIG_LOCKMETER
-extern void _metered_spin_lock (spinlock_t *lock);
-extern void _metered_spin_unlock (spinlock_t *lock);
-extern int _metered_spin_trylock(spinlock_t *lock);
-extern void _metered_read_lock (rwlock_t *lock);
-extern void _metered_read_unlock (rwlock_t *lock);
-extern void _metered_write_lock (rwlock_t *lock);
-extern void _metered_write_unlock (rwlock_t *lock);
-extern int _metered_read_trylock (rwlock_t *lock);
-extern int _metered_write_trylock(rwlock_t *lock);
-#endif
-
-/* "lock on reference count zero" */
-#ifndef ATOMIC_DEC_AND_LOCK
-#include <asm/atomic.h>
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
-#endif
-
-#define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock))
-
-/*
- * bit-based spin_lock()
- *
- * Don't use this unless you really need to: spin_lock() and spin_unlock()
- * are significantly faster.
- */
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
-{
-        /*
-         * Assuming the lock is uncontended, this never enters
-         * the body of the outer loop. If it is contended, then
-         * within the inner loop a non-atomic test is used to
-         * busywait with less bus contention for a good time to
-         * attempt to acquire the lock bit.
-         */
-        preempt_disable();
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-        while (test_and_set_bit(bitnum, addr)) {
-                while (test_bit(bitnum, addr)) {
-                        preempt_enable();
-                        cpu_relax();
-                        preempt_disable();
-                }
-        }
-#endif
-        __acquire(bitlock);
-}
-
 /*
- * Return true if it was acquired
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
  */
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
-{
-        preempt_disable();
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-        if (test_and_set_bit(bitnum, addr)) {
+#include <asm/atomic.h>
+/**
+ * atomic_dec_and_lock - lock on reaching reference count zero
+ * @atomic: the atomic counter
+ * @lock: the spinlock in question
-                preempt_enable();
-                return 0;
-        }
-#endif
-        __acquire(bitlock);
-        return 1;
-}
-
-/*
- * bit-based spin_unlock()
- */
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-        BUG_ON(!test_bit(bitnum, addr));
-        smp_mb__before_clear_bit();
-        clear_bit(bitnum, addr);
-#endif
-        preempt_enable();
-        __release(bitlock);
-}
-
-/*
- * Return true if the lock is held.
  */
-static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+#define atomic_dec_and_lock(atomic, lock) \
+                __cond_lock(_atomic_dec_and_lock(atomic, lock))
-        return test_bit(bitnum, addr);
-#elif defined CONFIG_PREEMPT
-        return preempt_count();
-#else
-        return 1;
-#endif
-}
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
 
 /**
  * spin_can_lock - would spin_trylock() succeed?
  * @lock: the spinlock in question.
  */
 #define spin_can_lock(lock)     (!spin_is_locked(lock))
 
 #endif /* __LINUX_SPINLOCK_H */
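
Reading the converted header top-down, here is a hedged sketch of what the locking layers resolve to after this patch on an SMP, non-debug build; the step names come from the headers above, while the exact bodies are per-architecture:

/*
 * spin_lock(lock)                         - linux/spinlock.h
 *   -> _spin_lock(lock)                   - prototype in linux/spinlock_api_smp.h
 *        preempt_disable();
 *        _raw_spin_lock(lock);            - non-debug mapping above:
 *          -> __raw_spin_lock(&(lock)->raw_lock)
 *                                         - inline asm in asm/spinlock.h
 *
 * On a UP non-debug build the same spin_lock() resolves through
 * linux/spinlock_api_up.h and linux/spinlock_up.h instead, where the
 * __raw_*() ops are NOPs and essentially only preempt_disable() remains.
 */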