author    Thomas Gleixner <tglx@linutronix.de>    2009-12-02 14:02:59 -0500
committer Thomas Gleixner <tglx@linutronix.de>    2009-12-14 17:55:32 -0500
commit    c2f21ce2e31286a0a32f8da0a7856e9ca1122ef3 (patch)
tree      6cc8d1fd37ffa6d02481353857b92734241f4dd0
parent    e5931943d02bf751b1ec849c0d2ade23d76a8d41 (diff)
locking: Implement new raw_spinlock
Now that the raw_spin name space is freed up, we can implement
raw_spinlock and the related functions which are used to annotate the
locks which are not converted to sleeping spinlocks in preempt-rt.

A side effect is that only such locks can be used with the low level
lock functions which circumvent lockdep.

For !rt, spin_* functions are mapped to the raw_spin* implementations.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  include/linux/spinlock.h          | 258
-rw-r--r--  include/linux/spinlock_api_smp.h  |  51
-rw-r--r--  include/linux/spinlock_api_up.h   |   2
-rw-r--r--  include/linux/spinlock_types.h    |  49
-rw-r--r--  kernel/mutex-debug.h              |  12
-rw-r--r--  kernel/sched.c                    |   2
-rw-r--r--  kernel/spinlock.c                 |  34
-rw-r--r--  lib/spinlock_debug.c              |  24
8 files changed, 297 insertions(+), 135 deletions(-)
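For illustration only (not part of the patch): with this split, code that must keep spinning even on preempt-rt declares a raw_spinlock_t and uses the new raw_spin_* calls, while ordinary code keeps spinlock_t and the spin_* wrappers, which for !rt simply forward to the raw implementations through the rlock member. The lock names and the counter below are made up for the sketch.

#include <linux/spinlock.h>

/* Must stay a true spinning lock on preempt-rt (e.g. low level irq state). */
static DEFINE_RAW_SPINLOCK(hw_state_lock);
/* May be converted to a sleeping lock on preempt-rt. */
static DEFINE_SPINLOCK(stats_lock);

static unsigned long hw_state;
static unsigned long stats_count;

static void set_hw_state(unsigned long state)
{
	unsigned long flags;

	/* New raw_spin_* API: circumvents the rt conversion. */
	raw_spin_lock_irqsave(&hw_state_lock, flags);
	hw_state = state;
	raw_spin_unlock_irqrestore(&hw_state_lock, flags);
}

static void bump_stats(void)
{
	/* spin_lock() maps to raw_spin_lock(&lock->rlock) when PREEMPT_RT=n. */
	spin_lock(&stats_lock);
	stats_count++;
	spin_unlock(&stats_lock);
}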
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 53bc2213b414..ef5a55d96b9b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -80,7 +80,7 @@
 #include <linux/spinlock_types.h>
 
 /*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
  */
 #ifdef CONFIG_SMP
 # include <asm/spinlock.h>
@@ -89,30 +89,30 @@
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-  extern void __spin_lock_init(spinlock_t *lock, const char *name,
+  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                struct lock_class_key *key);
-# define spin_lock_init(lock) \
+# define raw_spin_lock_init(lock) \
 do { \
         static struct lock_class_key __key; \
         \
-        __spin_lock_init((lock), #lock, &__key); \
+        __raw_spin_lock_init((lock), #lock, &__key); \
 } while (0)
 
 #else
-# define spin_lock_init(lock) \
-        do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock) \
+        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
-#define spin_is_locked(lock)            arch_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)
 
 #ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
 #else
 
 #ifdef arch_spin_is_contended
-#define spin_is_contended(lock)         arch_spin_is_contended(&(lock)->raw_lock)
+#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
 #else
-#define spin_is_contended(lock)         (((void)(lock), 0))
+#define raw_spin_is_contended(lock)     (((void)(lock), 0))
 #endif /*arch_spin_is_contended*/
 #endif
 
@@ -122,22 +122,37 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #endif
 
 /**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
  */
-#define spin_unlock_wait(lock)          arch_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
+ extern void _raw_spin_lock(raw_spinlock_t *lock);
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
+ extern int _raw_spin_trylock(raw_spinlock_t *lock);
+ extern void _raw_spin_unlock(raw_spinlock_t *lock);
 #else
-# define _raw_spin_lock(lock)           arch_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
-        arch_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock)        arch_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock)         arch_spin_unlock(&(lock)->raw_lock)
+static inline void _raw_spin_lock(raw_spinlock_t *lock)
+{
+        arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+        arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+        return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+        arch_spin_unlock(&lock->raw_lock);
+}
 #endif
 
 /*
@@ -146,38 +161,38 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
  * various methods are defined as nops in the case they are not
  * required.
  */
-#define spin_trylock(lock)      __cond_lock(lock, _spin_trylock(lock))
+#define raw_spin_trylock(lock)  __cond_lock(lock, _spin_trylock(lock))
 
-#define spin_lock(lock)         _spin_lock(lock)
+#define raw_spin_lock(lock)     _spin_lock(lock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock) \
+# define raw_spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define raw_spin_lock_nest_lock(lock, nest_lock) \
         do { \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
 #else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass)           _spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock)       _spin_lock(lock)
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
-#define spin_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
         do { \
                 typecheck(unsigned long, flags); \
                 flags = _spin_lock_irqsave(lock); \
         } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
         do { \
                 typecheck(unsigned long, flags); \
                 flags = _spin_lock_irqsave_nested(lock, subclass); \
         } while (0)
 #else
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
         do { \
                 typecheck(unsigned long, flags); \
                 flags = _spin_lock_irqsave(lock); \
@@ -186,45 +201,178 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 
 #else
 
-#define spin_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
         do { \
                 typecheck(unsigned long, flags); \
                 _spin_lock_irqsave(lock, flags); \
         } while (0)
 
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
-        spin_lock_irqsave(lock, flags)
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+        raw_spin_lock_irqsave(lock, flags)
 
 #endif
 
-#define spin_lock_irq(lock)             _spin_lock_irq(lock)
-#define spin_lock_bh(lock)              _spin_lock_bh(lock)
-#define spin_unlock(lock)               _spin_unlock(lock)
-#define spin_unlock_irq(lock)           _spin_unlock_irq(lock)
+#define raw_spin_lock_irq(lock)         _spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock)          _spin_lock_bh(lock)
+#define raw_spin_unlock(lock)           _spin_unlock(lock)
+#define raw_spin_unlock_irq(lock)       _spin_unlock_irq(lock)
 
-#define spin_unlock_irqrestore(lock, flags) \
+#define raw_spin_unlock_irqrestore(lock, flags) \
         do { \
                 typecheck(unsigned long, flags); \
                 _spin_unlock_irqrestore(lock, flags); \
         } while (0)
-#define spin_unlock_bh(lock)            _spin_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock)        _spin_unlock_bh(lock)
 
-#define spin_trylock_bh(lock)           __cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock)       __cond_lock(lock, _spin_trylock_bh(lock))
 
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
 ({ \
         local_irq_disable(); \
-        spin_trylock(lock) ? \
+        raw_spin_trylock(lock) ? \
         1 : ({ local_irq_enable(); 0; }); \
 })
 
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
 ({ \
         local_irq_save(flags); \
-        spin_trylock(lock) ? \
+        raw_spin_trylock(lock) ? \
         1 : ({ local_irq_restore(flags); 0; }); \
 })
 
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+        return &lock->rlock;
+}
+
+#define spin_lock_init(_lock) \
+do { \
+        spinlock_check(_lock); \
+        raw_spin_lock_init(&(_lock)->rlock); \
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+        raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+        raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+        return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass) \
+do { \
+        raw_spin_lock_nested(spinlock_check(lock), subclass); \
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+do { \
+        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+        raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+        raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+        raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+        raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+        raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+        raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+        return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+        return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+        raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
+})
+
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+        raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+        return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+        return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+        return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+        assert_raw_spin_locked(&lock->rlock);
+}
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
@@ -242,22 +390,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
                 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock)     (!spin_is_locked(lock))
-
-/* Include rwlock functions */
-#include <linux/rwlock.h>
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
 #endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index a2b2c9df91de..eabe5068d138 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,26 +17,29 @@
 
 int in_lock_functions(unsigned long addr);
 
-#define assert_spin_locked(x)           BUG_ON(!spin_is_locked(x))
+#define assert_raw_spin_locked(x)       BUG_ON(!raw_spin_is_locked(x))
 
-void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                         __acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+void __lockfunc
+_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                         __acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
+void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) __acquires(lock);
 
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
                                                         __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc
+_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
                                                         __acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+int __lockfunc _spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
                                                         __releases(lock);
 
 #ifdef CONFIG_INLINE_SPIN_LOCK
@@ -79,7 +82,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
 #endif
 
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __spin_trylock(raw_spinlock_t *lock)
 {
         preempt_disable();
         if (_raw_spin_trylock(lock)) {
@@ -97,7 +100,7 @@ static inline int __spin_trylock(spinlock_t *lock)
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
 {
         unsigned long flags;
 
@@ -117,7 +120,7 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
         return flags;
 }
 
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __spin_lock_irq(raw_spinlock_t *lock)
 {
         local_irq_disable();
         preempt_disable();
@@ -125,7 +128,7 @@ static inline void __spin_lock_irq(spinlock_t *lock)
         LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __spin_lock_bh(raw_spinlock_t *lock)
 {
         local_bh_disable();
         preempt_disable();
@@ -133,7 +136,7 @@ static inline void __spin_lock_bh(spinlock_t *lock)
         LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __spin_lock(raw_spinlock_t *lock)
 {
         preempt_disable();
         spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -142,14 +145,14 @@ static inline void __spin_lock(spinlock_t *lock)
 
 #endif /* CONFIG_PREEMPT */
 
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __spin_unlock(raw_spinlock_t *lock)
 {
         spin_release(&lock->dep_map, 1, _RET_IP_);
         _raw_spin_unlock(lock);
         preempt_enable();
 }
 
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
                                             unsigned long flags)
 {
         spin_release(&lock->dep_map, 1, _RET_IP_);
@@ -158,7 +161,7 @@ static inline void __spin_unlock_irqrestore(spinlock_t *lock,
         preempt_enable();
 }
 
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __spin_unlock_irq(raw_spinlock_t *lock)
 {
         spin_release(&lock->dep_map, 1, _RET_IP_);
         _raw_spin_unlock(lock);
@@ -166,7 +169,7 @@ static inline void __spin_unlock_irq(spinlock_t *lock)
         preempt_enable();
 }
 
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __spin_unlock_bh(raw_spinlock_t *lock)
 {
         spin_release(&lock->dep_map, 1, _RET_IP_);
         _raw_spin_unlock(lock);
@@ -174,7 +177,7 @@ static inline void __spin_unlock_bh(spinlock_t *lock)
         local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __spin_trylock_bh(raw_spinlock_t *lock)
 {
         local_bh_disable();
         preempt_disable();
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d3164576..3a9e27adecf9 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
 
 #define in_lock_functions(ADDR)         0
 
-#define assert_spin_locked(lock)        do { (void)(lock); } while (0)
+#define assert_raw_spin_locked(lock)    do { (void)(lock); } while (0)
 
 /*
  * In the UP-nondebug case there's no real locking going on, so the
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 7dadce303ebf..851b7783720d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,7 +17,7 @@
 
 #include <linux/lockdep.h>
 
-typedef struct {
+typedef struct raw_spinlock {
         arch_spinlock_t raw_lock;
 #ifdef CONFIG_GENERIC_LOCKBREAK
         unsigned int break_lock;
@@ -29,7 +29,7 @@ typedef struct {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
         struct lockdep_map dep_map;
 #endif
-} spinlock_t;
+} raw_spinlock_t;
 
 #define SPINLOCK_MAGIC          0xdead4ead
 
@@ -42,18 +42,45 @@ typedef struct {
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname) \
-        (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
-                       .magic = SPINLOCK_MAGIC, \
-                       .owner = SPINLOCK_OWNER_INIT, \
-                       .owner_cpu = -1, \
-                       SPIN_DEP_MAP_INIT(lockname) }
+# define SPIN_DEBUG_INIT(lockname) \
+        .magic = SPINLOCK_MAGIC, \
+        .owner_cpu = -1, \
+        .owner = SPINLOCK_OWNER_INIT,
 #else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
-        (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
-                       SPIN_DEP_MAP_INIT(lockname) }
+# define SPIN_DEBUG_INIT(lockname)
 #endif
 
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+        { \
+        .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+        SPIN_DEBUG_INIT(lockname) \
+        SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+        (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x)  raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+        union {
+                struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+                struct {
+                        u8 __padding[LOCK_PADSIZE];
+                        struct lockdep_map dep_map;
+                };
+#endif
+        };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+        { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+        (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
 /*
  * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
  * deprecated.
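As an aside on the spinlock_types.h change above (illustration, not part of the patch): the anonymous struct in the new struct spinlock pads with LOCK_PADSIZE bytes so that, with CONFIG_DEBUG_LOCK_ALLOC, dep_map appears to land at the same offset in spinlock_t as in raw_spinlock_t, which lets lockdep take &lock->dep_map on either type. A minimal sketch of that assumption, using only the types introduced here, could look like this:

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/spinlock_types.h>

static inline void spinlock_layout_check(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* The __padding array is what makes these two offsets coincide. */
	BUILD_BUG_ON(offsetof(spinlock_t, dep_map) !=
		     offsetof(struct raw_spinlock, dep_map));
#endif
}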
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 7bebbd15b342..57d527a16f9d 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock)
                                                                 \
                 DEBUG_LOCKS_WARN_ON(in_interrupt()); \
                 local_irq_save(flags); \
-                arch_spin_lock(&(lock)->raw_lock); \
+                arch_spin_lock(&(lock)->rlock.raw_lock);\
                 DEBUG_LOCKS_WARN_ON(l->magic != l); \
         } while (0)
 
 #define spin_unlock_mutex(lock, flags) \
         do { \
-                arch_spin_unlock(&(lock)->raw_lock); \
+                arch_spin_unlock(&(lock)->rlock.raw_lock); \
                 local_irq_restore(flags); \
                 preempt_check_resched(); \
         } while (0)
diff --git a/kernel/sched.c b/kernel/sched.c
index fd05861b2111..e6acf2d7b753 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -884,7 +884,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
         /* this is a valid case when another task releases the spinlock */
-        rq->lock.owner = current;
+        rq->lock.rlock.owner = current;
 #endif
         /*
          * If we are tracking spinlock dependencies then we have to
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index fbb5f8b78357..54eb7dd3c608 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -32,6 +32,8 @@
  * include/linux/spinlock_api_smp.h
  */
 #else
+#define raw_read_can_lock(l)    read_can_lock(l)
+#define raw_write_can_lock(l)   write_can_lock(l)
 /*
  * We build the __lock_function inlines here. They are too large for
  * inlining all over the place, but here is only one user per function
@@ -52,7 +54,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \
                                                                         \
                 if (!(lock)->break_lock) \
                         (lock)->break_lock = 1; \
-                while (!op##_can_lock(lock) && (lock)->break_lock) \
+                while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
                         arch_##op##_relax(&lock->raw_lock); \
         } \
         (lock)->break_lock = 0; \
@@ -72,7 +74,7 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
                                                                         \
                 if (!(lock)->break_lock) \
                         (lock)->break_lock = 1; \
-                while (!op##_can_lock(lock) && (lock)->break_lock) \
+                while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
                         arch_##op##_relax(&lock->raw_lock); \
         } \
         (lock)->break_lock = 0; \
@@ -107,14 +109,14 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
  *         __[spin|read|write]_lock_irqsave()
  *         __[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(spin, raw_spinlock);
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
 
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_TRYLOCK
-int __lockfunc _spin_trylock(spinlock_t *lock)
+int __lockfunc _spin_trylock(raw_spinlock_t *lock)
 {
         return __spin_trylock(lock);
 }
@@ -122,7 +124,7 @@ EXPORT_SYMBOL(_spin_trylock);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock)
 {
         return __spin_trylock_bh(lock);
 }
@@ -130,7 +132,7 @@ EXPORT_SYMBOL(_spin_trylock_bh);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK
-void __lockfunc _spin_lock(spinlock_t *lock)
+void __lockfunc _spin_lock(raw_spinlock_t *lock)
 {
         __spin_lock(lock);
 }
@@ -138,7 +140,7 @@ EXPORT_SYMBOL(_spin_lock);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
 {
         return __spin_lock_irqsave(lock);
 }
@@ -146,7 +148,7 @@ EXPORT_SYMBOL(_spin_lock_irqsave);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
+void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)
 {
         __spin_lock_irq(lock);
 }
@@ -154,7 +156,7 @@ EXPORT_SYMBOL(_spin_lock_irq);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK_BH
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
+void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)
 {
         __spin_lock_bh(lock);
 }
@@ -162,7 +164,7 @@ EXPORT_SYMBOL(_spin_lock_bh);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK
-void __lockfunc _spin_unlock(spinlock_t *lock)
+void __lockfunc _spin_unlock(raw_spinlock_t *lock)
 {
         __spin_unlock(lock);
 }
@@ -170,7 +172,7 @@ EXPORT_SYMBOL(_spin_unlock);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void __lockfunc _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 {
         __spin_unlock_irqrestore(lock, flags);
 }
@@ -178,7 +180,7 @@ EXPORT_SYMBOL(_spin_unlock_irqrestore);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)
 {
         __spin_unlock_irq(lock);
 }
@@ -186,7 +188,7 @@ EXPORT_SYMBOL(_spin_unlock_irq);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)
 {
         __spin_unlock_bh(lock);
 }
@@ -339,7 +341,7 @@ EXPORT_SYMBOL(_write_unlock_bh);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
 {
         preempt_disable();
         spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
@@ -347,7 +349,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_spin_lock_nested);
 
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
+unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                    int subclass)
 {
         unsigned long flags;
@@ -361,7 +363,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
 }
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock,
                                      struct lockdep_map *nest_lock)
 {
         preempt_disable();
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 0cea0bf6114e..e705848cc33c 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 
-void __spin_lock_init(spinlock_t *lock, const char *name,
+void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                       struct lock_class_key *key)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
         /*
@@ -29,7 +29,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
         lock->owner_cpu = -1;
 }
 
-EXPORT_SYMBOL(__spin_lock_init);
+EXPORT_SYMBOL(__raw_spin_lock_init);
 
 void __rwlock_init(rwlock_t *lock, const char *name,
                    struct lock_class_key *key)
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__rwlock_init);
 
-static void spin_bug(spinlock_t *lock, const char *msg)
+static void spin_bug(raw_spinlock_t *lock, const char *msg)
 {
         struct task_struct *owner = NULL;
 
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
 #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
 
 static inline void
-debug_spin_lock_before(spinlock_t *lock)
+debug_spin_lock_before(raw_spinlock_t *lock)
 {
         SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
         SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
                                         lock, "cpu recursion");
 }
 
-static inline void debug_spin_lock_after(spinlock_t *lock)
+static inline void debug_spin_lock_after(raw_spinlock_t *lock)
 {
         lock->owner_cpu = raw_smp_processor_id();
         lock->owner = current;
 }
 
-static inline void debug_spin_unlock(spinlock_t *lock)
+static inline void debug_spin_unlock(raw_spinlock_t *lock)
 {
         SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
-        SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+        SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
         SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
         SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                         lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
         lock->owner_cpu = -1;
 }
 
-static void __spin_lock_debug(spinlock_t *lock)
+static void __spin_lock_debug(raw_spinlock_t *lock)
 {
         u64 i;
         u64 loops = loops_per_jiffy * HZ;
@@ -125,7 +125,7 @@ static void __spin_lock_debug(spinlock_t *lock)
         }
 }
 
-void _raw_spin_lock(spinlock_t *lock)
+void _raw_spin_lock(raw_spinlock_t *lock)
 {
         debug_spin_lock_before(lock);
         if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
@@ -133,7 +133,7 @@ void _raw_spin_lock(spinlock_t *lock)
         debug_spin_lock_after(lock);
 }
 
-int _raw_spin_trylock(spinlock_t *lock)
+int _raw_spin_trylock(raw_spinlock_t *lock)
 {
         int ret = arch_spin_trylock(&lock->raw_lock);
 
@@ -148,7 +148,7 @@ int _raw_spin_trylock(spinlock_t *lock)
         return ret;
 }
 
-void _raw_spin_unlock(spinlock_t *lock)
+void _raw_spin_unlock(raw_spinlock_t *lock)
 {
         debug_spin_unlock(lock);
         arch_spin_unlock(&lock->raw_lock);