Diffstat (limited to 'kernel/spinlock.c')
 -rw-r--r--  kernel/spinlock.c | 448 ++++++++++++++++++++++----------------------
 1 file changed, 223 insertions(+), 225 deletions(-)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5ddab730cb2f..be6517fb9c14 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,193 +21,72 @@
 #include <linux/debug_locks.h>
 #include <linux/module.h>
 
-#ifndef _spin_trylock
-int __lockfunc _spin_trylock(spinlock_t *lock)
-{
-	return __spin_trylock(lock);
-}
-EXPORT_SYMBOL(_spin_trylock);
-#endif
-
-#ifndef _read_trylock
-int __lockfunc _read_trylock(rwlock_t *lock)
-{
-	return __read_trylock(lock);
-}
-EXPORT_SYMBOL(_read_trylock);
-#endif
-
-#ifndef _write_trylock
-int __lockfunc _write_trylock(rwlock_t *lock)
-{
-	return __write_trylock(lock);
-}
-EXPORT_SYMBOL(_write_trylock);
-#endif
-
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-
-#ifndef _read_lock
-void __lockfunc _read_lock(rwlock_t *lock)
-{
-	__read_lock(lock);
-}
-EXPORT_SYMBOL(_read_lock);
-#endif
-
-#ifndef _spin_lock_irqsave
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
-{
-	return __spin_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irqsave);
-#endif
-
-#ifndef _spin_lock_irq
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
-{
-	__spin_lock_irq(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irq);
-#endif
-
-#ifndef _spin_lock_bh
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
-{
-	__spin_lock_bh(lock);
-}
-EXPORT_SYMBOL(_spin_lock_bh);
-#endif
-
-#ifndef _read_lock_irqsave
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-{
-	return __read_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_read_lock_irqsave);
-#endif
-
-#ifndef _read_lock_irq
-void __lockfunc _read_lock_irq(rwlock_t *lock)
-{
-	__read_lock_irq(lock);
-}
-EXPORT_SYMBOL(_read_lock_irq);
-#endif
-
-#ifndef _read_lock_bh
-void __lockfunc _read_lock_bh(rwlock_t *lock)
-{
-	__read_lock_bh(lock);
-}
-EXPORT_SYMBOL(_read_lock_bh);
-#endif
-
-#ifndef _write_lock_irqsave
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-{
-	return __write_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_write_lock_irqsave);
-#endif
-
-#ifndef _write_lock_irq
-void __lockfunc _write_lock_irq(rwlock_t *lock)
-{
-	__write_lock_irq(lock);
-}
-EXPORT_SYMBOL(_write_lock_irq);
-#endif
-
-#ifndef _write_lock_bh
-void __lockfunc _write_lock_bh(rwlock_t *lock)
-{
-	__write_lock_bh(lock);
-}
-EXPORT_SYMBOL(_write_lock_bh);
-#endif
-
-#ifndef _spin_lock
-void __lockfunc _spin_lock(spinlock_t *lock)
-{
-	__spin_lock(lock);
-}
-EXPORT_SYMBOL(_spin_lock);
-#endif
-
-#ifndef _write_lock
-void __lockfunc _write_lock(rwlock_t *lock)
-{
-	__write_lock(lock);
-}
-EXPORT_SYMBOL(_write_lock);
-#endif
-
-#else /* CONFIG_PREEMPT: */
-
 /*
+ * The __lock_function inlines are taken from
+ * include/linux/spinlock_api_smp.h
+ */
+#else
+#define raw_read_can_lock(l)	read_can_lock(l)
+#define raw_write_can_lock(l)	write_can_lock(l)
+/*
+ * We build the __lock_function inlines here. They are too large for
+ * inlining all over the place, but here is only one user per function
+ * which embedds them into the calling _lock_function below.
+ *
  * This could be a long-held lock. We both prepare to spin for a long
  * time (making _this_ CPU preemptable if possible), and we also signal
  * towards that other CPU that it should break the lock ASAP.
- *
- * (We do this in a function because inlining it would be excessive.)
  */
-
 #define BUILD_LOCK_OPS(op, locktype)					\
-void __lockfunc _##op##_lock(locktype##_t *lock)			\
+void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
 {									\
 	for (;;) {							\
 		preempt_disable();					\
-		if (likely(_raw_##op##_trylock(lock)))			\
+		if (likely(do_raw_##op##_trylock(lock)))		\
 			break;						\
 		preempt_enable();					\
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (!op##_can_lock(lock) && (lock)->break_lock)	\
-			_raw_##op##_relax(&lock->raw_lock);		\
+		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+			arch_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock);						\
-									\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
+unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
 {									\
 	unsigned long flags;						\
 									\
 	for (;;) {							\
 		preempt_disable();					\
 		local_irq_save(flags);					\
-		if (likely(_raw_##op##_trylock(lock)))			\
+		if (likely(do_raw_##op##_trylock(lock)))		\
 			break;						\
 		local_irq_restore(flags);				\
 		preempt_enable();					\
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (!op##_can_lock(lock) && (lock)->break_lock)	\
-			_raw_##op##_relax(&lock->raw_lock);		\
+		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+			arch_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
 	return flags;							\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock_irqsave);					\
-									\
-void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
+void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
 {									\
-	_##op##_lock_irqsave(lock);					\
+	_raw_##op##_lock_irqsave(lock);					\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock_irq);					\
-									\
-void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
+void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
 {									\
 	unsigned long flags;						\
 									\
@@ -216,164 +95,283 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
 	/* irq-disabling. We use the generic preemption-aware */	\
 	/* function: */							\
 	/**/								\
-	flags = _##op##_lock_irqsave(lock);				\
+	flags = _raw_##op##_lock_irqsave(lock);				\
 	local_bh_disable();						\
 	local_irq_restore(flags);					\
 }									\
-									\
-EXPORT_SYMBOL(_##op##_lock_bh)
 
 /*
  * Build preemption-friendly versions of the following
  * lock-spinning functions:
  *
- *         _[spin|read|write]_lock()
- *         _[spin|read|write]_lock_irq()
- *         _[spin|read|write]_lock_irqsave()
- *         _[spin|read|write]_lock_bh()
+ *         __[spin|read|write]_lock()
+ *         __[spin|read|write]_lock_irq()
+ *         __[spin|read|write]_lock_irqsave()
+ *         __[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(spin, raw_spinlock);
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
 
-#endif /* CONFIG_PREEMPT */
+#endif
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return __raw_spin_trylock(lock);
+}
+EXPORT_SYMBOL(_raw_spin_trylock);
+#endif
 
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
 {
-	preempt_disable();
-	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	return __raw_spin_trylock_bh(lock);
 }
-EXPORT_SYMBOL(_spin_lock_nested);
+EXPORT_SYMBOL(_raw_spin_trylock_bh);
+#endif
 
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+#ifndef CONFIG_INLINE_SPIN_LOCK
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
 {
-	unsigned long flags;
+	__raw_spin_lock(lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock);
+#endif
 
-	local_irq_save(flags);
-	preempt_disable();
-	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
-				_raw_spin_lock_flags, &flags);
-	return flags;
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+{
+	return __raw_spin_lock_irqsave(lock);
 }
-EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+EXPORT_SYMBOL(_raw_spin_lock_irqsave);
+#endif
 
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
-				     struct lockdep_map *nest_lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
 {
-	preempt_disable();
-	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	__raw_spin_lock_irq(lock);
 }
-EXPORT_SYMBOL(_spin_lock_nest_lock);
+EXPORT_SYMBOL(_raw_spin_lock_irq);
+#endif
 
+#ifndef CONFIG_INLINE_SPIN_LOCK_BH
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
+{
+	__raw_spin_lock_bh(lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_bh);
 #endif
 
-#ifndef _spin_unlock
-void __lockfunc _spin_unlock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__spin_unlock(lock);
+	__raw_spin_unlock(lock);
 }
-EXPORT_SYMBOL(_spin_unlock);
+EXPORT_SYMBOL(_raw_spin_unlock);
 #endif
 
-#ifndef _write_unlock
-void __lockfunc _write_unlock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
+void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 {
-	__write_unlock(lock);
+	__raw_spin_unlock_irqrestore(lock, flags);
 }
-EXPORT_SYMBOL(_write_unlock);
+EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
 #endif
 
-#ifndef _read_unlock
-void __lockfunc _read_unlock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
 {
-	__read_unlock(lock);
+	__raw_spin_unlock_irq(lock);
 }
-EXPORT_SYMBOL(_read_unlock);
+EXPORT_SYMBOL(_raw_spin_unlock_irq);
 #endif
 
-#ifndef _spin_unlock_irqrestore
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
 {
-	__spin_unlock_irqrestore(lock, flags);
+	__raw_spin_unlock_bh(lock);
 }
-EXPORT_SYMBOL(_spin_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
-#ifndef _spin_unlock_irq
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_TRYLOCK
+int __lockfunc _raw_read_trylock(rwlock_t *lock)
 {
-	__spin_unlock_irq(lock);
+	return __raw_read_trylock(lock);
 }
-EXPORT_SYMBOL(_spin_unlock_irq);
+EXPORT_SYMBOL(_raw_read_trylock);
 #endif
 
-#ifndef _spin_unlock_bh
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK
+void __lockfunc _raw_read_lock(rwlock_t *lock)
 {
-	__spin_unlock_bh(lock);
+	__raw_read_lock(lock);
 }
-EXPORT_SYMBOL(_spin_unlock_bh);
+EXPORT_SYMBOL(_raw_read_lock);
 #endif
 
-#ifndef _read_unlock_irqrestore
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
 {
-	__read_unlock_irqrestore(lock, flags);
+	return __raw_read_lock_irqsave(lock);
 }
-EXPORT_SYMBOL(_read_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_read_lock_irqsave);
 #endif
 
-#ifndef _read_unlock_irq
-void __lockfunc _read_unlock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQ
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
 {
-	__read_unlock_irq(lock);
+	__raw_read_lock_irq(lock);
 }
-EXPORT_SYMBOL(_read_unlock_irq);
+EXPORT_SYMBOL(_raw_read_lock_irq);
 #endif
 
-#ifndef _read_unlock_bh
-void __lockfunc _read_unlock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_BH
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
 {
-	__read_unlock_bh(lock);
+	__raw_read_lock_bh(lock);
 }
-EXPORT_SYMBOL(_read_unlock_bh);
+EXPORT_SYMBOL(_raw_read_lock_bh);
 #endif
 
-#ifndef _write_unlock_irqrestore
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_READ_UNLOCK
+void __lockfunc _raw_read_unlock(rwlock_t *lock)
 {
-	__write_unlock_irqrestore(lock, flags);
+	__raw_read_unlock(lock);
 }
-EXPORT_SYMBOL(_write_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_read_unlock);
 #endif
 
-#ifndef _write_unlock_irq
-void __lockfunc _write_unlock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
-	__write_unlock_irq(lock);
+	__raw_read_unlock_irqrestore(lock, flags);
 }
-EXPORT_SYMBOL(_write_unlock_irq);
+EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
 #endif
 
-#ifndef _write_unlock_bh
-void __lockfunc _write_unlock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
 {
-	__write_unlock_bh(lock);
+	__raw_read_unlock_irq(lock);
 }
-EXPORT_SYMBOL(_write_unlock_bh);
+EXPORT_SYMBOL(_raw_read_unlock_irq);
 #endif
 
-#ifndef _spin_trylock_bh
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_BH
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
 {
-	return __spin_trylock_bh(lock);
+	__raw_read_unlock_bh(lock);
 }
-EXPORT_SYMBOL(_spin_trylock_bh);
+EXPORT_SYMBOL(_raw_read_unlock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_TRYLOCK
+int __lockfunc _raw_write_trylock(rwlock_t *lock)
+{
+	return __raw_write_trylock(lock);
+}
+EXPORT_SYMBOL(_raw_write_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK
+void __lockfunc _raw_write_lock(rwlock_t *lock)
+{
+	__raw_write_lock(lock);
+}
+EXPORT_SYMBOL(_raw_write_lock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
+{
+	return __raw_write_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_raw_write_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
+{
+	__raw_write_lock_irq(lock);
+}
+EXPORT_SYMBOL(_raw_write_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_BH
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
+{
+	__raw_write_lock_bh(lock);
+}
+EXPORT_SYMBOL(_raw_write_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_UNLOCK
+void __lockfunc _raw_write_unlock(rwlock_t *lock)
+{
+	__raw_write_unlock(lock);
+}
+EXPORT_SYMBOL(_raw_write_unlock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+	__raw_write_unlock_irqrestore(lock, flags);
+}
+EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
+{
+	__raw_write_unlock_irq(lock);
+}
+EXPORT_SYMBOL(_raw_write_unlock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
+{
+	__raw_write_unlock_bh(lock);
+}
+EXPORT_SYMBOL(_raw_write_unlock_bh);
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+{
+	preempt_disable();
+	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_nested);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
+						       int subclass)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
+				do_raw_spin_lock_flags, &flags);
+	return flags;
+}
+EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
+
+void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
+					 struct lockdep_map *nest_lock)
+{
+	preempt_disable();
+	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
+
 #endif
 
 notrace int in_lock_functions(unsigned long addr)
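
Editor's note: for readers who want to see the shape of the loop that BUILD_LOCK_OPS generates, here is a minimal userspace sketch of the same trylock-then-spin-politely pattern, written with C11 atomics. It is an illustration only, not kernel code: all toy_* names are hypothetical stand-ins (toy_trylock() plays the role of do_raw_spin_trylock(), toy_can_lock() of raw_spin_can_lock(), toy_relax() of arch_spin_relax()), and the real kernel loop additionally disables preemption around each trylock attempt and honours the ->break_lock hint.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_spinlock {
	atomic_bool locked;
};

static bool toy_trylock(struct toy_spinlock *lock)
{
	bool expected = false;

	/* Succeeds only if the lock was free; acquire semantics on success. */
	return atomic_compare_exchange_strong_explicit(&lock->locked,
			&expected, true,
			memory_order_acquire, memory_order_relaxed);
}

static bool toy_can_lock(struct toy_spinlock *lock)
{
	/* Cheap read-only test, analogous to raw_spin_can_lock(). */
	return !atomic_load_explicit(&lock->locked, memory_order_relaxed);
}

static void toy_relax(void)
{
	/* Stand-in for cpu_relax()/arch_spin_relax(), e.g. x86 "pause". */
}

static void toy_lock(struct toy_spinlock *lock)
{
	for (;;) {
		if (toy_trylock(lock))		/* fast path: grab it */
			break;
		while (!toy_can_lock(lock))	/* spin read-only ... */
			toy_relax();		/* ... then race again */
	}
}

static void toy_unlock(struct toy_spinlock *lock)
{
	atomic_store_explicit(&lock->locked, false, memory_order_release);
}

int main(void)
{
	struct toy_spinlock lock = { .locked = false };

	toy_lock(&lock);
	/* critical section */
	toy_unlock(&lock);
	return 0;
}

The inner wait loop only reads the lock word, which is the same reason the kernel spins on raw_##op##_can_lock() instead of hammering the trylock: a read-only spin stays in the local cache instead of bouncing the cache line between CPUs. What the sketch cannot show is the preemption-friendliness that gives BUILD_LOCK_OPS its name: in the kernel, preempt_enable() inside the retry window lets the spinning CPU reschedule while it waits.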