Diffstat (limited to 'include/linux/spinlock_api_smp.h')
-rw-r--r--	include/linux/spinlock_api_smp.h	360
1 file changed, 59 insertions(+), 301 deletions(-)
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459bc..e253ccd7a604 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
 
 int in_lock_functions(unsigned long addr);
 
-#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
+#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))
 
-void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
 							__acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
-							__acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+							__acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+							__acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+							__acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
 							__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
-							__acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-							__acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-							__acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock)		__releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock)		__releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-							__releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-							__releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-							__releases(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+							__releases(lock);
 
 #ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
 #endif
 
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
 	return 0;
 }
 
-static inline int __read_trylock(rwlock_t *lock)
-{
-	preempt_disable();
-	if (_raw_read_trylock(lock)) {
-		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-	preempt_enable();
-	return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
-	preempt_disable();
-	if (_raw_write_trylock(lock)) {
-		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-	preempt_enable();
-	return 0;
-}
-
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
-static inline void __read_lock(rwlock_t *lock)
-{
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 {
 	unsigned long flags;
 
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	/*
 	 * On lockdep we dont want the hand-coded irq-enable of
-	 * _raw_spin_lock_flags() code, because lockdep assumes
+	 * do_raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
 #ifdef CONFIG_LOCKDEP
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 #else
-	_raw_spin_lock_flags(lock, &flags);
+	do_raw_spin_lock_flags(lock, &flags);
 #endif
 	return flags;
 }
 
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
 {
 	local_irq_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
-			     _raw_read_lock_flags, &flags);
-	return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
-	local_irq_disable();
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
-	local_bh_disable();
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
-			     _raw_write_lock_flags, &flags);
-	return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
-	local_irq_disable();
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
-static inline void __write_lock_bh(rwlock_t *lock)
-{
-	local_bh_disable();
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline void __write_lock(rwlock_t *lock)
-{
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
 #endif /* CONFIG_PREEMPT */
 
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
-	preempt_enable();
-}
-
-static inline void __write_unlock(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
+	do_raw_spin_unlock(lock);
 	preempt_enable();
 }
 
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
 					    unsigned long flags)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
 
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
 }
 
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	local_irq_restore(flags);
-	preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	local_irq_enable();
-	preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
+	do_raw_spin_unlock(lock);
 	preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
-					     unsigned long flags)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	local_irq_restore(flags);
-	preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	local_irq_enable();
-	preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
 	return 0;
 }
 
+#include <linux/rwlock_api_smp.h>
+
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
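For orientation, the LOCK_CONTENDED() pattern used throughout the new __raw_spin_* helpers above expands roughly as follows when CONFIG_LOCK_STAT is enabled. This is a paraphrase of the <linux/lockdep.h> macro of that era, shown here for illustration only, not part of the diff:

	/*
	 * LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock),
	 * approximately: try the fast path first; if it fails, record the
	 * contention event, take the lock for real, then record the
	 * acquisition. Without CONFIG_LOCK_STAT it degenerates to a plain
	 * do_raw_spin_lock(lock).
	 */
	if (!do_raw_spin_trylock(lock)) {
		lock_contended(&lock->dep_map, _RET_IP_);
		do_raw_spin_lock(lock);
	}
	lock_acquired(&lock->dep_map, _RET_IP_);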
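And a caller-level sketch of where the renamed entry points sit after this change, assuming the v2.6.33-era <linux/spinlock.h> wrappers. The demo function and lock names below are hypothetical; only the layering in the comment is taken from those headers:

	#include <linux/spinlock.h>

	/*
	 * Layering after the rename (sketch):
	 *   spin_lock(&s)       -> raw_spin_lock(&s.rlock)  (spinlock_t wrapper)
	 *   raw_spin_lock(&r)   -> _raw_spin_lock(&r)       (declared above; out
	 *                                                    of line in
	 *                                                    kernel/spinlock.c
	 *                                                    unless CONFIG_INLINE_*)
	 *   _raw_spin_lock(&r)  -> __raw_spin_lock(&r)      (the inlines above)
	 *   __raw_spin_lock(&r) -> do_raw_spin_lock(&r)     (arch/debug layer,
	 *                                                    formerly _raw_spin_lock)
	 */
	static DEFINE_SPINLOCK(demo_lock);		/* spinlock_t */
	static DEFINE_RAW_SPINLOCK(demo_raw_lock);	/* raw_spinlock_t */

	static void demo(void)
	{
		unsigned long flags;

		spin_lock(&demo_lock);
		spin_unlock(&demo_lock);

		raw_spin_lock_irqsave(&demo_raw_lock, flags);
		raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
	}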