diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2009-11-16 12:48:37 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2009-12-14 17:55:32 -0500 |
commit | 6b6b4792f89346e47437682c7ba3438e6681c0f9 (patch) | |
tree | 3c3cc94369bc5edaeb5ec8b67c8e172708fdf496 /include/linux | |
parent | ef12f10994281e2e44526fa0abf23fdd7d5bd87f (diff) |
locking: Separate rwlock api from spinlock api
Move the rwlock smp api defines and functions into a separate header
file. Makes the -rt selection simpler and less intrusive.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/rwlock_api_smp.h | 277 | ||||
-rw-r--r-- | include/linux/spinlock_api_smp.h | 252 |
2 files changed, 280 insertions, 249 deletions
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h new file mode 100644 index 000000000000..090f876f828d --- /dev/null +++ b/include/linux/rwlock_api_smp.h | |||
@@ -0,0 +1,277 @@ | |||
#ifndef __LINUX_RWLOCK_API_SMP_H
#define __LINUX_RWLOCK_API_SMP_H

/* This header is an internal part of the spinlock API headers. */
#ifndef __LINUX_SPINLOCK_API_SMP_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/rwlock_api_smp.h
 *
 * rwlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

/*
 * Out-of-line (__lockfunc) entry points for the rwlock API; the
 * definitions live in kernel/spinlock.c.  The __acquires/__releases
 * annotations are for sparse lock-context checking.
 */
void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(lock);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);
void __lockfunc _read_unlock(rwlock_t *lock)		__releases(lock);
void __lockfunc _write_unlock(rwlock_t *lock)		__releases(lock);
void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(lock);
void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(lock);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
40 | |||
/*
 * When the corresponding CONFIG_INLINE_* option is enabled, map the
 * out-of-line _read_*()/_write_*() entry points declared above
 * directly onto the inline __read_*()/__write_*() implementations
 * defined below, avoiding the function-call overhead.
 */
#ifdef CONFIG_INLINE_READ_LOCK
#define _read_lock(lock) __read_lock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK
#define _write_lock(lock) __write_lock(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_BH
#define _read_lock_bh(lock) __read_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_BH
#define _write_lock_bh(lock) __write_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_IRQ
#define _read_lock_irq(lock) __read_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
#define _write_lock_irq(lock) __write_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_READ_TRYLOCK
#define _read_trylock(lock) __read_trylock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_TRYLOCK
#define _write_trylock(lock) __write_trylock(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK
#define _read_unlock(lock) __read_unlock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK
#define _write_unlock(lock) __write_unlock(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_BH
#define _read_unlock_bh(lock) __read_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
#define _write_unlock_bh(lock) __write_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
#define _read_unlock_irq(lock) __read_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
#define _write_unlock_irq(lock) __write_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
#endif
112 | |||
/*
 * Try to take @lock for reading without spinning.
 * Returns 1 on success with preemption left disabled and the read
 * acquisition reported to lockdep (trylock = 1); returns 0 on failure
 * with preemption re-enabled and the lock untouched.
 */
static inline int __read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	/* Failed: undo the preempt_disable() done above. */
	preempt_enable();
	return 0;
}
123 | |||
/*
 * Try to take @lock for writing without spinning.
 * Returns 1 on success with preemption left disabled and the (exclusive)
 * acquisition reported to lockdep (trylock = 1); returns 0 on failure
 * with preemption re-enabled and the lock untouched.
 */
static inline int __write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	/* Failed: undo the preempt_disable() done above. */
	preempt_enable();
	return 0;
}
134 | |||
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

/*
 * Take @lock for reading: disable preemption, tell lockdep about the
 * read acquisition, then acquire via the LOCK_CONTENDED() wrapper
 * (trylock fast path, spinning slow path).
 */
static inline void __read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
148 | |||
/*
 * Take @lock for reading with local interrupts disabled.
 * Returns the previous interrupt state (flags) for a later
 * __read_unlock_irqrestore().  IRQs are disabled before preemption
 * and before the lockdep annotation.
 */
static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * The _FLAGS variant lets architectures that support it re-enable
	 * interrupts from @flags while spinning (_raw_read_lock_flags).
	 */
	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
			     _raw_read_lock_flags, &flags);
	return flags;
}
160 | |||
/*
 * Take @lock for reading with local interrupts unconditionally
 * disabled (no saved flags; pair with __read_unlock_irq()).
 */
static inline void __read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
168 | |||
/*
 * Take @lock for reading with softirqs (bottom halves) disabled;
 * pair with __read_unlock_bh().
 */
static inline void __read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
176 | |||
/*
 * Take @lock for writing with local interrupts disabled.
 * Returns the previous interrupt state (flags) for a later
 * __write_unlock_irqrestore().
 */
static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * The _FLAGS variant lets architectures that support it re-enable
	 * interrupts from @flags while spinning (_raw_write_lock_flags).
	 */
	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
			     _raw_write_lock_flags, &flags);
	return flags;
}
188 | |||
/*
 * Take @lock for writing with local interrupts unconditionally
 * disabled (no saved flags; pair with __write_unlock_irq()).
 */
static inline void __write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
196 | |||
/*
 * Take @lock for writing with softirqs (bottom halves) disabled;
 * pair with __write_unlock_bh().
 */
static inline void __write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
204 | |||
/*
 * Take @lock for writing: disable preemption, tell lockdep about the
 * exclusive acquisition, then acquire via the LOCK_CONTENDED() wrapper.
 */
static inline void __write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
213 | |||
/*
 * Release a write-held @lock: lockdep release annotation first, then
 * the raw unlock, then re-enable preemption (reverse of __write_lock()).
 */
static inline void __write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
220 | |||
/*
 * Release a read-held @lock: lockdep release annotation first, then
 * the raw unlock, then re-enable preemption (reverse of __read_lock()).
 */
static inline void __read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
227 | |||
/*
 * Release a read-held @lock taken with __read_lock_irqsave(), restoring
 * the interrupt state from @flags.  The lock is dropped before IRQs are
 * restored and preemption is re-enabled.
 */
static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
235 | |||
/*
 * Release a read-held @lock taken with __read_lock_irq(),
 * unconditionally re-enabling local interrupts.
 */
static inline void __read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
243 | |||
/*
 * Release a read-held @lock taken with __read_lock_bh().
 * Preemption is re-enabled without an immediate resched check;
 * local_bh_enable_ip() then re-enables softirqs, with the caller's
 * return address passed so the bh-enable is attributed to the caller.
 */
static inline void __read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
251 | |||
/*
 * Release a write-held @lock taken with __write_lock_irqsave(),
 * restoring the interrupt state from @flags.  The lock is dropped
 * before IRQs are restored and preemption is re-enabled.
 */
static inline void __write_unlock_irqrestore(rwlock_t *lock,
					     unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
260 | |||
/*
 * Release a write-held @lock taken with __write_lock_irq(),
 * unconditionally re-enabling local interrupts.
 */
static inline void __write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
268 | |||
/*
 * Release a write-held @lock taken with __write_lock_bh().
 * Preemption is re-enabled without an immediate resched check;
 * local_bh_enable_ip() then re-enables softirqs, with the caller's
 * return address passed so the bh-enable is attributed to the caller.
 */
static inline void __write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8264a7f459bc..a2b2c9df91de 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h | |||
@@ -24,102 +24,41 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) | |||
24 | __acquires(lock); | 24 | __acquires(lock); |
25 | void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) | 25 | void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) |
26 | __acquires(lock); | 26 | __acquires(lock); |
27 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); | ||
28 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); | ||
29 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); | 27 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); |
30 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); | ||
31 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); | ||
32 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); | 28 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); |
33 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); | 29 | |
34 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); | ||
35 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | 30 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) |
36 | __acquires(lock); | 31 | __acquires(lock); |
37 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) | 32 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) |
38 | __acquires(lock); | 33 | __acquires(lock); |
39 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
40 | __acquires(lock); | ||
41 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
42 | __acquires(lock); | ||
43 | int __lockfunc _spin_trylock(spinlock_t *lock); | 34 | int __lockfunc _spin_trylock(spinlock_t *lock); |
44 | int __lockfunc _read_trylock(rwlock_t *lock); | ||
45 | int __lockfunc _write_trylock(rwlock_t *lock); | ||
46 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | 35 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); |
47 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock); | 36 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock); |
48 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); | ||
49 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); | ||
50 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock); | 37 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock); |
51 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); | ||
52 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); | ||
53 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock); | 38 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock); |
54 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); | ||
55 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); | ||
56 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | 39 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) |
57 | __releases(lock); | 40 | __releases(lock); |
58 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
59 | __releases(lock); | ||
60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
61 | __releases(lock); | ||
62 | 41 | ||
63 | #ifdef CONFIG_INLINE_SPIN_LOCK | 42 | #ifdef CONFIG_INLINE_SPIN_LOCK |
64 | #define _spin_lock(lock) __spin_lock(lock) | 43 | #define _spin_lock(lock) __spin_lock(lock) |
65 | #endif | 44 | #endif |
66 | 45 | ||
67 | #ifdef CONFIG_INLINE_READ_LOCK | ||
68 | #define _read_lock(lock) __read_lock(lock) | ||
69 | #endif | ||
70 | |||
71 | #ifdef CONFIG_INLINE_WRITE_LOCK | ||
72 | #define _write_lock(lock) __write_lock(lock) | ||
73 | #endif | ||
74 | |||
75 | #ifdef CONFIG_INLINE_SPIN_LOCK_BH | 46 | #ifdef CONFIG_INLINE_SPIN_LOCK_BH |
76 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) | 47 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) |
77 | #endif | 48 | #endif |
78 | 49 | ||
79 | #ifdef CONFIG_INLINE_READ_LOCK_BH | ||
80 | #define _read_lock_bh(lock) __read_lock_bh(lock) | ||
81 | #endif | ||
82 | |||
83 | #ifdef CONFIG_INLINE_WRITE_LOCK_BH | ||
84 | #define _write_lock_bh(lock) __write_lock_bh(lock) | ||
85 | #endif | ||
86 | |||
87 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ | 50 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ |
88 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) | 51 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) |
89 | #endif | 52 | #endif |
90 | 53 | ||
91 | #ifdef CONFIG_INLINE_READ_LOCK_IRQ | ||
92 | #define _read_lock_irq(lock) __read_lock_irq(lock) | ||
93 | #endif | ||
94 | |||
95 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ | ||
96 | #define _write_lock_irq(lock) __write_lock_irq(lock) | ||
97 | #endif | ||
98 | |||
99 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE | 54 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE |
100 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) | 55 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) |
101 | #endif | 56 | #endif |
102 | 57 | ||
103 | #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE | ||
104 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) | ||
105 | #endif | ||
106 | |||
107 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE | ||
108 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) | ||
109 | #endif | ||
110 | |||
111 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK | 58 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK |
112 | #define _spin_trylock(lock) __spin_trylock(lock) | 59 | #define _spin_trylock(lock) __spin_trylock(lock) |
113 | #endif | 60 | #endif |
114 | 61 | ||
115 | #ifdef CONFIG_INLINE_READ_TRYLOCK | ||
116 | #define _read_trylock(lock) __read_trylock(lock) | ||
117 | #endif | ||
118 | |||
119 | #ifdef CONFIG_INLINE_WRITE_TRYLOCK | ||
120 | #define _write_trylock(lock) __write_trylock(lock) | ||
121 | #endif | ||
122 | |||
123 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH | 62 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH |
124 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) | 63 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) |
125 | #endif | 64 | #endif |
@@ -128,50 +67,18 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
128 | #define _spin_unlock(lock) __spin_unlock(lock) | 67 | #define _spin_unlock(lock) __spin_unlock(lock) |
129 | #endif | 68 | #endif |
130 | 69 | ||
131 | #ifdef CONFIG_INLINE_READ_UNLOCK | ||
132 | #define _read_unlock(lock) __read_unlock(lock) | ||
133 | #endif | ||
134 | |||
135 | #ifdef CONFIG_INLINE_WRITE_UNLOCK | ||
136 | #define _write_unlock(lock) __write_unlock(lock) | ||
137 | #endif | ||
138 | |||
139 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH | 70 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH |
140 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) | 71 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) |
141 | #endif | 72 | #endif |
142 | 73 | ||
143 | #ifdef CONFIG_INLINE_READ_UNLOCK_BH | ||
144 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) | ||
145 | #endif | ||
146 | |||
147 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH | ||
148 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) | ||
149 | #endif | ||
150 | |||
151 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ | 74 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ |
152 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) | 75 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) |
153 | #endif | 76 | #endif |
154 | 77 | ||
155 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ | ||
156 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) | ||
157 | #endif | ||
158 | |||
159 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ | ||
160 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) | ||
161 | #endif | ||
162 | |||
163 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE | 78 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE |
164 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) | 79 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) |
165 | #endif | 80 | #endif |
166 | 81 | ||
167 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE | ||
168 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) | ||
169 | #endif | ||
170 | |||
171 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
172 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) | ||
173 | #endif | ||
174 | |||
175 | static inline int __spin_trylock(spinlock_t *lock) | 82 | static inline int __spin_trylock(spinlock_t *lock) |
176 | { | 83 | { |
177 | preempt_disable(); | 84 | preempt_disable(); |
@@ -183,28 +90,6 @@ static inline int __spin_trylock(spinlock_t *lock) | |||
183 | return 0; | 90 | return 0; |
184 | } | 91 | } |
185 | 92 | ||
186 | static inline int __read_trylock(rwlock_t *lock) | ||
187 | { | ||
188 | preempt_disable(); | ||
189 | if (_raw_read_trylock(lock)) { | ||
190 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); | ||
191 | return 1; | ||
192 | } | ||
193 | preempt_enable(); | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static inline int __write_trylock(rwlock_t *lock) | ||
198 | { | ||
199 | preempt_disable(); | ||
200 | if (_raw_write_trylock(lock)) { | ||
201 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
202 | return 1; | ||
203 | } | ||
204 | preempt_enable(); | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | /* | 93 | /* |
209 | * If lockdep is enabled then we use the non-preemption spin-ops | 94 | * If lockdep is enabled then we use the non-preemption spin-ops |
210 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | 95 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are |
@@ -212,13 +97,6 @@ static inline int __write_trylock(rwlock_t *lock) | |||
212 | */ | 97 | */ |
213 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 98 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
214 | 99 | ||
215 | static inline void __read_lock(rwlock_t *lock) | ||
216 | { | ||
217 | preempt_disable(); | ||
218 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
219 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
220 | } | ||
221 | |||
222 | static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) | 100 | static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) |
223 | { | 101 | { |
224 | unsigned long flags; | 102 | unsigned long flags; |
@@ -255,62 +133,6 @@ static inline void __spin_lock_bh(spinlock_t *lock) | |||
255 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | 133 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); |
256 | } | 134 | } |
257 | 135 | ||
258 | static inline unsigned long __read_lock_irqsave(rwlock_t *lock) | ||
259 | { | ||
260 | unsigned long flags; | ||
261 | |||
262 | local_irq_save(flags); | ||
263 | preempt_disable(); | ||
264 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
265 | LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, | ||
266 | _raw_read_lock_flags, &flags); | ||
267 | return flags; | ||
268 | } | ||
269 | |||
270 | static inline void __read_lock_irq(rwlock_t *lock) | ||
271 | { | ||
272 | local_irq_disable(); | ||
273 | preempt_disable(); | ||
274 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
275 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
276 | } | ||
277 | |||
278 | static inline void __read_lock_bh(rwlock_t *lock) | ||
279 | { | ||
280 | local_bh_disable(); | ||
281 | preempt_disable(); | ||
282 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
283 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
284 | } | ||
285 | |||
286 | static inline unsigned long __write_lock_irqsave(rwlock_t *lock) | ||
287 | { | ||
288 | unsigned long flags; | ||
289 | |||
290 | local_irq_save(flags); | ||
291 | preempt_disable(); | ||
292 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
293 | LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, | ||
294 | _raw_write_lock_flags, &flags); | ||
295 | return flags; | ||
296 | } | ||
297 | |||
298 | static inline void __write_lock_irq(rwlock_t *lock) | ||
299 | { | ||
300 | local_irq_disable(); | ||
301 | preempt_disable(); | ||
302 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
303 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
304 | } | ||
305 | |||
306 | static inline void __write_lock_bh(rwlock_t *lock) | ||
307 | { | ||
308 | local_bh_disable(); | ||
309 | preempt_disable(); | ||
310 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
311 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
312 | } | ||
313 | |||
314 | static inline void __spin_lock(spinlock_t *lock) | 136 | static inline void __spin_lock(spinlock_t *lock) |
315 | { | 137 | { |
316 | preempt_disable(); | 138 | preempt_disable(); |
@@ -318,13 +140,6 @@ static inline void __spin_lock(spinlock_t *lock) | |||
318 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | 140 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); |
319 | } | 141 | } |
320 | 142 | ||
321 | static inline void __write_lock(rwlock_t *lock) | ||
322 | { | ||
323 | preempt_disable(); | ||
324 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
325 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
326 | } | ||
327 | |||
328 | #endif /* CONFIG_PREEMPT */ | 143 | #endif /* CONFIG_PREEMPT */ |
329 | 144 | ||
330 | static inline void __spin_unlock(spinlock_t *lock) | 145 | static inline void __spin_unlock(spinlock_t *lock) |
@@ -334,20 +149,6 @@ static inline void __spin_unlock(spinlock_t *lock) | |||
334 | preempt_enable(); | 149 | preempt_enable(); |
335 | } | 150 | } |
336 | 151 | ||
337 | static inline void __write_unlock(rwlock_t *lock) | ||
338 | { | ||
339 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
340 | _raw_write_unlock(lock); | ||
341 | preempt_enable(); | ||
342 | } | ||
343 | |||
344 | static inline void __read_unlock(rwlock_t *lock) | ||
345 | { | ||
346 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
347 | _raw_read_unlock(lock); | ||
348 | preempt_enable(); | ||
349 | } | ||
350 | |||
351 | static inline void __spin_unlock_irqrestore(spinlock_t *lock, | 152 | static inline void __spin_unlock_irqrestore(spinlock_t *lock, |
352 | unsigned long flags) | 153 | unsigned long flags) |
353 | { | 154 | { |
@@ -373,55 +174,6 @@ static inline void __spin_unlock_bh(spinlock_t *lock) | |||
373 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | 174 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); |
374 | } | 175 | } |
375 | 176 | ||
376 | static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
377 | { | ||
378 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
379 | _raw_read_unlock(lock); | ||
380 | local_irq_restore(flags); | ||
381 | preempt_enable(); | ||
382 | } | ||
383 | |||
384 | static inline void __read_unlock_irq(rwlock_t *lock) | ||
385 | { | ||
386 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
387 | _raw_read_unlock(lock); | ||
388 | local_irq_enable(); | ||
389 | preempt_enable(); | ||
390 | } | ||
391 | |||
392 | static inline void __read_unlock_bh(rwlock_t *lock) | ||
393 | { | ||
394 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
395 | _raw_read_unlock(lock); | ||
396 | preempt_enable_no_resched(); | ||
397 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
398 | } | ||
399 | |||
400 | static inline void __write_unlock_irqrestore(rwlock_t *lock, | ||
401 | unsigned long flags) | ||
402 | { | ||
403 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
404 | _raw_write_unlock(lock); | ||
405 | local_irq_restore(flags); | ||
406 | preempt_enable(); | ||
407 | } | ||
408 | |||
409 | static inline void __write_unlock_irq(rwlock_t *lock) | ||
410 | { | ||
411 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
412 | _raw_write_unlock(lock); | ||
413 | local_irq_enable(); | ||
414 | preempt_enable(); | ||
415 | } | ||
416 | |||
417 | static inline void __write_unlock_bh(rwlock_t *lock) | ||
418 | { | ||
419 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
420 | _raw_write_unlock(lock); | ||
421 | preempt_enable_no_resched(); | ||
422 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
423 | } | ||
424 | |||
425 | static inline int __spin_trylock_bh(spinlock_t *lock) | 177 | static inline int __spin_trylock_bh(spinlock_t *lock) |
426 | { | 178 | { |
427 | local_bh_disable(); | 179 | local_bh_disable(); |
@@ -435,4 +187,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock) | |||
435 | return 0; | 187 | return 0; |
436 | } | 188 | } |
437 | 189 | ||
190 | #include <linux/rwlock_api_smp.h> | ||
191 | |||
438 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ | 192 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ |