author     Thomas Gleixner <tglx@linutronix.de>    2009-11-09 10:21:41 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2009-11-13 14:53:28 -0500
commit     8e13c7b772387f55dc05c6a0e5b30010c3c46ff9 (patch)
tree       7cfa275b95febce0cf29c5c8faf2bc7566305265 /kernel/spinlock.c
parent     6beb000923882f6204ea2cfcd932e568e900803f (diff)
locking: Reduce ifdefs in kernel/spinlock.c
With the Kconfig based inline decisions we can remove extra ifdefs in
kernel/spinlock.c by creating the complex lockbreak functions as inlines
which are inserted into the non inlined lock functions.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <20091109151428.548614772@linutronix.de>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
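For orientation, the shape this leaves kernel/spinlock.c in can be sketched as
follows. This is an illustrative, hand-written sketch, not text from the patch:
it assumes the CONFIG_INLINE_SPIN_LOCK switch (a sibling of the
CONFIG_INLINE_SPIN_TRYLOCK / CONFIG_INLINE_SPIN_UNLOCK switches visible in the
hunks below) and the __spin_lock() inline from include/linux/spinlock_api_smp.h
mentioned in the new comment block:

/*
 * Illustrative sketch only -- once the lockbreak-aware inlines are built
 * locally, a single #if block selects where __spin_lock() comes from, and
 * the exported out-of-line functions need no further #ifdefs.
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/* __spin_lock() comes from include/linux/spinlock_api_smp.h */
#else
/* __spin_lock() is the lockbreak-aware version built by BUILD_LOCK_OPS() */
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK                 /* assumed Kconfig symbol */
void __lockfunc _spin_lock(spinlock_t *lock)
{
        __spin_lock(lock);      /* embeds whichever inline was selected */
}
EXPORT_SYMBOL(_spin_lock);
#endif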
Diffstat (limited to 'kernel/spinlock.c')
-rw-r--r--   kernel/spinlock.c | 258
1 file changed, 127 insertions(+), 131 deletions(-)
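As a reading aid for the macro-heavy first hunk, the first function generated
by BUILD_LOCK_OPS(spin, spinlock) expands roughly to the following
(hand-expanded sketch, not part of the patch; spin_can_lock() and
_raw_spin_relax() are the op-specific helpers selected by the macro
parameters):

void __lockfunc __spin_lock(spinlock_t *lock)
{
        for (;;) {
                preempt_disable();
                if (likely(_raw_spin_trylock(lock)))
                        break;
                preempt_enable();

                /* signal the holder to drop the lock, then spin politely */
                if (!(lock)->break_lock)
                        (lock)->break_lock = 1;
                while (!spin_can_lock(lock) && (lock)->break_lock)
                        _raw_spin_relax(&lock->raw_lock);
        }
        (lock)->break_lock = 0;
}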
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 235a9579a875..41e042219ff6 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,6 +21,133 @@
 #include <linux/debug_locks.h>
 #include <linux/module.h>
 
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+/*
+ * The __lock_function inlines are taken from
+ * include/linux/spinlock_api_smp.h
+ */
+#else
+/*
+ * We build the __lock_function inlines here. They are too large for
+ * inlining all over the place, but here is only one user per function
+ * which embedds them into the calling _lock_function below.
+ *
+ * This could be a long-held lock. We both prepare to spin for a long
+ * time (making _this_ CPU preemptable if possible), and we also signal
+ * towards that other CPU that it should break the lock ASAP.
+ */
+#define BUILD_LOCK_OPS(op, locktype) \
+void __lockfunc __##op##_lock(locktype##_t *lock) \
+{ \
+        for (;;) { \
+                preempt_disable(); \
+                if (likely(_raw_##op##_trylock(lock))) \
+                        break; \
+                preempt_enable(); \
+ \
+                if (!(lock)->break_lock) \
+                        (lock)->break_lock = 1; \
+                while (!op##_can_lock(lock) && (lock)->break_lock) \
+                        _raw_##op##_relax(&lock->raw_lock); \
+        } \
+        (lock)->break_lock = 0; \
+} \
+ \
+unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
+{ \
+        unsigned long flags; \
+ \
+        for (;;) { \
+                preempt_disable(); \
+                local_irq_save(flags); \
+                if (likely(_raw_##op##_trylock(lock))) \
+                        break; \
+                local_irq_restore(flags); \
+                preempt_enable(); \
+ \
+                if (!(lock)->break_lock) \
+                        (lock)->break_lock = 1; \
+                while (!op##_can_lock(lock) && (lock)->break_lock) \
+                        _raw_##op##_relax(&lock->raw_lock); \
+        } \
+        (lock)->break_lock = 0; \
+        return flags; \
+} \
+ \
+void __lockfunc __##op##_lock_irq(locktype##_t *lock) \
+{ \
+        _##op##_lock_irqsave(lock); \
+} \
+ \
+void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
+{ \
+        unsigned long flags; \
+ \
+        /*                                                      */ \
+        /* Careful: we must exclude softirqs too, hence the     */ \
+        /* irq-disabling. We use the generic preemption-aware   */ \
+        /* function:                                            */ \
+        /**/ \
+        flags = _##op##_lock_irqsave(lock); \
+        local_bh_disable(); \
+        local_irq_restore(flags); \
+} \
+
+/*
+ * Build preemption-friendly versions of the following
+ * lock-spinning functions:
+ *
+ *         __[spin|read|write]_lock()
+ *         __[spin|read|write]_lock_irq()
+ *         __[spin|read|write]_lock_irqsave()
+ *         __[spin|read|write]_lock_bh()
+ */
+BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(read, rwlock);
+BUILD_LOCK_OPS(write, rwlock);
+
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+{
+        preempt_disable();
+        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+EXPORT_SYMBOL(_spin_lock_nested);
+
+unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
+                                                   int subclass)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        preempt_disable();
+        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+        LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
+                             _raw_spin_lock_flags, &flags);
+        return flags;
+}
+EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+                                     struct lockdep_map *nest_lock)
+{
+        preempt_disable();
+        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
+#endif
+
 #ifndef CONFIG_INLINE_SPIN_TRYLOCK
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
@@ -45,13 +172,6 @@ int __lockfunc _write_trylock(rwlock_t *lock)
 EXPORT_SYMBOL(_write_trylock);
 #endif
 
-/*
- * If lockdep is enabled then we use the non-preemption spin-ops
- * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
- * not re-enabled during lock-acquire (which the preempt-spin-ops do):
- */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-
 #ifndef CONFIG_INLINE_READ_LOCK
 void __lockfunc _read_lock(rwlock_t *lock)
 {
@@ -148,130 +268,6 @@ void __lockfunc _write_lock(rwlock_t *lock)
 EXPORT_SYMBOL(_write_lock);
 #endif
 
-#else /* CONFIG_PREEMPT: */
-
-/*
- * This could be a long-held lock. We both prepare to spin for a long
- * time (making _this_ CPU preemptable if possible), and we also signal
- * towards that other CPU that it should break the lock ASAP.
- *
- * (We do this in a function because inlining it would be excessive.)
- */
-
-#define BUILD_LOCK_OPS(op, locktype) \
-void __lockfunc _##op##_lock(locktype##_t *lock) \
-{ \
-        for (;;) { \
-                preempt_disable(); \
-                if (likely(_raw_##op##_trylock(lock))) \
-                        break; \
-                preempt_enable(); \
- \
-                if (!(lock)->break_lock) \
-                        (lock)->break_lock = 1; \
-                while (!op##_can_lock(lock) && (lock)->break_lock) \
-                        _raw_##op##_relax(&lock->raw_lock); \
-        } \
-        (lock)->break_lock = 0; \
-} \
- \
-EXPORT_SYMBOL(_##op##_lock); \
- \
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
-{ \
-        unsigned long flags; \
- \
-        for (;;) { \
-                preempt_disable(); \
-                local_irq_save(flags); \
-                if (likely(_raw_##op##_trylock(lock))) \
-                        break; \
-                local_irq_restore(flags); \
-                preempt_enable(); \
- \
-                if (!(lock)->break_lock) \
-                        (lock)->break_lock = 1; \
-                while (!op##_can_lock(lock) && (lock)->break_lock) \
-                        _raw_##op##_relax(&lock->raw_lock); \
-        } \
-        (lock)->break_lock = 0; \
-        return flags; \
-} \
- \
-EXPORT_SYMBOL(_##op##_lock_irqsave); \
- \
-void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
-{ \
-        _##op##_lock_irqsave(lock); \
-} \
- \
-EXPORT_SYMBOL(_##op##_lock_irq); \
- \
-void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
-{ \
-        unsigned long flags; \
- \
-        /*                                                      */ \
-        /* Careful: we must exclude softirqs too, hence the     */ \
-        /* irq-disabling. We use the generic preemption-aware   */ \
-        /* function:                                            */ \
-        /**/ \
-        flags = _##op##_lock_irqsave(lock); \
-        local_bh_disable(); \
-        local_irq_restore(flags); \
-} \
- \
-EXPORT_SYMBOL(_##op##_lock_bh)
-
-/*
- * Build preemption-friendly versions of the following
- * lock-spinning functions:
- *
- *         _[spin|read|write]_lock()
- *         _[spin|read|write]_lock_irq()
- *         _[spin|read|write]_lock_irqsave()
- *         _[spin|read|write]_lock_bh()
- */
-BUILD_LOCK_OPS(spin, spinlock);
-BUILD_LOCK_OPS(read, rwlock);
-BUILD_LOCK_OPS(write, rwlock);
-
-#endif /* CONFIG_PREEMPT */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
-{
-        preempt_disable();
-        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-EXPORT_SYMBOL(_spin_lock_nested);
-
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
-{
-        unsigned long flags;
-
-        local_irq_save(flags);
-        preempt_disable();
-        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-        LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
-                             _raw_spin_lock_flags, &flags);
-        return flags;
-}
-EXPORT_SYMBOL(_spin_lock_irqsave_nested);
-
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
-                                     struct lockdep_map *nest_lock)
-{
-        preempt_disable();
-        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
-        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-EXPORT_SYMBOL(_spin_lock_nest_lock);
-
-#endif
-
 #ifndef CONFIG_INLINE_SPIN_UNLOCK
 void __lockfunc _spin_unlock(spinlock_t *lock)
 {