path: root/kernel
author	Thomas Gleixner <tglx@linutronix.de>	2009-12-02 14:02:59 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 17:55:32 -0500
commit	c2f21ce2e31286a0a32f8da0a7856e9ca1122ef3 (patch)
tree	6cc8d1fd37ffa6d02481353857b92734241f4dd0 /kernel
parent	e5931943d02bf751b1ec849c0d2ade23d76a8d41 (diff)
locking: Implement new raw_spinlock
Now that the raw_spin name space is freed up, we can implement raw_spinlock and the related functions which are used to annotate the locks which are not converted to sleeping spinlocks in preempt-rt.

A side effect is that only such locks can be used with the low level lock functions which circumvent lockdep.

For !rt spin_* functions are mapped to the raw_spin* implementations.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
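As a quick illustration of the annotation this changelog describes (not part of the patch; the lock and function names below are made up), a lock that must remain a true spinning lock even on preempt-rt is declared and taken through the raw_spin_* API:

/* Hypothetical example: a low-level lock that must never be converted
 * to a sleeping lock on preempt-rt is annotated as a raw_spinlock.
 */
static DEFINE_RAW_SPINLOCK(my_hw_lock);		/* made-up name */

static void my_hw_access(void)			/* made-up function */
{
	unsigned long flags;

	raw_spin_lock_irqsave(&my_hw_lock, flags);
	/* critical section that must run with hard interrupts disabled */
	raw_spin_unlock_irqrestore(&my_hw_lock, flags);
}

Ordinary code keeps declaring spinlock_t and calling spin_lock()/spin_unlock() as before.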
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/mutex-debug.h	12
-rw-r--r--	kernel/sched.c	2
-rw-r--r--	kernel/spinlock.c	34
3 files changed, 25 insertions(+), 23 deletions(-)
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 7bebbd15b342..57d527a16f9d 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock)
 						\
 	DEBUG_LOCKS_WARN_ON(in_interrupt());	\
 	local_irq_save(flags);			\
-	arch_spin_lock(&(lock)->raw_lock);	\
+	arch_spin_lock(&(lock)->rlock.raw_lock);\
 	DEBUG_LOCKS_WARN_ON(l->magic != l);	\
 } while (0)
 
 #define spin_unlock_mutex(lock, flags)		\
 	do {					\
-		arch_spin_unlock(&(lock)->raw_lock);	\
+		arch_spin_unlock(&(lock)->rlock.raw_lock);	\
 		local_irq_restore(flags);	\
 		preempt_check_resched();	\
 	} while (0)
diff --git a/kernel/sched.c b/kernel/sched.c
index fd05861b2111..e6acf2d7b753 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -884,7 +884,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
-	rq->lock.owner = current;
+	rq->lock.rlock.owner = current;
 #endif
 	/*
 	 * If we are tracking spinlock dependencies then we have to
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index fbb5f8b78357..54eb7dd3c608 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -32,6 +32,8 @@
  * include/linux/spinlock_api_smp.h
  */
 #else
+#define raw_read_can_lock(l)	read_can_lock(l)
+#define raw_write_can_lock(l)	write_can_lock(l)
 /*
  * We build the __lock_function inlines here. They are too large for
  * inlining all over the place, but here is only one user per function
@@ -52,7 +54,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (!op##_can_lock(lock) && (lock)->break_lock)	\
+		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
 			arch_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
@@ -72,7 +74,7 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (!op##_can_lock(lock) && (lock)->break_lock)	\
+		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
 			arch_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
@@ -107,14 +109,14 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
  * __[spin|read|write]_lock_irqsave()
  * __[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(spin, raw_spinlock);
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
 
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_TRYLOCK
-int __lockfunc _spin_trylock(spinlock_t *lock)
+int __lockfunc _spin_trylock(raw_spinlock_t *lock)
 {
 	return __spin_trylock(lock);
 }
@@ -122,7 +124,7 @@ EXPORT_SYMBOL(_spin_trylock);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock)
 {
 	return __spin_trylock_bh(lock);
 }
@@ -130,7 +132,7 @@ EXPORT_SYMBOL(_spin_trylock_bh);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK
-void __lockfunc _spin_lock(spinlock_t *lock)
+void __lockfunc _spin_lock(raw_spinlock_t *lock)
 {
 	__spin_lock(lock);
 }
@@ -138,7 +140,7 @@ EXPORT_SYMBOL(_spin_lock);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
 {
 	return __spin_lock_irqsave(lock);
 }
@@ -146,7 +148,7 @@ EXPORT_SYMBOL(_spin_lock_irqsave);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
+void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)
 {
 	__spin_lock_irq(lock);
 }
@@ -154,7 +156,7 @@ EXPORT_SYMBOL(_spin_lock_irq);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK_BH
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
+void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)
 {
 	__spin_lock_bh(lock);
 }
@@ -162,7 +164,7 @@ EXPORT_SYMBOL(_spin_lock_bh);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK
-void __lockfunc _spin_unlock(spinlock_t *lock)
+void __lockfunc _spin_unlock(raw_spinlock_t *lock)
 {
 	__spin_unlock(lock);
 }
@@ -170,7 +172,7 @@ EXPORT_SYMBOL(_spin_unlock);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+void __lockfunc _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 {
 	__spin_unlock_irqrestore(lock, flags);
 }
@@ -178,7 +180,7 @@ EXPORT_SYMBOL(_spin_unlock_irqrestore);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)
 {
 	__spin_unlock_irq(lock);
 }
@@ -186,7 +188,7 @@ EXPORT_SYMBOL(_spin_unlock_irq);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)
 {
 	__spin_unlock_bh(lock);
 }
@@ -339,7 +341,7 @@ EXPORT_SYMBOL(_write_unlock_bh);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
@@ -347,7 +349,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_spin_lock_nested);
 
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
+unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock,
 						   int subclass)
 {
 	unsigned long flags;
@@ -361,7 +363,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
 }
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock,
 				     struct lockdep_map *nest_lock)
 {
 	preempt_disable();
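For reference, the !rt mapping mentioned in the changelog ends up looking roughly like the sketch below: spinlock_t wraps a raw_spinlock in its rlock member (which is why the mutex-debug.h and sched.c hunks above now dereference ->rlock), and the spin_* API forwards to the raw_spin_* implementations. This is a simplified, illustrative sketch only; the real definitions in include/linux/spinlock_types.h and include/linux/spinlock.h also carry the lockdep and debug fields.

/* Simplified sketch of the !rt layout and mapping, not verbatim kernel code. */
typedef struct spinlock {
	struct raw_spinlock rlock;
} spinlock_t;

static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}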