author		Thomas Gleixner <tglx@linutronix.de>	2009-12-02 14:02:59 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 17:55:32 -0500
commit		c2f21ce2e31286a0a32f8da0a7856e9ca1122ef3
tree		6cc8d1fd37ffa6d02481353857b92734241f4dd0	/include/linux/spinlock_api_smp.h
parent		e5931943d02bf751b1ec849c0d2ade23d76a8d41
locking: Implement new raw_spinlock
Now that the raw_spin namespace is freed up, we can implement
raw_spinlock and the related functions, which are used to annotate the
locks that are not converted to sleeping spinlocks in preempt-rt.

A side effect is that only such locks can be used with the low-level
lock functions that circumvent lockdep.

For !rt, the spin_* functions are mapped to the raw_spin* implementations.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
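
For context, the annotation this patch enables looks as follows at a call
site. This is an illustrative sketch, not part of the patch: the lock name
my_hw_lock and the function around it are made up, but DEFINE_RAW_SPINLOCK
and the raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair are the
interfaces this series introduces.

	#include <linux/spinlock.h>

	/*
	 * A lock that must remain a true spinning lock even on preempt-rt,
	 * e.g. because it is taken from low-level interrupt handling code.
	 * (my_hw_lock is a hypothetical example name.)
	 */
	static DEFINE_RAW_SPINLOCK(my_hw_lock);

	static void my_hw_access(void)
	{
		unsigned long flags;

		/* Disables interrupts and spins; never sleeps, even on -rt. */
		raw_spin_lock_irqsave(&my_hw_lock, flags);
		/* ... touch hardware state here ... */
		raw_spin_unlock_irqrestore(&my_hw_lock, flags);
	}

On a !rt kernel the distinction is cosmetic: as the series maps it,
spinlock_t wraps a raw_spinlock_t and spin_lock() simply forwards to
raw_spin_lock(), roughly:

	static inline void spin_lock(spinlock_t *lock)
	{
		raw_spin_lock(&lock->rlock);
	}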
Diffstat (limited to 'include/linux/spinlock_api_smp.h')
-rw-r--r--	include/linux/spinlock_api_smp.h	51
1 file changed, 27 insertions(+), 24 deletions(-)
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index a2b2c9df91de..eabe5068d138 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,26 +17,29 @@
 
 int in_lock_functions(unsigned long addr);
 
-#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
+#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))
 
-void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _spin_lock(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
 							__acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+void __lockfunc
+_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
 							__acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
+void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)	__acquires(lock);
 
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
 							__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc
+_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
 							__acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+int __lockfunc _spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _spin_unlock(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc
+_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 							__releases(lock);
 
 #ifdef CONFIG_INLINE_SPIN_LOCK
@@ -79,7 +82,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 #define _spin_unlock_irqrestore(lock, flags)	__spin_unlock_irqrestore(lock, flags)
 #endif
 
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __spin_trylock(raw_spinlock_t *lock)
 {
 	preempt_disable();
 	if (_raw_spin_trylock(lock)) {
@@ -97,7 +100,7 @@ static inline int __spin_trylock(spinlock_t *lock)
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
 {
 	unsigned long flags;
 
@@ -117,7 +120,7 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
 	return flags;
 }
 
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __spin_lock_irq(raw_spinlock_t *lock)
 {
 	local_irq_disable();
 	preempt_disable();
@@ -125,7 +128,7 @@ static inline void __spin_lock_irq(spinlock_t *lock)
 	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __spin_lock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
@@ -133,7 +136,7 @@ static inline void __spin_lock_bh(spinlock_t *lock)
 	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __spin_lock(raw_spinlock_t *lock)
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -142,14 +145,14 @@ static inline void __spin_lock(spinlock_t *lock)
 
 #endif /* CONFIG_PREEMPT */
 
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __spin_unlock(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
 	preempt_enable();
 }
 
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
 					    unsigned long flags)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
@@ -158,7 +161,7 @@ static inline void __spin_unlock_irqrestore(spinlock_t *lock,
 	preempt_enable();
 }
 
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __spin_unlock_irq(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
@@ -166,7 +169,7 @@ static inline void __spin_unlock_irq(spinlock_t *lock)
 	preempt_enable();
 }
 
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __spin_unlock_bh(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
@@ -174,7 +177,7 @@ static inline void __spin_unlock_bh(spinlock_t *lock)
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __spin_trylock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
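
The listing above ends where the displayed hunk context ends; the bodies of
these helpers are unchanged by this patch apart from the argument type. For
reference, the trylock pattern the helpers implement pairs the lock attempt
with a preemption backout. A sketch of the complete __spin_trylock() as it
reads in this kernel (reconstructed for illustration; the hunk above shows
only its first lines, so consult the full file for the authoritative body):

	static inline int __spin_trylock(raw_spinlock_t *lock)
	{
		preempt_disable();	/* no preemption while we might hold the lock */
		if (_raw_spin_trylock(lock)) {
			/* acquired: record it with lockdep (trylock == 1) */
			spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
			return 1;
		}
		preempt_enable();	/* back out: lock was contended */
		return 0;
	}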