-rw-r--r--  include/linux/rwlock.h            |  32
-rw-r--r--  include/linux/rwlock_api_smp.h    |  40
-rw-r--r--  include/linux/spinlock.h          |  16
-rw-r--r--  include/linux/spinlock_api_smp.h  |  24
-rw-r--r--  kernel/sched.c                    |   2
-rw-r--r--  kernel/spinlock.c                 |  12
-rw-r--r--  lib/kernel_lock.c                 |  18
-rw-r--r--  lib/spinlock_debug.c              |  18
8 files changed, 81 insertions(+), 81 deletions(-)
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 5725b034defe..bd799bc6d086 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -29,25 +29,25 @@ do { \
 #endif

 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
 #else
-# define _raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
-	arch_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
-	arch_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)
+# define do_raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
+	arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
+	arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)
 #endif

 #define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
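
The structure this file sets up is worth seeing in isolation: do_raw_*() is a middle layer that is either a debug-checking out-of-line function (CONFIG_DEBUG_SPINLOCK) or a thin macro over the per-architecture primitive. Below is a minimal, self-contained user-space sketch of the same compile-time switch; every name and the MY_DEBUG knob are illustrative stand-ins, not kernel code.

/*
 * Toy model of the do_raw_*() layer: the debug build gets a checking
 * function, the non-debug build gets a direct macro to the "arch" op.
 * Build with -DMY_DEBUG to select the checking variant.
 */
#include <assert.h>
#include <stdatomic.h>

typedef struct { atomic_flag raw_lock; } my_spinlock_t;

static inline void arch_lock(atomic_flag *l)
{
	while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
		;			/* spin until we own the flag */
}

static inline void arch_unlock(atomic_flag *l)
{
	atomic_flag_clear_explicit(l, memory_order_release);
}

#ifdef MY_DEBUG
static int held;			/* toy debug state */

static void do_raw_lock(my_spinlock_t *lock)
{
	arch_lock(&lock->raw_lock);
	held = 1;
}

static void do_raw_unlock(my_spinlock_t *lock)
{
	assert(held);			/* catch unlock-without-lock */
	held = 0;
	arch_unlock(&lock->raw_lock);
}
#else
# define do_raw_lock(lock)	arch_lock(&(lock)->raw_lock)
# define do_raw_unlock(lock)	arch_unlock(&(lock)->raw_lock)
#endif

int main(void)
{
	my_spinlock_t s = { ATOMIC_FLAG_INIT };

	do_raw_lock(&s);
	do_raw_unlock(&s);
	return 0;
}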
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 090f876f828d..b3ba5ae6a8c4 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -113,7 +113,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 static inline int __read_trylock(rwlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_read_trylock(lock)) {
+	if (do_raw_read_trylock(lock)) {
 		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
@@ -124,7 +124,7 @@ static inline int __read_trylock(rwlock_t *lock)
 static inline int __write_trylock(rwlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_write_trylock(lock)) {
+	if (do_raw_write_trylock(lock)) {
 		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
@@ -143,7 +143,7 @@ static inline void __read_lock(rwlock_t *lock)
 {
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
 }

 static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
@@ -153,8 +153,8 @@ static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
-			     _raw_read_lock_flags, &flags);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+			     do_raw_read_lock_flags, &flags);
 	return flags;
 }

@@ -163,7 +163,7 @@ static inline void __read_lock_irq(rwlock_t *lock)
 	local_irq_disable();
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
 }

 static inline void __read_lock_bh(rwlock_t *lock)
@@ -171,7 +171,7 @@ static inline void __read_lock_bh(rwlock_t *lock)
 	local_bh_disable();
 	preempt_disable();
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
 }

 static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
@@ -181,8 +181,8 @@ static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
 	local_irq_save(flags);
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
-			     _raw_write_lock_flags, &flags);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+			     do_raw_write_lock_flags, &flags);
 	return flags;
 }

@@ -191,7 +191,7 @@ static inline void __write_lock_irq(rwlock_t *lock)
 	local_irq_disable();
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
 }

 static inline void __write_lock_bh(rwlock_t *lock)
@@ -199,14 +199,14 @@ static inline void __write_lock_bh(rwlock_t *lock)
 	local_bh_disable();
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
 }

 static inline void __write_lock(rwlock_t *lock)
 {
 	preempt_disable();
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
 }

 #endif /* CONFIG_PREEMPT */
@@ -214,21 +214,21 @@ static inline void __write_lock(rwlock_t *lock)
 static inline void __write_unlock(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
+	do_raw_write_unlock(lock);
 	preempt_enable();
 }

 static inline void __read_unlock(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
+	do_raw_read_unlock(lock);
 	preempt_enable();
 }

 static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
+	do_raw_read_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
@@ -236,7 +236,7 @@ static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 static inline void __read_unlock_irq(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
+	do_raw_read_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
 }
@@ -244,7 +244,7 @@ static inline void __read_unlock_irq(rwlock_t *lock)
 static inline void __read_unlock_bh(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
+	do_raw_read_unlock(lock);
 	preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
@@ -253,7 +253,7 @@ static inline void __write_unlock_irqrestore(rwlock_t *lock,
 					     unsigned long flags)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
+	do_raw_write_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
@@ -261,7 +261,7 @@ static inline void __write_unlock_irqrestore(rwlock_t *lock,
 static inline void __write_unlock_irq(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
+	do_raw_write_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
 }
@@ -269,7 +269,7 @@ static inline void __write_unlock_irq(rwlock_t *lock)
 static inline void __write_unlock_bh(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
+	do_raw_write_unlock(lock);
 	preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
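
Every function above funnels through LOCK_CONTENDED()/LOCK_CONTENDED_FLAGS() from include/linux/lockdep.h. Their shape at the time was roughly the following; this is recalled as a sketch, so treat the bodies as an approximation rather than the authoritative definitions. The point is that with CONFIG_LOCK_STAT the trylock fast path lets lockdep record a contention event before falling back to the blocking acquire.

/* Approximate shape with CONFIG_LOCK_STAT enabled: */
#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags)	\
	LOCK_CONTENDED((_lock), (try), (lock))

/* Without CONFIG_LOCK_STAT both collapse to direct calls,
 * approximately:
 *   #define LOCK_CONTENDED(_lock, try, lock)  lock(_lock)
 *   #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
 *           lockfl((_lock), (flags))
 */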
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index ef5a55d96b9b..0cbc58acf689 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -128,28 +128,28 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(raw_spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(raw_spinlock_t *lock);
- extern void _raw_spin_unlock(raw_spinlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
 #else
-static inline void _raw_spin_lock(raw_spinlock_t *lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	arch_spin_lock(&lock->raw_lock);
 }

 static inline void
-_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
 {
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
 }

-static inline int _raw_spin_trylock(raw_spinlock_t *lock)
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	return arch_spin_trylock(&(lock)->raw_lock);
 }

-static inline void _raw_spin_unlock(raw_spinlock_t *lock)
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
 	arch_spin_unlock(&lock->raw_lock);
 }
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index eabe5068d138..1be1fc57fc4b 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -85,7 +85,7 @@ _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 static inline int __spin_trylock(raw_spinlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
@@ -109,13 +109,13 @@ static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	/*
 	 * On lockdep we dont want the hand-coded irq-enable of
-	 * _raw_spin_lock_flags() code, because lockdep assumes
+	 * do_raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
 #ifdef CONFIG_LOCKDEP
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 #else
-	_raw_spin_lock_flags(lock, &flags);
+	do_raw_spin_lock_flags(lock, &flags);
 #endif
 	return flags;
 }
@@ -125,7 +125,7 @@ static inline void __spin_lock_irq(raw_spinlock_t *lock)
 	local_irq_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }

 static inline void __spin_lock_bh(raw_spinlock_t *lock)
@@ -133,14 +133,14 @@ static inline void __spin_lock_bh(raw_spinlock_t *lock)
 	local_bh_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }

 static inline void __spin_lock(raw_spinlock_t *lock)
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }

 #endif /* CONFIG_PREEMPT */
@@ -148,7 +148,7 @@ static inline void __spin_lock(raw_spinlock_t *lock)
 static inline void __spin_unlock(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	preempt_enable();
 }

@@ -156,7 +156,7 @@ static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
 					    unsigned long flags)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
@@ -164,7 +164,7 @@ static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
 static inline void __spin_unlock_irq(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
 }
@@ -172,7 +172,7 @@ static inline void __spin_unlock_irq(raw_spinlock_t *lock)
 static inline void __spin_unlock_bh(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
@@ -181,7 +181,7 @@ static inline int __spin_trylock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
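
The comment in __spin_lock_irqsave() is the one subtle point in this file: on some architectures the *_lock_flags() operation may briefly re-enable interrupts from the saved flags while it waits, which lockdep's IRQ-state tracking cannot model, so CONFIG_LOCKDEP takes the plain LOCK_CONTENDED() path instead. A hypothetical arch implementation with that behaviour would look roughly like this (illustrative pseudo-arch code, not any real port):

static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
					unsigned long flags)
{
	while (!arch_spin_trylock(lock)) {
		/* contended: let interrupts back in while we wait */
		local_irq_restore(flags);
		while (arch_spin_is_locked(lock))
			cpu_relax();
		/* mask them again before retrying the acquire */
		local_irq_disable();
	}
}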
diff --git a/kernel/sched.c b/kernel/sched.c
index e6acf2d7b753..91c65dd91435 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6684,7 +6684,7 @@ SYSCALL_DEFINE0(sched_yield)
 	 */
 	__release(rq->lock);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-	_raw_spin_unlock(&rq->lock);
+	do_raw_spin_unlock(&rq->lock);
 	preempt_enable_no_resched();

 	schedule();
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 54eb7dd3c608..795240b81224 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -48,7 +48,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \
 { \
 	for (;;) { \
 		preempt_disable(); \
-		if (likely(_raw_##op##_trylock(lock))) \
+		if (likely(do_raw_##op##_trylock(lock))) \
 			break; \
 		preempt_enable(); \
 		\
@@ -67,7 +67,7 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
 	for (;;) { \
 		preempt_disable(); \
 		local_irq_save(flags); \
-		if (likely(_raw_##op##_trylock(lock))) \
+		if (likely(do_raw_##op##_trylock(lock))) \
 			break; \
 		local_irq_restore(flags); \
 		preempt_enable(); \
@@ -345,7 +345,7 @@ void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 EXPORT_SYMBOL(_spin_lock_nested);

@@ -357,8 +357,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock,
 	local_irq_save(flags);
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
-				_raw_spin_lock_flags, &flags);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
+				do_raw_spin_lock_flags, &flags);
 	return flags;
 }
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
@@ -368,7 +368,7 @@ void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock,
 {
 	preempt_disable();
 	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 EXPORT_SYMBOL(_spin_lock_nest_lock);

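
The __##op##_lock() template above implements "polite" spinning under CONFIG_PREEMPT: preemption is disabled only across the trylock, and a contended CPU busy-waits with preemption enabled. Reduced to a standalone sketch (stubbed preempt calls and a toy atomic lock; only the control flow mirrors the kernel macro):

#include <stdatomic.h>
#include <stdbool.h>

static void preempt_disable(void) { /* stub for illustration */ }
static void preempt_enable(void)  { /* stub for illustration */ }
static void cpu_relax(void)       { /* e.g. a pause instruction */ }

typedef struct { atomic_bool locked; } toy_lock_t;

static bool toy_trylock(toy_lock_t *l)
{
	bool expected = false;

	return atomic_compare_exchange_strong(&l->locked, &expected, true);
}

static void toy_lock(toy_lock_t *l)
{
	for (;;) {
		preempt_disable();
		if (toy_trylock(l))
			break;		/* success: return with "preemption" off */
		preempt_enable();	/* stay preemptible while contended */
		while (atomic_load(&l->locked))
			cpu_relax();
	}
}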
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 5526b46aba94..fdd23cdb53f3 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
  * If it successfully gets the lock, it should increment
  * the preemption count like any spinlock does.
  *
- * (This works on UP too - _raw_spin_trylock will never
+ * (This works on UP too - do_raw_spin_trylock will never
  * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-	while (!_raw_spin_trylock(&kernel_flag)) {
+	while (!do_raw_spin_trylock(&kernel_flag)) {
 		if (need_resched())
 			return -EAGAIN;
 		cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)

 void __lockfunc __release_kernel_lock(void)
 {
-	_raw_spin_unlock(&kernel_flag);
+	do_raw_spin_unlock(&kernel_flag);
 	preempt_enable_no_resched();
 }

 /*
  * These are the BKL spinlocks - we try to be polite about preemption.
  * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
+ * do_raw_spin_trylock() will always succeed.
  */
 #ifdef CONFIG_PREEMPT
 static inline void __lock_kernel(void)
 {
 	preempt_disable();
-	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
 		/*
 		 * If preemption was disabled even before this
 		 * was called, there's nothing we can be polite
 		 * about - just spin.
 		 */
 		if (preempt_count() > 1) {
-			_raw_spin_lock(&kernel_flag);
+			do_raw_spin_lock(&kernel_flag);
 			return;
 		}

@@ -85,7 +85,7 @@ static inline void __lock_kernel(void)
 			while (spin_is_locked(&kernel_flag))
 				cpu_relax();
 			preempt_disable();
-		} while (!_raw_spin_trylock(&kernel_flag));
+		} while (!do_raw_spin_trylock(&kernel_flag));
 	}
 }

@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
  */
 static inline void __lock_kernel(void)
 {
-	_raw_spin_lock(&kernel_flag);
+	do_raw_spin_lock(&kernel_flag);
 }
 #endif

@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
 	 * the BKL is not covered by lockdep, so we open-code the
 	 * unlocking sequence (and thus avoid the dep-chain ops):
 	 */
-	_raw_spin_unlock(&kernel_flag);
+	do_raw_spin_unlock(&kernel_flag);
 	preempt_enable();
 }

diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index e705848cc33c..4755b98b6dfb 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -125,7 +125,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
 	}
 }

-void _raw_spin_lock(raw_spinlock_t *lock)
+void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
 	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
@@ -133,7 +133,7 @@ void _raw_spin_lock(raw_spinlock_t *lock)
 	debug_spin_lock_after(lock);
 }

-int _raw_spin_trylock(raw_spinlock_t *lock)
+int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int ret = arch_spin_trylock(&lock->raw_lock);

@@ -148,7 +148,7 @@ int _raw_spin_trylock(raw_spinlock_t *lock)
 	return ret;
 }

-void _raw_spin_unlock(raw_spinlock_t *lock)
+void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
 	debug_spin_unlock(lock);
 	arch_spin_unlock(&lock->raw_lock);
@@ -193,13 +193,13 @@ static void __read_lock_debug(rwlock_t *lock)
 }
 #endif

-void _raw_read_lock(rwlock_t *lock)
+void do_raw_read_lock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
 	arch_read_lock(&lock->raw_lock);
 }

-int _raw_read_trylock(rwlock_t *lock)
+int do_raw_read_trylock(rwlock_t *lock)
 {
 	int ret = arch_read_trylock(&lock->raw_lock);

@@ -212,7 +212,7 @@ int _raw_read_trylock(rwlock_t *lock)
 	return ret;
 }

-void _raw_read_unlock(rwlock_t *lock)
+void do_raw_read_unlock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
 	arch_read_unlock(&lock->raw_lock);
@@ -268,14 +268,14 @@ static void __write_lock_debug(rwlock_t *lock)
 }
 #endif

-void _raw_write_lock(rwlock_t *lock)
+void do_raw_write_lock(rwlock_t *lock)
 {
 	debug_write_lock_before(lock);
 	arch_write_lock(&lock->raw_lock);
 	debug_write_lock_after(lock);
 }

-int _raw_write_trylock(rwlock_t *lock)
+int do_raw_write_trylock(rwlock_t *lock)
 {
 	int ret = arch_write_trylock(&lock->raw_lock);

@@ -290,7 +290,7 @@ int _raw_write_trylock(rwlock_t *lock)
 	return ret;
 }

-void _raw_write_unlock(rwlock_t *lock)
+void do_raw_write_unlock(rwlock_t *lock)
 {
 	debug_write_unlock(lock);
 	arch_write_unlock(&lock->raw_lock);
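
The RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, ...) checks in these hunks use the classic magic-number technique for catching uninitialized or memory-corrupted locks. A minimal standalone sketch of the idea (the constant and all names here are invented for illustration):

#include <assert.h>

#define TOY_MAGIC 0x4c4f434bu		/* any distinctive constant */

struct toy_lock {
	unsigned int magic;
	int locked;
};

static void toy_lock_init(struct toy_lock *l)
{
	l->magic = TOY_MAGIC;		/* stamped once at init time */
	l->locked = 0;
}

static void toy_lock_check(struct toy_lock *l)
{
	/* fires on locks that were never initialized or were stomped on */
	assert(l->magic == TOY_MAGIC);
}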