about summary refs log tree commit diff stats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--  lib/kernel_lock.c    18
-rw-r--r--  lib/spinlock_debug.c 18
2 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 5526b46aba94..fdd23cdb53f3 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
36 * If it successfully gets the lock, it should increment 36 * If it successfully gets the lock, it should increment
37 * the preemption count like any spinlock does. 37 * the preemption count like any spinlock does.
38 * 38 *
39 * (This works on UP too - _raw_spin_trylock will never 39 * (This works on UP too - do_raw_spin_trylock will never
40 * return false in that case) 40 * return false in that case)
41 */ 41 */
42int __lockfunc __reacquire_kernel_lock(void) 42int __lockfunc __reacquire_kernel_lock(void)
43{ 43{
44 while (!_raw_spin_trylock(&kernel_flag)) { 44 while (!do_raw_spin_trylock(&kernel_flag)) {
45 if (need_resched()) 45 if (need_resched())
46 return -EAGAIN; 46 return -EAGAIN;
47 cpu_relax(); 47 cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
52 52
53void __lockfunc __release_kernel_lock(void) 53void __lockfunc __release_kernel_lock(void)
54{ 54{
55 _raw_spin_unlock(&kernel_flag); 55 do_raw_spin_unlock(&kernel_flag);
56 preempt_enable_no_resched(); 56 preempt_enable_no_resched();
57} 57}
58 58
59/* 59/*
60 * These are the BKL spinlocks - we try to be polite about preemption. 60 * These are the BKL spinlocks - we try to be polite about preemption.
61 * If SMP is not on (ie UP preemption), this all goes away because the 61 * If SMP is not on (ie UP preemption), this all goes away because the
62 * _raw_spin_trylock() will always succeed. 62 * do_raw_spin_trylock() will always succeed.
63 */ 63 */
64#ifdef CONFIG_PREEMPT 64#ifdef CONFIG_PREEMPT
65static inline void __lock_kernel(void) 65static inline void __lock_kernel(void)
66{ 66{
67 preempt_disable(); 67 preempt_disable();
68 if (unlikely(!_raw_spin_trylock(&kernel_flag))) { 68 if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
69 /* 69 /*
70 * If preemption was disabled even before this 70 * If preemption was disabled even before this
71 * was called, there's nothing we can be polite 71 * was called, there's nothing we can be polite
72 * about - just spin. 72 * about - just spin.
73 */ 73 */
74 if (preempt_count() > 1) { 74 if (preempt_count() > 1) {
75 _raw_spin_lock(&kernel_flag); 75 do_raw_spin_lock(&kernel_flag);
76 return; 76 return;
77 } 77 }
78 78
@@ -85,7 +85,7 @@ static inline void __lock_kernel(void)
85 while (spin_is_locked(&kernel_flag)) 85 while (spin_is_locked(&kernel_flag))
86 cpu_relax(); 86 cpu_relax();
87 preempt_disable(); 87 preempt_disable();
88 } while (!_raw_spin_trylock(&kernel_flag)); 88 } while (!do_raw_spin_trylock(&kernel_flag));
89 } 89 }
90} 90}
91 91
@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
96 */ 96 */
97static inline void __lock_kernel(void) 97static inline void __lock_kernel(void)
98{ 98{
99 _raw_spin_lock(&kernel_flag); 99 do_raw_spin_lock(&kernel_flag);
100} 100}
101#endif 101#endif
102 102
@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
106 * the BKL is not covered by lockdep, so we open-code the 106 * the BKL is not covered by lockdep, so we open-code the
107 * unlocking sequence (and thus avoid the dep-chain ops): 107 * unlocking sequence (and thus avoid the dep-chain ops):
108 */ 108 */
109 _raw_spin_unlock(&kernel_flag); 109 do_raw_spin_unlock(&kernel_flag);
110 preempt_enable(); 110 preempt_enable();
111} 111}
112 112
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index e705848cc33c..4755b98b6dfb 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -125,7 +125,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
125 } 125 }
126} 126}
127 127
128void _raw_spin_lock(raw_spinlock_t *lock) 128void do_raw_spin_lock(raw_spinlock_t *lock)
129{ 129{
130 debug_spin_lock_before(lock); 130 debug_spin_lock_before(lock);
131 if (unlikely(!arch_spin_trylock(&lock->raw_lock))) 131 if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
@@ -133,7 +133,7 @@ void _raw_spin_lock(raw_spinlock_t *lock)
133 debug_spin_lock_after(lock); 133 debug_spin_lock_after(lock);
134} 134}
135 135
136int _raw_spin_trylock(raw_spinlock_t *lock) 136int do_raw_spin_trylock(raw_spinlock_t *lock)
137{ 137{
138 int ret = arch_spin_trylock(&lock->raw_lock); 138 int ret = arch_spin_trylock(&lock->raw_lock);
139 139
@@ -148,7 +148,7 @@ int _raw_spin_trylock(raw_spinlock_t *lock)
148 return ret; 148 return ret;
149} 149}
150 150
151void _raw_spin_unlock(raw_spinlock_t *lock) 151void do_raw_spin_unlock(raw_spinlock_t *lock)
152{ 152{
153 debug_spin_unlock(lock); 153 debug_spin_unlock(lock);
154 arch_spin_unlock(&lock->raw_lock); 154 arch_spin_unlock(&lock->raw_lock);
@@ -193,13 +193,13 @@ static void __read_lock_debug(rwlock_t *lock)
193} 193}
194#endif 194#endif
195 195
196void _raw_read_lock(rwlock_t *lock) 196void do_raw_read_lock(rwlock_t *lock)
197{ 197{
198 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 198 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
199 arch_read_lock(&lock->raw_lock); 199 arch_read_lock(&lock->raw_lock);
200} 200}
201 201
202int _raw_read_trylock(rwlock_t *lock) 202int do_raw_read_trylock(rwlock_t *lock)
203{ 203{
204 int ret = arch_read_trylock(&lock->raw_lock); 204 int ret = arch_read_trylock(&lock->raw_lock);
205 205
@@ -212,7 +212,7 @@ int _raw_read_trylock(rwlock_t *lock)
212 return ret; 212 return ret;
213} 213}
214 214
215void _raw_read_unlock(rwlock_t *lock) 215void do_raw_read_unlock(rwlock_t *lock)
216{ 216{
217 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 217 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
218 arch_read_unlock(&lock->raw_lock); 218 arch_read_unlock(&lock->raw_lock);
@@ -268,14 +268,14 @@ static void __write_lock_debug(rwlock_t *lock)
268} 268}
269#endif 269#endif
270 270
271void _raw_write_lock(rwlock_t *lock) 271void do_raw_write_lock(rwlock_t *lock)
272{ 272{
273 debug_write_lock_before(lock); 273 debug_write_lock_before(lock);
274 arch_write_lock(&lock->raw_lock); 274 arch_write_lock(&lock->raw_lock);
275 debug_write_lock_after(lock); 275 debug_write_lock_after(lock);
276} 276}
277 277
278int _raw_write_trylock(rwlock_t *lock) 278int do_raw_write_trylock(rwlock_t *lock)
279{ 279{
280 int ret = arch_write_trylock(&lock->raw_lock); 280 int ret = arch_write_trylock(&lock->raw_lock);
281 281
@@ -290,7 +290,7 @@ int _raw_write_trylock(rwlock_t *lock)
290 return ret; 290 return ret;
291} 291}
292 292
293void _raw_write_unlock(rwlock_t *lock) 293void do_raw_write_unlock(rwlock_t *lock)
294{ 294{
295 debug_write_unlock(lock); 295 debug_write_unlock(lock);
296 arch_write_unlock(&lock->raw_lock); 296 arch_write_unlock(&lock->raw_lock);