path: root/lib/kernel_lock.c
author     Thomas Gleixner <tglx@linutronix.de>  2009-12-03 14:55:53 -0500
committer  Thomas Gleixner <tglx@linutronix.de>  2009-12-14 17:55:33 -0500
commit     9828ea9d75c38fe3dce05d00566eed61c85732e6 (patch)
tree       6cee5c8ffb07cdf45cc12d58f74a3053ffefcb5f /lib/kernel_lock.c
parent     5f6384c5fb6bfc9aac506e058974d3ba293951b3 (diff)
locking: Further name space cleanups
The name space hierarchy for the internal lock functions is now a bit
backwards. raw_spin* functions map to _spin* which use __spin*, while we
would like to have _raw_spin* and __raw_spin*.

_raw_spin* is already used by lock debugging, so rename those functions
to do_raw_spin* to free up the _raw_spin* name space.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
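To see the hierarchy the rename is aiming for, here is a minimal user-space sketch of the four naming layers, assuming C11 atomics. Everything in it is a hypothetical stand-in for illustration: the atomic_flag-based raw_spinlock_t and the pass-through middle layers are not the kernel's real definitions (those live in include/linux/spinlock*.h and per-arch code); only the naming relationship (raw_spin* calls _raw_spin*, which calls __raw_spin*, which bottoms out in do_raw_spin*) reflects the commit message.

#include <stdatomic.h>

/* Hypothetical stand-in for the kernel's raw_spinlock_t. */
typedef struct {
        atomic_flag slock;
} raw_spinlock_t;

#define RAW_SPINLOCK_INIT { ATOMIC_FLAG_INIT }

/* Innermost layer: the actual lock operation. This is the name the patch
 * frees up by renaming lock debugging's _raw_spin_lock() to
 * do_raw_spin_lock(). */
static inline void do_raw_spin_lock(raw_spinlock_t *lock)
{
        while (atomic_flag_test_and_set_explicit(&lock->slock,
                                                 memory_order_acquire))
                ; /* spin until the previous holder clears the flag */
}

/* __raw_spin_lock(): in the kernel this layer would disable preemption
 * and do lockdep bookkeeping; here it only forwards downward. */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        do_raw_spin_lock(lock);
}

/* _raw_spin_lock(): the config-selected (SMP/UP/debug) entry point. */
static inline void _raw_spin_lock(raw_spinlock_t *lock)
{
        __raw_spin_lock(lock);
}

/* raw_spin_lock(): the public API at the top of the name space. */
static inline void raw_spin_lock(raw_spinlock_t *lock)
{
        _raw_spin_lock(lock);
}

With these names, the internal _raw_spin* layer no longer collides with the lock-debugging helpers, which is the point of the rename.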
Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r--  lib/kernel_lock.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 5526b46aba94..fdd23cdb53f3 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
  * If it successfully gets the lock, it should increment
  * the preemption count like any spinlock does.
  *
- * (This works on UP too - _raw_spin_trylock will never
+ * (This works on UP too - do_raw_spin_trylock will never
  * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-        while (!_raw_spin_trylock(&kernel_flag)) {
+        while (!do_raw_spin_trylock(&kernel_flag)) {
                 if (need_resched())
                         return -EAGAIN;
                 cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
 
 void __lockfunc __release_kernel_lock(void)
 {
-        _raw_spin_unlock(&kernel_flag);
+        do_raw_spin_unlock(&kernel_flag);
         preempt_enable_no_resched();
 }
 
 /*
  * These are the BKL spinlocks - we try to be polite about preemption.
  * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
+ * do_raw_spin_trylock() will always succeed.
  */
 #ifdef CONFIG_PREEMPT
 static inline void __lock_kernel(void)
 {
         preempt_disable();
-        if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+        if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
                 /*
                  * If preemption was disabled even before this
                  * was called, there's nothing we can be polite
                  * about - just spin.
                  */
                 if (preempt_count() > 1) {
-                        _raw_spin_lock(&kernel_flag);
+                        do_raw_spin_lock(&kernel_flag);
                         return;
                 }
 
@@ -85,7 +85,7 @@ static inline void __lock_kernel(void)
                         while (spin_is_locked(&kernel_flag))
                                 cpu_relax();
                         preempt_disable();
-                } while (!_raw_spin_trylock(&kernel_flag));
+                } while (!do_raw_spin_trylock(&kernel_flag));
         }
 }
 
@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
  */
 static inline void __lock_kernel(void)
 {
-        _raw_spin_lock(&kernel_flag);
+        do_raw_spin_lock(&kernel_flag);
 }
 #endif
 
@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
          * the BKL is not covered by lockdep, so we open-code the
          * unlocking sequence (and thus avoid the dep-chain ops):
          */
-        _raw_spin_unlock(&kernel_flag);
+        do_raw_spin_unlock(&kernel_flag);
         preempt_enable();
 }
 
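The comments in the diff above rely on do_raw_spin_trylock() never failing on a uniprocessor build. A hedged sketch of why, assuming the usual UP pattern where the spinlock carries no lock state and the trylock degenerates to unconditional success (the type and function names below are hypothetical stand-ins, not the kernel's UP headers):

/* Hypothetical UP variant: the UP "spinlock" has no real lock state, so
 * the trylock reports success unconditionally. */
typedef struct {
        int unused; /* no lock word is needed on a single CPU */
} up_raw_spinlock_t;

static inline int do_raw_spin_trylock_up(up_raw_spinlock_t *lock)
{
        (void)lock; /* nothing to test */
        return 1;   /* never returns false, as the comments above note */
}

This is why both the __reacquire_kernel_lock() loop and the preemption-polite loop in __lock_kernel() collapse to straight-line code on UP kernels.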