author	Thomas Gleixner <tglx@linutronix.de>	2009-12-03 14:55:53 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 17:55:33 -0500
commit	9828ea9d75c38fe3dce05d00566eed61c85732e6 (patch)
tree	6cee5c8ffb07cdf45cc12d58f74a3053ffefcb5f /include/linux/spinlock_api_smp.h
parent	5f6384c5fb6bfc9aac506e058974d3ba293951b3 (diff)
locking: Further name space cleanups
The name space hierarchy for the internal lock functions is now a bit
backwards. raw_spin* functions map to _spin* which use __spin*, while
we would like to have _raw_spin* and __raw_spin*.
_raw_spin* is already used by lock debugging, so rename those functions
to do_raw_spin* to free up the _raw_spin* name space.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
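
As orientation before the diff: a minimal user-space sketch of the layering this rename targets. All demo_* names below are hypothetical stand-ins, and the C11 atomic-flag lock merely substitutes for the kernel's arch-specific arch_spinlock_t; the point is only that do_raw_spin_*() is the lowest "actually take the lock" layer, which the __spin_*() helpers in this header wrap with preemption and lockdep bookkeeping.

	/*
	 * Hypothetical user-space analogue of the lock layering; none of
	 * these demo_* names exist in the kernel. do_raw_spin_*() is the
	 * arch-level operation (the functions renamed by this commit).
	 */
	#include <stdatomic.h>

	typedef struct { atomic_flag flag; } demo_raw_spinlock_t;

	/* analogue of do_raw_spin_trylock(): a single acquire attempt */
	static int demo_do_raw_spin_trylock(demo_raw_spinlock_t *lock)
	{
		return !atomic_flag_test_and_set_explicit(&lock->flag,
							  memory_order_acquire);
	}

	/* analogue of do_raw_spin_lock(): spin until the trylock succeeds */
	static void demo_do_raw_spin_lock(demo_raw_spinlock_t *lock)
	{
		while (!demo_do_raw_spin_trylock(lock))
			; /* busy-wait */
	}

	/* analogue of do_raw_spin_unlock(): release with release ordering */
	static void demo_do_raw_spin_unlock(demo_raw_spinlock_t *lock)
	{
		atomic_flag_clear_explicit(&lock->flag, memory_order_release);
	}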
Diffstat (limited to 'include/linux/spinlock_api_smp.h')
-rw-r--r--	include/linux/spinlock_api_smp.h	24
1 file changed, 12 insertions, 12 deletions
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index eabe5068d138..1be1fc57fc4b 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -85,7 +85,7 @@ _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 static inline int __spin_trylock(raw_spinlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
@@ -109,13 +109,13 @@ static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	/*
 	 * On lockdep we dont want the hand-coded irq-enable of
-	 * _raw_spin_lock_flags() code, because lockdep assumes
+	 * do_raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
 #ifdef CONFIG_LOCKDEP
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 #else
-	_raw_spin_lock_flags(lock, &flags);
+	do_raw_spin_lock_flags(lock, &flags);
 #endif
 	return flags;
 }
@@ -125,7 +125,7 @@ static inline void __spin_lock_irq(raw_spinlock_t *lock)
 	local_irq_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
 static inline void __spin_lock_bh(raw_spinlock_t *lock)
@@ -133,14 +133,14 @@ static inline void __spin_lock_bh(raw_spinlock_t *lock)
 	local_bh_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
 static inline void __spin_lock(raw_spinlock_t *lock)
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
 #endif /* CONFIG_PREEMPT */
@@ -148,7 +148,7 @@ static inline void __spin_lock(raw_spinlock_t *lock)
 static inline void __spin_unlock(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	preempt_enable();
 }
 
@@ -156,7 +156,7 @@ static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
 					    unsigned long flags)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
@@ -164,7 +164,7 @@ static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
 static inline void __spin_unlock_irq(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
 }
@@ -172,7 +172,7 @@ static inline void __spin_unlock_irq(raw_spinlock_t *lock)
 static inline void __spin_unlock_bh(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
@@ -181,7 +181,7 @@ static inline int __spin_trylock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
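
Every lock-side hunk above funnels through LOCK_CONTENDED, which is why the rename must keep the trylock/lock functions as a matched pair. As a rough sketch of the macro's shape with lock statistics enabled (simplified from the lockdep headers of this era; treat it as illustrative, not verbatim -- without CONFIG_LOCK_STAT it reduces to plain lock(_lock)):

	/*
	 * Approximate shape of LOCK_CONTENDED: try the non-blocking
	 * acquire first; only on failure record the contention and
	 * fall back to the blocking acquire.
	 */
	#define LOCK_CONTENDED(_lock, try, lock)			\
	do {								\
		if (!try(_lock)) {					\
			/* fast path failed: note the contention */	\
			lock_contended(&(_lock)->dep_map, _RET_IP_);	\
			lock(_lock);					\
		}							\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
	} while (0)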