author	Thomas Gleixner <tglx@linutronix.de>	2009-12-03 14:01:19 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 17:55:32 -0500
commit	fb3a6bbc912b12347614e5742c7c61416cdb0ca0 (patch)
tree	f9dbf8dab23cea6f033a58672ba16abf2ae09ebd /arch/powerpc
parent	0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 (diff)
locking: Convert raw_rwlock to arch_rwlock
Not strictly necessary for -rt as -rt does not have non sleeping
rwlocks, but it's odd to not have a consistent naming convention.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/spinlock.h	18
-rw-r--r--	arch/powerpc/include/asm/spinlock_types.h	4
-rw-r--r--	arch/powerpc/lib/locks.c	2
3 files changed, 12 insertions(+), 12 deletions(-)
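For orientation, a minimal before/after sketch of what the rename means at a declaration site (illustrative only, not part of the patch; only the names change, the lock word and semantics do not):

	/* Before: the arch-level rwlock still carried the raw_* names. */
	raw_rwlock_t old_name = __RAW_RW_LOCK_UNLOCKED;

	/* After: arch_* names, consistent with arch_spinlock_t. */
	arch_rwlock_t new_name = __ARCH_RW_LOCK_UNLOCKED;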
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index cdcaf6b97087..2fad2c07c593 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -97,7 +97,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_lppaca()->shared_proc)
 extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)	barrier()
 #define __rw_yield(x)	barrier()
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static inline long arch_read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(arch_rwlock_t *rw)
 {
 	long tmp;
 
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw)
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long arch_write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(arch_rwlock_t *rw)
 {
 	long tmp, token;
 
@@ -225,7 +225,7 @@ static inline long arch_write_trylock(raw_rwlock_t *rw)
 	return tmp;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
 {
 	while (1) {
 		if (likely(arch_read_trylock(rw) > 0))
@@ -239,7 +239,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	}
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
 {
 	while (1) {
 		if (likely(arch_write_trylock(rw) == 0))
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	}
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
 {
 	return arch_read_trylock(rw) > 0;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
 {
 	return arch_write_trylock(rw) == 0;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
 {
 	long tmp;
 
@@ -280,7 +280,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 		: "cr0", "xer", "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__("# write_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
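The comments in the hunks above document the trylock return conventions: arch_read_trylock() returns the old lock value plus one (> 0 means the read lock was taken), while arch_write_trylock() returns the old value itself (0 means the write lock was taken). A toy, non-atomic model of that lock-word protocol (illustrative only; the real routines are lwarx/stwcx. retry loops, and the writer token also encodes the holder CPU):

	/* Positive values count readers, a negative value marks a writer. */
	typedef struct { volatile signed int lock; } arch_rwlock_t;

	static long model_read_trylock(arch_rwlock_t *rw)
	{
		long old = rw->lock;

		if (old >= 0)			/* no writer present */
			rw->lock = old + 1;	/* one more reader */
		return old + 1;			/* > 0 iff we got the read lock */
	}

	static long model_write_trylock(arch_rwlock_t *rw)
	{
		long old = rw->lock;

		if (old == 0)			/* fully unlocked */
			rw->lock = -1;		/* writer token (negative) */
		return old;			/* 0 iff we got the write lock */
	}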
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index f5f39d82711f..2351adc4fdc4 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
 
 typedef struct {
 	volatile signed int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
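The point of the arch_* prefix is to free the raw_* namespace for the generic layer; a simplified sketch (debug and lockdep fields omitted) of how the generic rwlock_t wraps the arch type once this series lands:

	#include <asm/spinlock_types.h>

	typedef struct {
		arch_rwlock_t raw_lock;	/* the arch type renamed above */
	} rwlock_t;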
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index ee395e392115..58e14fba11b1 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -55,7 +55,7 @@ void __spin_yield(arch_spinlock_t *lock)
  * This turns out to be the same for read and write locks, since
  * we only know the holder if it is write-locked.
  */
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
 {
 	int lock_value;
 	unsigned int holder_cpu, yield_count;
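The hunk is truncated just after the declarations, but the comment explains the key constraint: only a write-locked rwlock has a single identifiable holder to yield to. A rough sketch of the control flow that follows from it (not the file's exact body; the holder-CPU encoding in the negative lock word is an assumption carried over from the spinlock variant):

	void __rw_yield(arch_rwlock_t *rw)
	{
		int lock_value;
		unsigned int holder_cpu;

		if (!SHARED_PROCESSOR)	/* dedicated CPU: just keep spinning */
			return;
		lock_value = rw->lock;
		if (lock_value >= 0)	/* unlocked or read-locked: holder unknown */
			return;
		holder_cpu = lock_value & 0xffff;	/* writer token names the holder */
		/* ... compare the holder's yield_count and confer our cycles ... */
	}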