author     Thomas Gleixner <tglx@linutronix.de>  2009-12-02 14:01:25 -0500
committer  Thomas Gleixner <tglx@linutronix.de>  2009-12-14 17:55:32 -0500
commit     0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 (patch)
tree       e371d17bd73d64332349debbf45962ec67e7269d /arch/powerpc/include
parent     edc35bd72e2079b25f99c5da7d7a65dbbffc4a26 (diff)
locking: Convert __raw_spin* functions to arch_spin*
Name space cleanup. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
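The conversion is purely mechanical: every __raw_spin_* entry point keeps its semantics and only takes the arch_spin_* name. A minimal before/after sketch of a hypothetical caller (illustration only, not code from this patch; the demo_* names are invented, and __ARCH_SPIN_LOCK_UNLOCKED comes from the parent commit edc35bd7):

#include <linux/spinlock.h>

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
        arch_spin_lock(&demo_lock);     /* was __raw_spin_lock(&demo_lock) */
        /* ... critical section ... */
        arch_spin_unlock(&demo_lock);   /* was __raw_spin_unlock(&demo_lock) */
}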
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c0d44c92ff0e..cdcaf6b97087 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
 
-#define __raw_spin_is_locked(x)        ((x)->slock != 0)
+#define arch_spin_is_locked(x)         ((x)->slock != 0)
 
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 {
         unsigned long tmp, token;
 
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
         return tmp;
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         CLEAR_IO_SYNC;
-        return arch_spin_trylock(lock) == 0;
+        return __arch_spin_trylock(lock) == 0;
 }
 
 /*
@@ -104,11 +104,11 @@ extern void __rw_yield(raw_rwlock_t *lock);
 #define SHARED_PROCESSOR        0
 #endif
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         CLEAR_IO_SYNC;
         while (1) {
-                if (likely(arch_spin_trylock(lock) == 0))
+                if (likely(__arch_spin_trylock(lock) == 0))
                         break;
                 do {
                         HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 }
 
 static inline
-void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
         unsigned long flags_dis;
 
         CLEAR_IO_SYNC;
         while (1) {
-                if (likely(arch_spin_trylock(lock) == 0))
+                if (likely(__arch_spin_trylock(lock) == 0))
                         break;
                 local_save_flags(flags_dis);
                 local_irq_restore(flags);
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
         }
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         SYNC_IO;
-        __asm__ __volatile__("# __raw_spin_unlock\n\t"
+        __asm__ __volatile__("# arch_spin_unlock\n\t"
                                 LWSYNC_ON_SMP: : :"memory");
         lock->slock = 0;
 }
 
 #ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
+extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
 #else
-#define __raw_spin_unlock_wait(lock) \
-        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+        do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 #endif
 
 /*
@@ -290,9 +290,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock)   __spin_yield(lock)
-#define _raw_read_relax(lock)   __rw_yield(lock)
-#define _raw_write_relax(lock)  __rw_yield(lock)
+#define arch_spin_relax(lock)   __spin_yield(lock)
+#define arch_read_relax(lock)   __rw_yield(lock)
+#define arch_write_relax(lock)  __rw_yield(lock)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SPINLOCK_H */
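A note on the trylock hunks: the old internal helper was already named arch_spin_trylock, so it had to move aside (to __arch_spin_trylock) before the public __raw_spin_trylock could take the arch_spin_trylock name. The layering itself is unchanged: the helper returns the old lock word (0 means the lock was free and is now held) and the public function turns that into a boolean. A simplified portable sketch of that convention, using a GCC builtin in place of the patch's lwarx/stwcx. inline assembly (the demo_* names are hypothetical):

typedef struct { volatile unsigned int slock; } demo_spinlock_t;

/* Returns the previous lock value; 0 means we acquired the lock. */
static inline unsigned long __demo_spin_trylock(demo_spinlock_t *lock)
{
        return __sync_val_compare_and_swap(&lock->slock, 0, 1);
}

/* Boolean wrapper, mirroring arch_spin_trylock() in the patch. */
static inline int demo_spin_trylock(demo_spinlock_t *lock)
{
        return __demo_spin_trylock(lock) == 0;
}

The split mirrors the file itself: arch_spin_lock() and arch_spin_lock_flags() spin on the same __arch_spin_trylock(lock) == 0 test that the boolean wrapper uses.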