path: root/arch/powerpc
author      Thomas Gleixner <tglx@linutronix.de>    2009-12-02 14:01:25 -0500
committer   Thomas Gleixner <tglx@linutronix.de>    2009-12-14 17:55:32 -0500
commit      0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 (patch)
tree        e371d17bd73d64332349debbf45962ec67e7269d /arch/powerpc
parent      edc35bd72e2079b25f99c5da7d7a65dbbffc4a26 (diff)
locking: Convert __raw_spin* functions to arch_spin*
Name space cleanup. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
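The conversion is purely mechanical: every __raw_spin_* entry point (plus the _raw_*_relax hooks) moves into the arch_* namespace, and powerpc's pre-existing private helper arch_spin_trylock() becomes __arch_spin_trylock() so that the public name is free for the former __raw_spin_trylock(). As a quick reference, here is a hypothetical set of compatibility shims (not part of this patch) that expresses the mapping:

/* Hypothetical shims summarizing the rename; old names on the left,
 * new names on the right.  Illustration only, not kernel code. */
#define __raw_spin_is_locked(l)         arch_spin_is_locked(l)
#define __raw_spin_trylock(l)           arch_spin_trylock(l)
#define __raw_spin_lock(l)              arch_spin_lock(l)
#define __raw_spin_lock_flags(l, f)     arch_spin_lock_flags(l, f)
#define __raw_spin_unlock(l)            arch_spin_unlock(l)
#define __raw_spin_unlock_wait(l)       arch_spin_unlock_wait(l)
#define _raw_spin_relax(l)              arch_spin_relax(l)
#define _raw_read_relax(l)              arch_read_relax(l)
#define _raw_write_relax(l)             arch_write_relax(l)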
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/spinlock.h      32
-rw-r--r--   arch/powerpc/kernel/rtas.c               12
-rw-r--r--   arch/powerpc/lib/locks.c                  4
-rw-r--r--   arch/powerpc/platforms/pasemi/setup.c     8
4 files changed, 28 insertions, 28 deletions
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c0d44c92ff0e..cdcaf6b97087 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
 
-#define __raw_spin_is_locked(x)	((x)->slock != 0)
+#define arch_spin_is_locked(x)		((x)->slock != 0)
 
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
 	return tmp;
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
-	return arch_spin_trylock(lock) == 0;
+	return __arch_spin_trylock(lock) == 0;
 }
 
 /*
@@ -104,11 +104,11 @@ extern void __rw_yield(raw_rwlock_t *lock);
 #define SHARED_PROCESSOR	0
 #endif
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(arch_spin_trylock(lock) == 0))
+		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
 		do {
 			HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 }
 
 static inline
-void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;
 
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(arch_spin_trylock(lock) == 0))
+		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
 		local_save_flags(flags_dis);
 		local_irq_restore(flags);
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 	}
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	SYNC_IO;
-	__asm__ __volatile__("# __raw_spin_unlock\n\t"
+	__asm__ __volatile__("# arch_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
 	lock->slock = 0;
 }
 
 #ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
+extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
 #else
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 #endif
 
 /*
@@ -290,9 +290,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock)	__spin_yield(lock)
-#define _raw_read_relax(lock)	__rw_yield(lock)
-#define _raw_write_relax(lock)	__rw_yield(lock)
+#define arch_spin_relax(lock)	__spin_yield(lock)
+#define arch_read_relax(lock)	__rw_yield(lock)
+#define arch_write_relax(lock)	__rw_yield(lock)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SPINLOCK_H */
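The comment above __arch_spin_trylock() explains the powerpc convention: the low-level helper returns the old lock value, so 0 means the caller took the lock, and the public arch_spin_trylock() converts that into a boolean with == 0. Below is a minimal userspace model of that convention, using a GCC atomic exchange as a stand-in for the kernel's lwarx/stwcx. loop; all names here are hypothetical.

/* Model of the trylock convention: the helper returns the previous
 * lock value (0 == lock was free, so we now own it); the public
 * trylock turns that into a boolean.  Build: cc -o try try.c */
#include <stdio.h>

typedef struct { unsigned long slock; } model_spinlock_t;

static inline unsigned long __model_spin_trylock(model_spinlock_t *lock)
{
	/* Atomic exchange: store a nonzero token, return the old value. */
	return __sync_lock_test_and_set(&lock->slock, 1UL);
}

static inline int model_spin_trylock(model_spinlock_t *lock)
{
	return __model_spin_trylock(lock) == 0;
}

int main(void)
{
	model_spinlock_t lock = { 0 };

	printf("first  trylock: %d\n", model_spin_trylock(&lock)); /* 1 */
	printf("second trylock: %d\n", model_spin_trylock(&lock)); /* 0 */
	return 0;
}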
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 57dfa414cfb8..fd0d29493fd6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void)
 
 	local_irq_save(flags);
 	preempt_disable();
-	__raw_spin_lock_flags(&rtas.lock, flags);
+	arch_spin_lock_flags(&rtas.lock, flags);
 	return flags;
 }
 
 static void unlock_rtas(unsigned long flags)
 {
-	__raw_spin_unlock(&rtas.lock);
+	arch_spin_unlock(&rtas.lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void)
 
 	local_irq_save(flags);
 	hard_irq_disable();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
 	timebase = get_tb();
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 
 	while (timebase)
 		barrier();
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void)
 {
 	while (!timebase)
 		barrier();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 }
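rtas_give_timebase() and rtas_take_timebase() take the arch-level lock directly rather than a normal spinlock; they run during secondary-CPU timebase synchronization, where the generic spinlock machinery is avoided. The giver freezes the timebase, publishes get_tb() through the shared timebase variable, then spins until the taker acknowledges by zeroing it. Here is a hedged userspace model of that hand-off, with a pthread spinlock standing in for arch_spin_lock(); all names are hypothetical, and a real implementation would use atomics rather than a volatile for the shared slot.

/* Model of the timebase hand-off: one thread publishes a value under
 * a spinlock, the other consumes it and acknowledges by zeroing it.
 * Build: cc -o handoff handoff.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t timebase_lock;
static volatile unsigned long long timebase;	/* 0 == empty slot */

static void *give_timebase(void *unused)
{
	(void)unused;
	pthread_spin_lock(&timebase_lock);
	timebase = 0x123456789abcULL;	/* stand-in for get_tb() */
	pthread_spin_unlock(&timebase_lock);
	while (timebase)		/* wait for the taker's ack */
		;			/* volatile keeps the poll re-reading */
	return NULL;
}

static void *take_timebase(void *unused)
{
	(void)unused;
	while (!timebase)		/* wait for a published value */
		;
	pthread_spin_lock(&timebase_lock);
	printf("took timebase %#llx\n", timebase);
	timebase = 0;			/* ack: slot is empty again */
	pthread_spin_unlock(&timebase_lock);
	return NULL;
}

int main(void)
{
	pthread_t giver, taker;

	pthread_spin_init(&timebase_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&giver, NULL, give_timebase, NULL);
	pthread_create(&taker, NULL, take_timebase, NULL);
	pthread_join(giver, NULL);
	pthread_join(taker, NULL);
	return 0;
}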
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index b06294cde499..ee395e392115 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
 }
 #endif
 
-void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (lock->slock) {
 		HMT_low();
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 	HMT_medium();
 }
 
-EXPORT_SYMBOL(__raw_spin_unlock_wait);
+EXPORT_SYMBOL(arch_spin_unlock_wait);
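arch_spin_unlock_wait() blocks until the lock is observed free, without ever acquiring it; the PPC64 out-of-line version above also drops SMT thread priority (HMT_low()) while it spins. A minimal sketch of the semantics with a GCC atomic load; the names are hypothetical, and HMT_low()/HMT_medium() have no userspace analogue here.

/* Sketch: poll the lock word until it reads zero; never take the lock. */
typedef struct { unsigned long slock; } model_spinlock_t;

static inline void model_spin_unlock_wait(model_spinlock_t *lock)
{
	while (__atomic_load_n(&lock->slock, __ATOMIC_ACQUIRE))
		;	/* kernel code lowers thread priority here */
}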
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index be36fece41d7..242f8095c2df 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void)
 
 	local_irq_save(flags);
 	hard_irq_disable();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	mtspr(SPRN_TBCTL, TBCTL_FREEZE);
 	isync();
 	timebase = get_tb();
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 
 	while (timebase)
 		barrier();
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void)
 	while (!timebase)
 		smp_rmb();
 
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 }
 
 struct smp_ops_t pas_smp_ops = {
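Unlike the rtas variant, pas_take_timebase() polls with smp_rmb() between reads rather than plain barrier(), so the observation of the flag also orders the reads that follow it. A rough userspace analogue of that polling pattern uses a C11 acquire load; this is an approximation (smp_rmb() is a read barrier, not an acquire load) and the names are hypothetical.

/* Sketch: poll a shared slot until nonzero; the acquire load keeps
 * later reads from being hoisted above the successful poll. */
#include <stdatomic.h>

static _Atomic unsigned long long timebase;

static unsigned long long wait_for_timebase(void)
{
	unsigned long long tb;

	while ((tb = atomic_load_explicit(&timebase,
					  memory_order_acquire)) == 0)
		;	/* spin until the giver publishes a value */
	return tb;
}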