author		Thomas Gleixner <tglx@linutronix.de>	2009-12-03 14:08:46 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 17:55:32 -0500
commit		e5931943d02bf751b1ec849c0d2ade23d76a8d41 (patch)
tree		119fe3bc583d0d043d97cb9edd98bad52692a546 /arch/ia64/include/asm/spinlock.h
parent		fb3a6bbc912b12347614e5742c7c61416cdb0ca0 (diff)
locking: Convert raw_rwlock functions to arch_rwlock
Name space cleanup for rwlock functions. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
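[Editor's note] The rename is mechanical: every __raw_read_*/__raw_write_* entry point in the arch header becomes arch_read_*/arch_write_*. For orientation, a simplified sketch (not part of this diff) of how the generic rwlock layer from the same cleanup series is expected to reach these hooks:

/* Sketch only: approximate include/linux/rwlock.h mapping after this
 * series; the exact wrappers live in the generic patches, not here.
 */
#define do_raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
#define do_raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
#define do_raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
#define do_raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)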
Diffstat (limited to 'arch/ia64/include/asm/spinlock.h')
-rw-r--r--	arch/ia64/include/asm/spinlock.h	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 6715b6a8ebc3..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -140,13 +140,13 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 	__ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
@@ -169,13 +169,13 @@ __raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 
-#define __raw_read_lock(rw)						\
+#define arch_read_lock(rw)						\
 do {									\
 	arch_rwlock_t *__read_lock_ptr = (rw);				\
 									\
@@ -188,7 +188,7 @@ do {									\
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_unlock(rw)					\
+#define arch_read_unlock(rw)					\
 do {								\
 	arch_rwlock_t *__read_lock_ptr = (rw);			\
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
@@ -197,7 +197,7 @@ do {								\
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
-#define __raw_write_trylock(rw)					\
+#define arch_write_trylock(rw)					\
 ({								\
 	register long result;					\
 								\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 	(result == 0);						\
 })
 
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 
-#define __raw_write_lock(l)						\
+#define arch_write_lock(l)						\
 ({									\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);	\
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l);			\
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
 	} while (ia64_val);						\
 })
 
-#define __raw_write_trylock(rw)					\
+#define arch_write_trylock(rw)					\
 ({								\
 	__u64 ia64_val;						\
 	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);		\
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
 	(ia64_val == 0);					\
 })
 
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	barrier();
 	x->write_lock = 0;
@@ -273,7 +273,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-static inline int __raw_read_trylock(arch_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
 {
 	union {
 		arch_rwlock_t lock;
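[Editor's note] For context, and not part of the patch: the can_lock tests at the top of the diff work because ia64 packs the whole rwlock into one 32-bit word. A sketch of the layout (field names as in the companion arch/ia64/include/asm/spinlock_types.h; treat as illustrative):

/*
 * Illustrative layout assumed by the macros above:
 *   bits 0..30  read_counter -- adjusted with ia64_fetchadd(+1/-1)
 *   bit  31     write_lock   -- set via ia64_dep_mi(-1, 0, 31, 1)
 * Viewed as a signed int:
 *   arch_read_can_lock(rw):  (*(volatile int *)(rw) >= 0)  -- no writer
 *   arch_write_can_lock(rw): (*(volatile int *)(rw) == 0)  -- idle lock
 */
typedef struct {
	volatile unsigned int read_counter	: 31;	/* active readers */
	volatile unsigned int write_lock	:  1;	/* sign bit: writer holds lock */
} arch_rwlock_t;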