Diffstat (limited to 'arch/m32r/include/asm/spinlock.h')
-rw-r--r--   arch/m32r/include/asm/spinlock.h | 48
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index dded923883b2..179a06489b10 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
  * We make no fairness assumptions. They have a cost.
  */
 
-#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-        do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+        do { cpu_relax(); } while (arch_spin_is_locked(x))
 
 /**
- * __raw_spin_trylock - Try spin lock and return a result
+ * arch_spin_trylock - Try spin lock and return a result
  * @lock: Pointer to the lock variable
  *
- * __raw_spin_trylock() tries to get the lock and returns a result.
+ * arch_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         int oldval;
         unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
          * }
          */
         __asm__ __volatile__ (
-                "# __raw_spin_trylock \n\t"
+                "# arch_spin_trylock \n\t"
                 "ldi %1, #0; \n\t"
                 "mvfc %2, psw; \n\t"
                 "clrpsw #0x40 -> nop; \n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
         return (oldval > 0);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         unsigned long tmp0, tmp1;
 
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
          * }
          */
         __asm__ __volatile__ (
-                "# __raw_spin_lock \n\t"
+                "# arch_spin_lock \n\t"
                 ".fillinsn \n"
                 "1: \n\t"
                 "mvfc %1, psw; \n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
         );
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         mb();
         lock->slock = 1;
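
Taken together with the unlock in this hunk (mb() followed by a plain store of 1), the lock path is a decrement-and-spin loop: each contender atomically subtracts 1, the winner is whoever moves slock from 1 to 0, and losers spin until the holder stores 1 back. A hedged C sketch under the same assumptions as above (illustrative name, __atomic builtin standing in for LOCK/UNLOCK):

static inline void spin_lock_sketch(arch_spinlock_t *lock)
{
        for (;;) {
                /* Atomic decrement: from 1 (free) this yields 0 and wins. */
                if (__atomic_sub_fetch(&lock->slock, 1, __ATOMIC_ACQUIRE) == 0)
                        break;
                /* Lost: wait for the holder's "slock = 1", then retry. */
                while (*(volatile int *)&lock->slock <= 0)
                        cpu_relax();
        }
}
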
@@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp0, tmp1;
 
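
The two can_lock macros in this hunk encode the usual bias-counting rwlock scheme: the counter starts at RW_LOCK_BIAS, each reader subtracts 1, and a writer subtracts the entire bias, so any value <= 0 means a writer is involved. A short walkthrough of the counter, assuming the standard atomic_t helpers (the function exists only for illustration):

static inline void rw_bias_walkthrough(void)
{
        atomic_t cnt = ATOMIC_INIT(RW_LOCK_BIAS);       /* free: write_can_lock */

        atomic_dec(&cnt);               /* reader in:  RW_LOCK_BIAS - 1, still > 0 */
        atomic_inc(&cnt);               /* reader out: RW_LOCK_BIAS */
        atomic_sub(RW_LOCK_BIAS, &cnt); /* writer in:  0, both can_lock now false */
        atomic_add(RW_LOCK_BIAS, &cnt); /* writer out: RW_LOCK_BIAS */
}
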
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
         );
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp0, tmp1, tmp2;
 
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
         );
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned long tmp0, tmp1;
 
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
         );
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         unsigned long tmp0, tmp1, tmp2;
 
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
         );
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t*)lock;
         if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
         return 0;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t *)lock;
         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
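
Both trylock hunks here are clipped mid-function by the hunk boundaries. A hedged reconstruction of the missing middles, following the speculative-claim-then-undo pattern the visible atomic calls imply (the _sketch names are illustrative, not kernel symbols):

static inline int read_trylock_sketch(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        if (atomic_dec_return(count) >= 0)      /* no writer present */
                return 1;
        atomic_inc(count);                      /* back out the reader claim */
        return 0;
}

static inline int write_trylock_sketch(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        if (atomic_sub_and_test(RW_LOCK_BIAS, count))   /* hit 0: was free */
                return 1;
        atomic_add(RW_LOCK_BIAS, count);        /* back out the writer claim */
        return 0;
}
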
@@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
         return 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* _ASM_M32R_SPINLOCK_H */