diff options
| author | Thomas Gleixner <tglx@linutronix.de> | 2009-12-03 14:08:46 -0500 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2009-12-14 17:55:32 -0500 |
| commit | e5931943d02bf751b1ec849c0d2ade23d76a8d41 (patch) | |
| tree | 119fe3bc583d0d043d97cb9edd98bad52692a546 /arch/m32r/include | |
| parent | fb3a6bbc912b12347614e5742c7c61416cdb0ca0 (diff) | |
locking: Convert raw_rwlock functions to arch_rwlock
Name space cleanup for rwlock functions. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
Diffstat (limited to 'arch/m32r/include')
| -rw-r--r-- | arch/m32r/include/asm/spinlock.h | 20 |
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index 1c76af8c8e1b..179a06489b10 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h | |||
| @@ -140,15 +140,15 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
| 140 | * read_can_lock - would read_trylock() succeed? | 140 | * read_can_lock - would read_trylock() succeed? |
| 141 | * @lock: the rwlock in question. | 141 | * @lock: the rwlock in question. |
| 142 | */ | 142 | */ |
| 143 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) | 143 | #define arch_read_can_lock(x) ((int)(x)->lock > 0) |
| 144 | 144 | ||
| 145 | /** | 145 | /** |
| 146 | * write_can_lock - would write_trylock() succeed? | 146 | * write_can_lock - would write_trylock() succeed? |
| 147 | * @lock: the rwlock in question. | 147 | * @lock: the rwlock in question. |
| 148 | */ | 148 | */ |
| 149 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 149 | #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
| 150 | 150 | ||
| 151 | static inline void __raw_read_lock(arch_rwlock_t *rw) | 151 | static inline void arch_read_lock(arch_rwlock_t *rw) |
| 152 | { | 152 | { |
| 153 | unsigned long tmp0, tmp1; | 153 | unsigned long tmp0, tmp1; |
| 154 | 154 | ||
| @@ -199,7 +199,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) | |||
| 199 | ); | 199 | ); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | static inline void __raw_write_lock(arch_rwlock_t *rw) | 202 | static inline void arch_write_lock(arch_rwlock_t *rw) |
| 203 | { | 203 | { |
| 204 | unsigned long tmp0, tmp1, tmp2; | 204 | unsigned long tmp0, tmp1, tmp2; |
| 205 | 205 | ||
| @@ -252,7 +252,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) | |||
| 252 | ); | 252 | ); |
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | static inline void __raw_read_unlock(arch_rwlock_t *rw) | 255 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
| 256 | { | 256 | { |
| 257 | unsigned long tmp0, tmp1; | 257 | unsigned long tmp0, tmp1; |
| 258 | 258 | ||
| @@ -274,7 +274,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) | |||
| 274 | ); | 274 | ); |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | static inline void __raw_write_unlock(arch_rwlock_t *rw) | 277 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
| 278 | { | 278 | { |
| 279 | unsigned long tmp0, tmp1, tmp2; | 279 | unsigned long tmp0, tmp1, tmp2; |
| 280 | 280 | ||
| @@ -298,7 +298,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw) | |||
| 298 | ); | 298 | ); |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | static inline int __raw_read_trylock(arch_rwlock_t *lock) | 301 | static inline int arch_read_trylock(arch_rwlock_t *lock) |
| 302 | { | 302 | { |
| 303 | atomic_t *count = (atomic_t*)lock; | 303 | atomic_t *count = (atomic_t*)lock; |
| 304 | if (atomic_dec_return(count) >= 0) | 304 | if (atomic_dec_return(count) >= 0) |
| @@ -307,7 +307,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *lock) | |||
| 307 | return 0; | 307 | return 0; |
| 308 | } | 308 | } |
| 309 | 309 | ||
| 310 | static inline int __raw_write_trylock(arch_rwlock_t *lock) | 310 | static inline int arch_write_trylock(arch_rwlock_t *lock) |
| 311 | { | 311 | { |
| 312 | atomic_t *count = (atomic_t *)lock; | 312 | atomic_t *count = (atomic_t *)lock; |
| 313 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 313 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
| @@ -316,8 +316,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *lock) | |||
| 316 | return 0; | 316 | return 0; |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 319 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
| 320 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 320 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
| 321 | 321 | ||
| 322 | #define arch_spin_relax(lock) cpu_relax() | 322 | #define arch_spin_relax(lock) cpu_relax() |
| 323 | #define arch_read_relax(lock) cpu_relax() | 323 | #define arch_read_relax(lock) cpu_relax() |
