commit 0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 (patch)

| author    | Thomas Gleixner <tglx@linutronix.de> | 2009-12-02 14:01:25 -0500 |
|-----------|--------------------------------------|---------------------------|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2009-12-14 17:55:32 -0500 |
| tree      | e371d17bd73d64332349debbf45962ec67e7269d | /arch/mips/include/asm/spinlock.h |
| parent    | edc35bd72e2079b25f99c5da7d7a65dbbffc4a26 | (diff) |
locking: Convert __raw_spin* functions to arch_spin*
Name space cleanup. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
Diffstat (limited to 'arch/mips/include/asm/spinlock.h')
-rw-r--r--  arch/mips/include/asm/spinlock.h | 36 ++++++++++++++++++------------------
1 files changed, 18 insertions, 18 deletions
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 0f16d0673b4a..95edebaaf22a 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h | |||
| @@ -34,33 +34,33 @@ | |||
| 34 | * becomes equal to the the initial value of the tail. | 34 | * becomes equal to the the initial value of the tail. |
| 35 | */ | 35 | */ |
| 36 | 36 | ||
| 37 | static inline int __raw_spin_is_locked(arch_spinlock_t *lock) | 37 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
| 38 | { | 38 | { |
| 39 | unsigned int counters = ACCESS_ONCE(lock->lock); | 39 | unsigned int counters = ACCESS_ONCE(lock->lock); |
| 40 | 40 | ||
| 41 | return ((counters >> 14) ^ counters) & 0x1fff; | 41 | return ((counters >> 14) ^ counters) & 0x1fff; |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 44 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
| 45 | #define __raw_spin_unlock_wait(x) \ | 45 | #define arch_spin_unlock_wait(x) \ |
| 46 | while (__raw_spin_is_locked(x)) { cpu_relax(); } | 46 | while (arch_spin_is_locked(x)) { cpu_relax(); } |
| 47 | 47 | ||
| 48 | static inline int __raw_spin_is_contended(arch_spinlock_t *lock) | 48 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
| 49 | { | 49 | { |
| 50 | unsigned int counters = ACCESS_ONCE(lock->lock); | 50 | unsigned int counters = ACCESS_ONCE(lock->lock); |
| 51 | 51 | ||
| 52 | return (((counters >> 14) - counters) & 0x1fff) > 1; | 52 | return (((counters >> 14) - counters) & 0x1fff) > 1; |
| 53 | } | 53 | } |
| 54 | #define __raw_spin_is_contended __raw_spin_is_contended | 54 | #define arch_spin_is_contended arch_spin_is_contended |
| 55 | 55 | ||
| 56 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 56 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
| 57 | { | 57 | { |
| 58 | int my_ticket; | 58 | int my_ticket; |
| 59 | int tmp; | 59 | int tmp; |
| 60 | 60 | ||
| 61 | if (R10000_LLSC_WAR) { | 61 | if (R10000_LLSC_WAR) { |
| 62 | __asm__ __volatile__ ( | 62 | __asm__ __volatile__ ( |
| 63 | " .set push # __raw_spin_lock \n" | 63 | " .set push # arch_spin_lock \n" |
| 64 | " .set noreorder \n" | 64 | " .set noreorder \n" |
| 65 | " \n" | 65 | " \n" |
| 66 | "1: ll %[ticket], %[ticket_ptr] \n" | 66 | "1: ll %[ticket], %[ticket_ptr] \n" |
| @@ -94,7 +94,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) | |||
| 94 | [my_ticket] "=&r" (my_ticket)); | 94 | [my_ticket] "=&r" (my_ticket)); |
| 95 | } else { | 95 | } else { |
| 96 | __asm__ __volatile__ ( | 96 | __asm__ __volatile__ ( |
| 97 | " .set push # __raw_spin_lock \n" | 97 | " .set push # arch_spin_lock \n" |
| 98 | " .set noreorder \n" | 98 | " .set noreorder \n" |
| 99 | " \n" | 99 | " \n" |
| 100 | " ll %[ticket], %[ticket_ptr] \n" | 100 | " ll %[ticket], %[ticket_ptr] \n" |
| @@ -134,7 +134,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) | |||
| 134 | smp_llsc_mb(); | 134 | smp_llsc_mb(); |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 137 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
| 138 | { | 138 | { |
| 139 | int tmp; | 139 | int tmp; |
| 140 | 140 | ||
| @@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) | |||
| 142 | 142 | ||
| 143 | if (R10000_LLSC_WAR) { | 143 | if (R10000_LLSC_WAR) { |
| 144 | __asm__ __volatile__ ( | 144 | __asm__ __volatile__ ( |
| 145 | " # __raw_spin_unlock \n" | 145 | " # arch_spin_unlock \n" |
| 146 | "1: ll %[ticket], %[ticket_ptr] \n" | 146 | "1: ll %[ticket], %[ticket_ptr] \n" |
| 147 | " addiu %[ticket], %[ticket], 1 \n" | 147 | " addiu %[ticket], %[ticket], 1 \n" |
| 148 | " ori %[ticket], %[ticket], 0x2000 \n" | 148 | " ori %[ticket], %[ticket], 0x2000 \n" |
| @@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) | |||
| 153 | [ticket] "=&r" (tmp)); | 153 | [ticket] "=&r" (tmp)); |
| 154 | } else { | 154 | } else { |
| 155 | __asm__ __volatile__ ( | 155 | __asm__ __volatile__ ( |
| 156 | " .set push # __raw_spin_unlock \n" | 156 | " .set push # arch_spin_unlock \n" |
| 157 | " .set noreorder \n" | 157 | " .set noreorder \n" |
| 158 | " \n" | 158 | " \n" |
| 159 | " ll %[ticket], %[ticket_ptr] \n" | 159 | " ll %[ticket], %[ticket_ptr] \n" |
| @@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) | |||
| 174 | } | 174 | } |
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) | 177 | static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) |
| 178 | { | 178 | { |
| 179 | int tmp, tmp2, tmp3; | 179 | int tmp, tmp2, tmp3; |
| 180 | 180 | ||
| 181 | if (R10000_LLSC_WAR) { | 181 | if (R10000_LLSC_WAR) { |
| 182 | __asm__ __volatile__ ( | 182 | __asm__ __volatile__ ( |
| 183 | " .set push # __raw_spin_trylock \n" | 183 | " .set push # arch_spin_trylock \n" |
| 184 | " .set noreorder \n" | 184 | " .set noreorder \n" |
| 185 | " \n" | 185 | " \n" |
| 186 | "1: ll %[ticket], %[ticket_ptr] \n" | 186 | "1: ll %[ticket], %[ticket_ptr] \n" |
| @@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) | |||
| 204 | [now_serving] "=&r" (tmp3)); | 204 | [now_serving] "=&r" (tmp3)); |
| 205 | } else { | 205 | } else { |
| 206 | __asm__ __volatile__ ( | 206 | __asm__ __volatile__ ( |
| 207 | " .set push # __raw_spin_trylock \n" | 207 | " .set push # arch_spin_trylock \n" |
| 208 | " .set noreorder \n" | 208 | " .set noreorder \n" |
| 209 | " \n" | 209 | " \n" |
| 210 | " ll %[ticket], %[ticket_ptr] \n" | 210 | " ll %[ticket], %[ticket_ptr] \n" |
| @@ -483,8 +483,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
| 483 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 483 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
| 484 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 484 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) |
| 485 | 485 | ||
| 486 | #define _raw_spin_relax(lock) cpu_relax() | 486 | #define arch_spin_relax(lock) cpu_relax() |
| 487 | #define _raw_read_relax(lock) cpu_relax() | 487 | #define arch_read_relax(lock) cpu_relax() |
| 488 | #define _raw_write_relax(lock) cpu_relax() | 488 | #define arch_write_relax(lock) cpu_relax() |
| 489 | 489 | ||
| 490 | #endif /* _ASM_SPINLOCK_H */ | 490 | #endif /* _ASM_SPINLOCK_H */ |
