author     Heiko Carstens <heiko.carstens@de.ibm.com>   2009-08-31 08:43:31 -0400
committer  Ingo Molnar <mingo@elte.hu>                  2009-08-31 12:08:48 -0400
commit     8307a98097222f4d9c2e62ebccd6f5df439328de
tree       0816a6bcdc5a91a71507b6d92e32c258699f67fe   /arch/powerpc
parent     bbe69aa57a7374b51242b95a54eefcf0d0393b7e
locking, powerpc: Rename __spin_try_lock() and friends
This rename is needed to avoid namespace conflicts when the common code
function bodies of _spin_try_lock() etc. are moved to a header file,
where the function name would be __spin_try_lock().
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124415.918799705@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
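
The clash this rename avoids can be illustrated with a small standalone C sketch; the type and the function bodies below are placeholders invented for illustration, only the naming mirrors the situation described in the message above:

/*
 * Minimal sketch of the name clash the rename avoids.  The struct layout
 * and the function bodies are placeholders, not the real kernel code.
 */
typedef struct {
	volatile unsigned int slock;
} raw_spinlock_t;

/* Generic body, as it would look once moved into a common header: */
static inline int __spin_trylock(raw_spinlock_t *lock)
{
	return lock->slock == 0;	/* placeholder for the generic wrapper */
}

/*
 * The powerpc header used to define its own __spin_trylock() (see the
 * diff below); a second definition with that name in the same translation
 * unit would not compile.  With the arch helper renamed, both coexist:
 */
static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
{
	return lock->slock;		/* placeholder for the ll/sc try-lock */
}
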
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/spinlock.h   20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c3b193121f81..198266cf9e2d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
-	return __spin_trylock(lock) == 0;
+	return arch_spin_trylock(lock) == 0;
 }
 
 /*
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		do {
 			HMT_low();
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		local_save_flags(flags_dis);
 		local_irq_restore(flags);
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static inline long __read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(raw_rwlock_t *rw)
 {
 	long tmp;
 
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw)
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long __write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(raw_rwlock_t *rw)
 {
 	long tmp, token;
 
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw)
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__read_trylock(rw) > 0))
+		if (likely(arch_read_trylock(rw) > 0))
 			break;
 		do {
 			HMT_low();
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__write_trylock(rw) == 0))
+		if (likely(arch_write_trylock(rw) == 0))
 			break;
 		do {
 			HMT_low();
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 
 static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
-	return __read_trylock(rw) > 0;
+	return arch_read_trylock(rw) > 0;
 }
 
 static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
-	return __write_trylock(rw) == 0;
+	return arch_write_trylock(rw) == 0;
 }
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
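
The renamed helpers keep the return-value convention documented in the context lines above: arch_spin_trylock() and arch_write_trylock() return the old lock value (0 means the lock was taken), while arch_read_trylock() returns the old value + 1 (> 0 means the read lock was taken). The boolean __raw_* wrappers in this file translate that into the usual trylock idiom. A short caller sketch follows; the calling function itself is illustrative, not part of this patch, and assumes the definitions from the header above:

/* Illustrative caller, assuming arch/powerpc/include/asm/spinlock.h. */
static void trylock_example(raw_spinlock_t *lock, raw_rwlock_t *rw)
{
	if (__raw_spin_trylock(lock)) {		/* arch_spin_trylock() returned 0 */
		/* ... spinlock-protected work ... */
		__raw_spin_unlock(lock);
	}

	if (__raw_read_trylock(rw)) {		/* arch_read_trylock() returned > 0 */
		/* ... read-side work ... */
		__raw_read_unlock(rw);
	}
}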