Diffstat (limited to 'arch/mips/include/asm/spinlock.h')
-rw-r--r--  arch/mips/include/asm/spinlock.h  |  78
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5b60a09a0f08..21ef9efbde43 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
34 | * becomes equal to the the initial value of the tail. | 34 | * becomes equal to the the initial value of the tail. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 37 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
38 | { | 38 | { |
39 | unsigned int counters = ACCESS_ONCE(lock->lock); | 39 | unsigned int counters = ACCESS_ONCE(lock->lock); |
40 | 40 | ||
41 | return ((counters >> 14) ^ counters) & 0x1fff; | 41 | return ((counters >> 14) ^ counters) & 0x1fff; |
42 | } | 42 | } |
43 | 43 | ||
44 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 44 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
45 | #define __raw_spin_unlock_wait(x) \ | 45 | #define arch_spin_unlock_wait(x) \ |
46 | while (__raw_spin_is_locked(x)) { cpu_relax(); } | 46 | while (arch_spin_is_locked(x)) { cpu_relax(); } |
47 | 47 | ||
48 | static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | 48 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
49 | { | 49 | { |
50 | unsigned int counters = ACCESS_ONCE(lock->lock); | 50 | unsigned int counters = ACCESS_ONCE(lock->lock); |
51 | 51 | ||
52 | return (((counters >> 14) - counters) & 0x1fff) > 1; | 52 | return (((counters >> 14) - counters) & 0x1fff) > 1; |
53 | } | 53 | } |
54 | #define __raw_spin_is_contended __raw_spin_is_contended | 54 | #define arch_spin_is_contended arch_spin_is_contended |
55 | 55 | ||
56 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 56 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
57 | { | 57 | { |
58 | int my_ticket; | 58 | int my_ticket; |
59 | int tmp; | 59 | int tmp; |
60 | 60 | ||
61 | if (R10000_LLSC_WAR) { | 61 | if (R10000_LLSC_WAR) { |
62 | __asm__ __volatile__ ( | 62 | __asm__ __volatile__ ( |
63 | " .set push # __raw_spin_lock \n" | 63 | " .set push # arch_spin_lock \n" |
64 | " .set noreorder \n" | 64 | " .set noreorder \n" |
65 | " \n" | 65 | " \n" |
66 | "1: ll %[ticket], %[ticket_ptr] \n" | 66 | "1: ll %[ticket], %[ticket_ptr] \n" |
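Aside, not part of the patch: the arch_spin_is_locked()/arch_spin_is_contended() tests above follow the ticket layout described by the comment at the top of the file, with the now-serving count in the low bits of the 32-bit lock word and the ticket tail starting at bit 14, each read through a 13-bit mask. A minimal, self-contained C sketch of that interpretation, using hypothetical helper names:

/* Illustration only: helper names are hypothetical, not from the patch. */
#include <stdio.h>

#define TICKET_MASK 0x1fffu                      /* the "& 0x1fff" used above */

static unsigned int ticket_tail(unsigned int counters)
{
        return (counters >> 14) & TICKET_MASK;   /* tail field, bits 14 and up */
}

static unsigned int ticket_serving(unsigned int counters)
{
        return counters & TICKET_MASK;           /* now-serving field, low bits */
}

int main(void)
{
        unsigned int counters = (3u << 14) | 1u; /* tail = 3, now-serving = 1 */

        /* The same expressions arch_spin_is_locked()/is_contended() use: */
        printf("locked    = %d\n", (((counters >> 14) ^ counters) & 0x1fff) != 0);
        printf("contended = %d\n", ((((counters >> 14) - counters) & 0x1fff) > 1));

        /* Field view: the lock is taken whenever tail != now-serving. */
        printf("tail = %u, serving = %u\n",
               ticket_tail(counters), ticket_serving(counters));
        return 0;
}

With tail = 3 and now-serving = 1 this prints locked = 1 and contended = 1: the CPU holding ticket 1 owns the lock and one more ticket is already queued behind it.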
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
94 | [my_ticket] "=&r" (my_ticket)); | 94 | [my_ticket] "=&r" (my_ticket)); |
95 | } else { | 95 | } else { |
96 | __asm__ __volatile__ ( | 96 | __asm__ __volatile__ ( |
97 | " .set push # __raw_spin_lock \n" | 97 | " .set push # arch_spin_lock \n" |
98 | " .set noreorder \n" | 98 | " .set noreorder \n" |
99 | " \n" | 99 | " \n" |
100 | " ll %[ticket], %[ticket_ptr] \n" | 100 | " ll %[ticket], %[ticket_ptr] \n" |
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
134 | smp_llsc_mb(); | 134 | smp_llsc_mb(); |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 137 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
138 | { | 138 | { |
139 | int tmp; | 139 | int tmp; |
140 | 140 | ||
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
142 | 142 | ||
143 | if (R10000_LLSC_WAR) { | 143 | if (R10000_LLSC_WAR) { |
144 | __asm__ __volatile__ ( | 144 | __asm__ __volatile__ ( |
145 | " # __raw_spin_unlock \n" | 145 | " # arch_spin_unlock \n" |
146 | "1: ll %[ticket], %[ticket_ptr] \n" | 146 | "1: ll %[ticket], %[ticket_ptr] \n" |
147 | " addiu %[ticket], %[ticket], 1 \n" | 147 | " addiu %[ticket], %[ticket], 1 \n" |
148 | " ori %[ticket], %[ticket], 0x2000 \n" | 148 | " ori %[ticket], %[ticket], 0x2000 \n" |
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
153 | [ticket] "=&r" (tmp)); | 153 | [ticket] "=&r" (tmp)); |
154 | } else { | 154 | } else { |
155 | __asm__ __volatile__ ( | 155 | __asm__ __volatile__ ( |
156 | " .set push # __raw_spin_unlock \n" | 156 | " .set push # arch_spin_unlock \n" |
157 | " .set noreorder \n" | 157 | " .set noreorder \n" |
158 | " \n" | 158 | " \n" |
159 | " ll %[ticket], %[ticket_ptr] \n" | 159 | " ll %[ticket], %[ticket_ptr] \n" |
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
174 | } | 174 | } |
175 | } | 175 | } |
176 | 176 | ||
177 | static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) | 177 | static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) |
178 | { | 178 | { |
179 | int tmp, tmp2, tmp3; | 179 | int tmp, tmp2, tmp3; |
180 | 180 | ||
181 | if (R10000_LLSC_WAR) { | 181 | if (R10000_LLSC_WAR) { |
182 | __asm__ __volatile__ ( | 182 | __asm__ __volatile__ ( |
183 | " .set push # __raw_spin_trylock \n" | 183 | " .set push # arch_spin_trylock \n" |
184 | " .set noreorder \n" | 184 | " .set noreorder \n" |
185 | " \n" | 185 | " \n" |
186 | "1: ll %[ticket], %[ticket_ptr] \n" | 186 | "1: ll %[ticket], %[ticket_ptr] \n" |
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
204 | [now_serving] "=&r" (tmp3)); | 204 | [now_serving] "=&r" (tmp3)); |
205 | } else { | 205 | } else { |
206 | __asm__ __volatile__ ( | 206 | __asm__ __volatile__ ( |
207 | " .set push # __raw_spin_trylock \n" | 207 | " .set push # arch_spin_trylock \n" |
208 | " .set noreorder \n" | 208 | " .set noreorder \n" |
209 | " \n" | 209 | " \n" |
210 | " ll %[ticket], %[ticket_ptr] \n" | 210 | " ll %[ticket], %[ticket_ptr] \n" |
@@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
248 | * read_can_lock - would read_trylock() succeed? | 248 | * read_can_lock - would read_trylock() succeed? |
249 | * @lock: the rwlock in question. | 249 | * @lock: the rwlock in question. |
250 | */ | 250 | */ |
251 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) | 251 | #define arch_read_can_lock(rw) ((rw)->lock >= 0) |
252 | 252 | ||
253 | /* | 253 | /* |
254 | * write_can_lock - would write_trylock() succeed? | 254 | * write_can_lock - would write_trylock() succeed? |
255 | * @lock: the rwlock in question. | 255 | * @lock: the rwlock in question. |
256 | */ | 256 | */ |
257 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 257 | #define arch_write_can_lock(rw) (!(rw)->lock) |
258 | 258 | ||
259 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 259 | static inline void arch_read_lock(arch_rwlock_t *rw) |
260 | { | 260 | { |
261 | unsigned int tmp; | 261 | unsigned int tmp; |
262 | 262 | ||
263 | if (R10000_LLSC_WAR) { | 263 | if (R10000_LLSC_WAR) { |
264 | __asm__ __volatile__( | 264 | __asm__ __volatile__( |
265 | " .set noreorder # __raw_read_lock \n" | 265 | " .set noreorder # arch_read_lock \n" |
266 | "1: ll %1, %2 \n" | 266 | "1: ll %1, %2 \n" |
267 | " bltz %1, 1b \n" | 267 | " bltz %1, 1b \n" |
268 | " addu %1, 1 \n" | 268 | " addu %1, 1 \n" |
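Again an aside, not part of the patch: the rwlock word is treated as a signed count. arch_read_lock() above adds 1 and spins while the word is negative (the bltz), and arch_write_lock() further down claims the lock by storing 0x80000000 ("lui %1, 0x8000"), i.e. setting bit 31; that is exactly what the arch_read_can_lock()/arch_write_can_lock() macros test. A hedged C sketch of that encoding, with invented names:

/* Illustration only: names are hypothetical, not from the patch. */

#define RW_WRITER_BIT 0x80000000u  /* what "lui %1, 0x8000" writes in arch_write_lock() */

/* arch_read_can_lock(rw) is ((rw)->lock >= 0): bit 31 clear, so no writer. */
static int rw_read_can_lock(int lock_word)
{
        return lock_word >= 0;
}

/* arch_write_can_lock(rw) is (!(rw)->lock): zero, so no readers and no writer. */
static int rw_write_can_lock(int lock_word)
{
        return lock_word == 0;
}

static int rw_writer_held(unsigned int lock_word)
{
        return (lock_word & RW_WRITER_BIT) != 0; /* the "negative" case the bltz spins on */
}

/* A positive word is simply the number of readers currently inside. */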
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
275 | : "memory"); | 275 | : "memory"); |
276 | } else { | 276 | } else { |
277 | __asm__ __volatile__( | 277 | __asm__ __volatile__( |
278 | " .set noreorder # __raw_read_lock \n" | 278 | " .set noreorder # arch_read_lock \n" |
279 | "1: ll %1, %2 \n" | 279 | "1: ll %1, %2 \n" |
280 | " bltz %1, 2f \n" | 280 | " bltz %1, 2f \n" |
281 | " addu %1, 1 \n" | 281 | " addu %1, 1 \n" |
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
301 | /* Note the use of sub, not subu which will make the kernel die with an | 301 | /* Note the use of sub, not subu which will make the kernel die with an |
302 | overflow exception if we ever try to unlock an rwlock that is already | 302 | overflow exception if we ever try to unlock an rwlock that is already |
303 | unlocked or is being held by a writer. */ | 303 | unlocked or is being held by a writer. */ |
304 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 304 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
305 | { | 305 | { |
306 | unsigned int tmp; | 306 | unsigned int tmp; |
307 | 307 | ||
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
309 | 309 | ||
310 | if (R10000_LLSC_WAR) { | 310 | if (R10000_LLSC_WAR) { |
311 | __asm__ __volatile__( | 311 | __asm__ __volatile__( |
312 | "1: ll %1, %2 # __raw_read_unlock \n" | 312 | "1: ll %1, %2 # arch_read_unlock \n" |
313 | " sub %1, 1 \n" | 313 | " sub %1, 1 \n" |
314 | " sc %1, %0 \n" | 314 | " sc %1, %0 \n" |
315 | " beqzl %1, 1b \n" | 315 | " beqzl %1, 1b \n" |
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
318 | : "memory"); | 318 | : "memory"); |
319 | } else { | 319 | } else { |
320 | __asm__ __volatile__( | 320 | __asm__ __volatile__( |
321 | " .set noreorder # __raw_read_unlock \n" | 321 | " .set noreorder # arch_read_unlock \n" |
322 | "1: ll %1, %2 \n" | 322 | "1: ll %1, %2 \n" |
323 | " sub %1, 1 \n" | 323 | " sub %1, 1 \n" |
324 | " sc %1, %0 \n" | 324 | " sc %1, %0 \n" |
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
335 | } | 335 | } |
336 | } | 336 | } |
337 | 337 | ||
338 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 338 | static inline void arch_write_lock(arch_rwlock_t *rw) |
339 | { | 339 | { |
340 | unsigned int tmp; | 340 | unsigned int tmp; |
341 | 341 | ||
342 | if (R10000_LLSC_WAR) { | 342 | if (R10000_LLSC_WAR) { |
343 | __asm__ __volatile__( | 343 | __asm__ __volatile__( |
344 | " .set noreorder # __raw_write_lock \n" | 344 | " .set noreorder # arch_write_lock \n" |
345 | "1: ll %1, %2 \n" | 345 | "1: ll %1, %2 \n" |
346 | " bnez %1, 1b \n" | 346 | " bnez %1, 1b \n" |
347 | " lui %1, 0x8000 \n" | 347 | " lui %1, 0x8000 \n" |
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
354 | : "memory"); | 354 | : "memory"); |
355 | } else { | 355 | } else { |
356 | __asm__ __volatile__( | 356 | __asm__ __volatile__( |
357 | " .set noreorder # __raw_write_lock \n" | 357 | " .set noreorder # arch_write_lock \n" |
358 | "1: ll %1, %2 \n" | 358 | "1: ll %1, %2 \n" |
359 | " bnez %1, 2f \n" | 359 | " bnez %1, 2f \n" |
360 | " lui %1, 0x8000 \n" | 360 | " lui %1, 0x8000 \n" |
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
377 | smp_llsc_mb(); | 377 | smp_llsc_mb(); |
378 | } | 378 | } |
379 | 379 | ||
380 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 380 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
381 | { | 381 | { |
382 | smp_mb(); | 382 | smp_mb(); |
383 | 383 | ||
384 | __asm__ __volatile__( | 384 | __asm__ __volatile__( |
385 | " # __raw_write_unlock \n" | 385 | " # arch_write_unlock \n" |
386 | " sw $0, %0 \n" | 386 | " sw $0, %0 \n" |
387 | : "=m" (rw->lock) | 387 | : "=m" (rw->lock) |
388 | : "m" (rw->lock) | 388 | : "m" (rw->lock) |
389 | : "memory"); | 389 | : "memory"); |
390 | } | 390 | } |
391 | 391 | ||
392 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 392 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
393 | { | 393 | { |
394 | unsigned int tmp; | 394 | unsigned int tmp; |
395 | int ret; | 395 | int ret; |
396 | 396 | ||
397 | if (R10000_LLSC_WAR) { | 397 | if (R10000_LLSC_WAR) { |
398 | __asm__ __volatile__( | 398 | __asm__ __volatile__( |
399 | " .set noreorder # __raw_read_trylock \n" | 399 | " .set noreorder # arch_read_trylock \n" |
400 | " li %2, 0 \n" | 400 | " li %2, 0 \n" |
401 | "1: ll %1, %3 \n" | 401 | "1: ll %1, %3 \n" |
402 | " bltz %1, 2f \n" | 402 | " bltz %1, 2f \n" |
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
413 | : "memory"); | 413 | : "memory"); |
414 | } else { | 414 | } else { |
415 | __asm__ __volatile__( | 415 | __asm__ __volatile__( |
416 | " .set noreorder # __raw_read_trylock \n" | 416 | " .set noreorder # arch_read_trylock \n" |
417 | " li %2, 0 \n" | 417 | " li %2, 0 \n" |
418 | "1: ll %1, %3 \n" | 418 | "1: ll %1, %3 \n" |
419 | " bltz %1, 2f \n" | 419 | " bltz %1, 2f \n" |
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
433 | return ret; | 433 | return ret; |
434 | } | 434 | } |
435 | 435 | ||
436 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 436 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
437 | { | 437 | { |
438 | unsigned int tmp; | 438 | unsigned int tmp; |
439 | int ret; | 439 | int ret; |
440 | 440 | ||
441 | if (R10000_LLSC_WAR) { | 441 | if (R10000_LLSC_WAR) { |
442 | __asm__ __volatile__( | 442 | __asm__ __volatile__( |
443 | " .set noreorder # __raw_write_trylock \n" | 443 | " .set noreorder # arch_write_trylock \n" |
444 | " li %2, 0 \n" | 444 | " li %2, 0 \n" |
445 | "1: ll %1, %3 \n" | 445 | "1: ll %1, %3 \n" |
446 | " bnez %1, 2f \n" | 446 | " bnez %1, 2f \n" |
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
457 | : "memory"); | 457 | : "memory"); |
458 | } else { | 458 | } else { |
459 | __asm__ __volatile__( | 459 | __asm__ __volatile__( |
460 | " .set noreorder # __raw_write_trylock \n" | 460 | " .set noreorder # arch_write_trylock \n" |
461 | " li %2, 0 \n" | 461 | " li %2, 0 \n" |
462 | "1: ll %1, %3 \n" | 462 | "1: ll %1, %3 \n" |
463 | " bnez %1, 2f \n" | 463 | " bnez %1, 2f \n" |
@@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
480 | return ret; | 480 | return ret; |
481 | } | 481 | } |
482 | 482 | ||
483 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 483 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
484 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 484 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
485 | 485 | ||
486 | #define _raw_spin_relax(lock) cpu_relax() | 486 | #define arch_spin_relax(lock) cpu_relax() |
487 | #define _raw_read_relax(lock) cpu_relax() | 487 | #define arch_read_relax(lock) cpu_relax() |
488 | #define _raw_write_relax(lock) cpu_relax() | 488 | #define arch_write_relax(lock) cpu_relax() |
489 | 489 | ||
490 | #endif /* _ASM_SPINLOCK_H */ | 490 | #endif /* _ASM_SPINLOCK_H */ |
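For context: this file is the MIPS part of the tree-wide rename that moved the architecture-level lock primitives and types from __raw_*/raw_spinlock_t to arch_*/arch_spinlock_t, freeing the raw_spin* names for the generic layer. A rough caller-side sketch, assuming the generic raw_spinlock_t wraps the arch type in a raw_lock member as mainline does after that series (simplified, not taken from this patch; the example_ prefix marks these as illustrations of the layering rather than the kernel's own helpers):

/* Simplified sketch of assumed layering; not taken from this patch. */
static inline void example_do_raw_spin_lock(raw_spinlock_t *lock)
{
        arch_spin_lock(&lock->raw_lock);   /* ends up in the MIPS ticket-lock code above */
}

static inline void example_do_raw_spin_unlock(raw_spinlock_t *lock)
{
        arch_spin_unlock(&lock->raw_lock);
}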