Diffstat (limited to 'arch/mips/include/asm/spinlock.h')
-rw-r--r--	arch/mips/include/asm/spinlock.h	42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 7bf27c8a3364..21ef9efbde43 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -248,21 +248,21 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(rw)	((rw)->lock >= 0)
+#define arch_read_can_lock(rw)	((rw)->lock >= 0)
 
 /*
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(rw)	(!(rw)->lock)
+#define arch_write_can_lock(rw)	(!(rw)->lock)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_read_lock	\n"
+		"	.set	noreorder	# arch_read_lock	\n"
 		"1:	ll	%1, %2					\n"
 		"	bltz	%1, 1b					\n"
 		"	 addu	%1, 1					\n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
275 : "memory"); 275 : "memory");
276 } else { 276 } else {
277 __asm__ __volatile__( 277 __asm__ __volatile__(
278 " .set noreorder # __raw_read_lock \n" 278 " .set noreorder # arch_read_lock \n"
279 "1: ll %1, %2 \n" 279 "1: ll %1, %2 \n"
280 " bltz %1, 2f \n" 280 " bltz %1, 2f \n"
281 " addu %1, 1 \n" 281 " addu %1, 1 \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
 /* Note the use of sub, not subu which will make the kernel die with an
    overflow exception if we ever try to unlock an rwlock that is already
    unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"1:	ll	%1, %2		# __raw_read_unlock	\n"
+		"1:	ll	%1, %2		# arch_read_unlock	\n"
 		"	sub	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
318 : "memory"); 318 : "memory");
319 } else { 319 } else {
320 __asm__ __volatile__( 320 __asm__ __volatile__(
321 " .set noreorder # __raw_read_unlock \n" 321 " .set noreorder # arch_read_unlock \n"
322 "1: ll %1, %2 \n" 322 "1: ll %1, %2 \n"
323 " sub %1, 1 \n" 323 " sub %1, 1 \n"
324 " sc %1, %0 \n" 324 " sc %1, %0 \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
 	}
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_write_lock	\n"
+		"	.set	noreorder	# arch_write_lock	\n"
 		"1:	ll	%1, %2					\n"
 		"	bnez	%1, 1b					\n"
 		"	 lui	%1, 0x8000				\n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
354 : "memory"); 354 : "memory");
355 } else { 355 } else {
356 __asm__ __volatile__( 356 __asm__ __volatile__(
357 " .set noreorder # __raw_write_lock \n" 357 " .set noreorder # arch_write_lock \n"
358 "1: ll %1, %2 \n" 358 "1: ll %1, %2 \n"
359 " bnez %1, 2f \n" 359 " bnez %1, 2f \n"
360 " lui %1, 0x8000 \n" 360 " lui %1, 0x8000 \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
 	smp_llsc_mb();
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();
 
 	__asm__ __volatile__(
-	"				# __raw_write_unlock	\n"
+	"				# arch_write_unlock	\n"
 	"	sw	$0, %0					\n"
 	: "=m" (rw->lock)
 	: "m" (rw->lock)
 	: "memory");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 	int ret;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_read_trylock	\n"
+		"	.set	noreorder	# arch_read_trylock	\n"
 		"	li	%2, 0					\n"
 		"1:	ll	%1, %3					\n"
 		"	bltz	%1, 2f					\n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
413 : "memory"); 413 : "memory");
414 } else { 414 } else {
415 __asm__ __volatile__( 415 __asm__ __volatile__(
416 " .set noreorder # __raw_read_trylock \n" 416 " .set noreorder # arch_read_trylock \n"
417 " li %2, 0 \n" 417 " li %2, 0 \n"
418 "1: ll %1, %3 \n" 418 "1: ll %1, %3 \n"
419 " bltz %1, 2f \n" 419 " bltz %1, 2f \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
 	return ret;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 	int ret;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	noreorder	# __raw_write_trylock	\n"
+		"	.set	noreorder	# arch_write_trylock	\n"
 		"	li	%2, 0					\n"
 		"1:	ll	%1, %3					\n"
 		"	bnez	%1, 2f					\n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
457 : "memory"); 457 : "memory");
458 } else { 458 } else {
459 __asm__ __volatile__( 459 __asm__ __volatile__(
460 " .set noreorder # __raw_write_trylock \n" 460 " .set noreorder # arch_write_trylock \n"
461 " li %2, 0 \n" 461 " li %2, 0 \n"
462 "1: ll %1, %3 \n" 462 "1: ll %1, %3 \n"
463 " bnez %1, 2f \n" 463 " bnez %1, 2f \n"
@@ -480,8 +480,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
 	return ret;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
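
Note on the lock-word encoding these routines share: readers count up in the low bits, and a writer claims the word by setting the sign bit (the "lui %1, 0x8000" above loads 0x80000000), which is why arch_read_can_lock() tests lock >= 0 and arch_write_can_lock() tests !lock. The sub-vs-subu comment relies on the same encoding: a trapping signed subtract dies loudly if the count is decremented past a legal state. Below is a minimal portable sketch of that encoding, using C11 atomics in place of the ll/sc retry loops; every toy_* name is invented for illustration and none of this is the kernel's actual API.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Sign bit marks a writer, mirroring "lui %1, 0x8000" (0x80000000). */
#define TOY_WRITER_BIT	INT32_MIN

typedef struct { _Atomic int32_t lock; } toy_rwlock;

/* lock >= 0: no writer, readers may enter (cf. arch_read_can_lock). */
static bool toy_read_can_lock(toy_rwlock *rw)
{
	return atomic_load(&rw->lock) >= 0;
}

/* lock == 0: no readers and no writer (cf. arch_write_can_lock). */
static bool toy_write_can_lock(toy_rwlock *rw)
{
	return atomic_load(&rw->lock) == 0;
}

/* Readers bump the count while the sign bit is clear; the
 * compare-exchange loop stands in for the ll/bltz/addu/sc loop. */
static bool toy_read_trylock(toy_rwlock *rw)
{
	int32_t old = atomic_load(&rw->lock);

	while (old >= 0) {
		if (atomic_compare_exchange_weak(&rw->lock, &old, old + 1))
			return true;	/* reader count incremented */
	}
	return false;			/* sign bit set: writer holds it */
}

/* A writer swings the word from 0 straight to the writer bit,
 * as the ll/bnez/lui/sc sequence does. */
static bool toy_write_trylock(toy_rwlock *rw)
{
	int32_t expected = 0;

	return atomic_compare_exchange_strong(&rw->lock, &expected,
					      TOY_WRITER_BIT);
}

/* The kernel's read-unlock uses the trapping "sub" so that unlocking an
 * already-unlocked or writer-held lock dies with an overflow exception;
 * an assert is the closest portable analogue of that deliberate trap. */
static void toy_read_unlock(toy_rwlock *rw)
{
	int32_t old = atomic_fetch_sub(&rw->lock, 1);

	assert(old > 0);	/* old <= 0 would mean underflow */
}

/* Write-unlock is a plain store of zero, like "sw $0, %0" after smp_mb(). */
static void toy_write_unlock(toy_rwlock *rw)
{
	atomic_store(&rw->lock, 0);
}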