author		Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:02:01 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:02:01 -0500
commit		8f0ddf91f2aeb09602373e400cf8b403e9017210 (patch)
tree		b907c35c79caadafff6ad46a91614e30afd2f967
parent		050cbb09dac0402672edeaeac06094ef8ff1749a (diff)
parent		b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  clockevents: Convert to raw_spinlock
  clockevents: Make tick_device_lock static
  debugobjects: Convert to raw_spinlocks
  perf_event: Convert to raw_spinlock
  hrtimers: Convert to raw_spinlocks
  genirq: Convert irq_desc.lock to raw_spinlock
  smp: Convert smplocks to raw_spinlocks
  rtmutes: Convert rtmutex.lock to raw_spinlock
  sched: Convert pi_lock to raw_spinlock
  sched: Convert cpupri lock to raw_spinlock
  sched: Convert rt_runtime_lock to raw_spinlock
  sched: Convert rq->lock to raw_spinlock
  plist: Make plist debugging raw_spinlock aware
  bkl: Fixup core_lock fallout
  locking: Cleanup the name space completely
  locking: Further name space cleanups
  alpha: Fix fallout from locking changes
  locking: Implement new raw_spinlock
  locking: Convert raw_rwlock functions to arch_rwlock
  locking: Convert raw_rwlock to arch_rwlock
  ...
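For orientation before the diffstat: this series renames the arch-level lock primitives from __raw_* to arch_* and makes raw_spinlock_t the lock type that always spins. A minimal usage sketch of the post-merge API follows; touch_hae() is a hypothetical caller invented here for illustration, and only the lock name mirrors the alpha t2_hae_lock converted below.

#include <linux/spinlock.h>

/* A raw spinlock keeps spinning semantics even where the preempt-rt
 * work turns an ordinary spinlock_t into a sleeping lock, so core
 * code (irq_desc.lock, rq->lock, hrtimer bases, ...) uses it. */
static DEFINE_RAW_SPINLOCK(t2_hae_lock);

static void touch_hae(void)		/* hypothetical caller */
{
	unsigned long flags;

	raw_spin_lock_irqsave(&t2_hae_lock, flags);
	/* ... non-sleeping hardware access ... */
	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}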
-rw-r--r--  arch/alpha/include/asm/core_t2.h | 34
-rw-r--r--  arch/alpha/include/asm/spinlock.h | 38
-rw-r--r--  arch/alpha/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/alpha/kernel/core_t2.c | 2
-rw-r--r--  arch/alpha/kernel/irq.c | 4
-rw-r--r--  arch/arm/include/asm/mach/irq.h | 4
-rw-r--r--  arch/arm/include/asm/spinlock.h | 40
-rw-r--r--  arch/arm/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/arm/kernel/irq.c | 12
-rw-r--r--  arch/arm/mach-ns9xxx/irq.c | 8
-rw-r--r--  arch/avr32/kernel/irq.c | 4
-rw-r--r--  arch/blackfin/include/asm/spinlock.h | 62
-rw-r--r--  arch/blackfin/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/blackfin/kernel/irqchip.c | 6
-rw-r--r--  arch/blackfin/kernel/traps.c | 4
-rw-r--r--  arch/cris/include/arch-v32/arch/spinlock.h | 62
-rw-r--r--  arch/cris/kernel/irq.c | 4
-rw-r--r--  arch/frv/kernel/irq.c | 4
-rw-r--r--  arch/h8300/kernel/irq.c | 4
-rw-r--r--  arch/ia64/include/asm/bitops.h | 2
-rw-r--r--  arch/ia64/include/asm/spinlock.h | 76
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/ia64/kernel/iosapic.c | 6
-rw-r--r--  arch/ia64/kernel/irq.c | 4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 4
-rw-r--r--  arch/m32r/include/asm/spinlock.h | 48
-rw-r--r--  arch/m32r/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/m32r/kernel/irq.c | 4
-rw-r--r--  arch/microblaze/kernel/irq.c | 4
-rw-r--r--  arch/mips/include/asm/spinlock.h | 78
-rw-r--r--  arch/mips/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/mips/kernel/irq.c | 4
-rw-r--r--  arch/mips/vr41xx/common/icu.c | 92
-rw-r--r--  arch/mn10300/kernel/irq.c | 4
-rw-r--r--  arch/parisc/include/asm/atomic.h | 10
-rw-r--r--  arch/parisc/include/asm/spinlock.h | 64
-rw-r--r--  arch/parisc/include/asm/spinlock_types.h | 12
-rw-r--r--  arch/parisc/kernel/irq.c | 4
-rw-r--r--  arch/parisc/lib/bitops.c | 4
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 2
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 68
-rw-r--r--  arch/powerpc/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/powerpc/kernel/irq.c | 8
-rw-r--r--  arch/powerpc/kernel/rtas.c | 16
-rw-r--r--  arch/powerpc/lib/locks.c | 8
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c | 8
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 8
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 4
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c | 10
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 4
-rw-r--r--  arch/powerpc/sysdev/uic.c | 8
-rw-r--r--  arch/s390/include/asm/spinlock.h | 66
-rw-r--r--  arch/s390/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/s390/lib/spinlock.c | 46
-rw-r--r--  arch/sh/include/asm/spinlock.h | 58
-rw-r--r--  arch/sh/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/sh/kernel/irq.c | 4
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h | 62
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h | 54
-rw-r--r--  arch/sparc/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/sparc/kernel/irq_64.c | 8
-rw-r--r--  arch/um/kernel/irq.c | 4
-rw-r--r--  arch/x86/include/asm/paravirt.h | 14
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 14
-rw-r--r--  arch/x86/include/asm/spinlock.h | 62
-rw-r--r--  arch/x86/include/asm/spinlock_types.h | 10
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r--  arch/x86/kernel/dumpstack.c | 8
-rw-r--r--  arch/x86/kernel/irq.c | 14
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c | 4
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 10
-rw-r--r--  arch/x86/xen/spinlock.c | 16
-rw-r--r--  arch/xtensa/kernel/irq.c | 4
-rw-r--r--  include/asm-generic/bitops/atomic.h | 10
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/irq.h | 2
-rw-r--r--  include/linux/perf_event.h | 2
-rw-r--r--  include/linux/plist.h | 43
-rw-r--r--  include/linux/rtmutex.h | 6
-rw-r--r--  include/linux/rwlock.h | 125
-rw-r--r--  include/linux/rwlock_api_smp.h | 282
-rw-r--r--  include/linux/rwlock_types.h | 56
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/linux/spinlock.h | 377
-rw-r--r--  include/linux/spinlock_api_smp.h | 360
-rw-r--r--  include/linux/spinlock_api_up.h | 66
-rw-r--r--  include/linux/spinlock_types.h | 92
-rw-r--r--  include/linux/spinlock_types_up.h | 12
-rw-r--r--  include/linux/spinlock_up.h | 42
-rw-r--r--  kernel/exit.c | 2
-rw-r--r--  kernel/fork.c | 4
-rw-r--r--  kernel/futex.c | 50
-rw-r--r--  kernel/hrtimer.c | 50
-rw-r--r--  kernel/hw_breakpoint.c | 4
-rw-r--r--  kernel/irq/autoprobe.c | 20
-rw-r--r--  kernel/irq/chip.c | 86
-rw-r--r--  kernel/irq/handle.c | 22
-rw-r--r--  kernel/irq/internals.h | 2
-rw-r--r--  kernel/irq/manage.c | 50
-rw-r--r--  kernel/irq/migration.c | 2
-rw-r--r--  kernel/irq/numa_migrate.c | 8
-rw-r--r--  kernel/irq/pm.c | 8
-rw-r--r--  kernel/irq/proc.c | 4
-rw-r--r--  kernel/irq/spurious.c | 14
-rw-r--r--  kernel/lockdep.c | 20
-rw-r--r--  kernel/mutex-debug.h | 12
-rw-r--r--  kernel/perf_event.c | 106
-rw-r--r--  kernel/rtmutex-debug.c | 4
-rw-r--r--  kernel/rtmutex.c | 106
-rw-r--r--  kernel/sched.c | 223
-rw-r--r--  kernel/sched_cpupri.c | 10
-rw-r--r--  kernel/sched_cpupri.h | 2
-rw-r--r--  kernel/sched_debug.c | 4
-rw-r--r--  kernel/sched_fair.c | 4
-rw-r--r--  kernel/sched_idletask.c | 4
-rw-r--r--  kernel/sched_rt.c | 60
-rw-r--r--  kernel/smp.c | 32
-rw-r--r--  kernel/spinlock.c | 306
-rw-r--r--  kernel/time/clockevents.c | 14
-rw-r--r--  kernel/time/tick-broadcast.c | 42
-rw-r--r--  kernel/time/tick-common.c | 20
-rw-r--r--  kernel/time/tick-internal.h | 1
-rw-r--r--  kernel/time/timer_list.c | 6
-rw-r--r--  kernel/time/timer_stats.c | 17
-rw-r--r--  kernel/trace/ring_buffer.c | 16
-rw-r--r--  kernel/trace/trace.c | 50
-rw-r--r--  kernel/trace/trace_clock.c | 8
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 16
-rw-r--r--  kernel/trace/trace_selftest.c | 4
-rw-r--r--  kernel/trace/trace_stack.c | 16
-rw-r--r--  lib/debugobjects.c | 74
-rw-r--r--  lib/kernel_lock.c | 22
-rw-r--r--  lib/plist.c | 8
-rw-r--r--  lib/spinlock_debug.c | 64
136 files changed, 2394 insertions(+), 2086 deletions(-)
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 46bfff58f670..471c07292e0b 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -435,7 +435,7 @@ extern inline void t2_outl(u32 b, unsigned long addr)
 	set_hae(msb); \
 }
 
-extern spinlock_t t2_hae_lock;
+extern raw_spinlock_t t2_hae_lock;
 
 /*
  * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -448,12 +448,12 @@ __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long result, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extbl(result, addr & 3);
 }
 
@@ -462,12 +462,12 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long result, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extwl(result, addr & 3);
 }
 
@@ -480,12 +480,12 @@ __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long result, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return result & 0xffffffffUL;
 }
 
@@ -494,14 +494,14 @@ __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long r0, r1, work, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	r0 = *(vuip)(work);
 	r1 = *(vuip)(work + (4 << 5));
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return r1 << 32 | r0;
 }
 
@@ -510,13 +510,13 @@ __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb, w;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	w = __kernel_insbl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
@@ -524,13 +524,13 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb, w;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	w = __kernel_inswl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 /*
@@ -542,12 +542,12 @@ __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
@@ -555,14 +555,14 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb, work;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	*(vuip)work = b;
 	*(vuip)(work + (4 << 5)) = b >> 32;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index e38fb95cb335..d0faca1e992d 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
  * We make no fairness assumptions. They have a cost.
  */
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_is_locked(x)	((x)->lock != 0)
-#define __raw_spin_unlock_wait(x) \
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_is_locked(x)	((x)->lock != 0)
+#define arch_spin_unlock_wait(x) \
 		do { cpu_relax(); } while ((x)->lock)
 
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void arch_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void arch_spin_lock(arch_spinlock_t * lock)
 {
 	long tmp;
 
@@ -43,24 +43,24 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
 	: "m"(lock->lock) : "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return !test_and_set_bit(0, &lock->lock);
 }
 
 /***********************************************************/
 
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (lock->lock & 1) == 0;
 }
 
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return lock->lock == 0;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
 {
 	long regx;
 
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock)
 	: "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
 {
 	long regx;
 
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock)
 	: "m" (*lock) : "memory");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
 {
 	long regx;
 	int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock)
 	return success;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
 {
 	long regx;
 	int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock)
 	return success;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
 {
 	long regx;
 	__asm__ __volatile__(
@@ -160,17 +160,17 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock)
 	: "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 8141eb5ebf0d..54c2afce0a1d 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }
 
 #endif
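The spinlock_types.h hunk above is the pattern repeated for every architecture in this merge: the arch-provided type loses its raw_ prefix so that raw_spinlock_t can become a generic wrapper. A simplified sketch of the resulting layering, with debug and lockdep fields omitted (field names follow include/linux/spinlock_types.h after this series):

/* arch level: what each <asm/spinlock_types.h> now defines */
typedef struct {
	volatile unsigned int lock;
} arch_spinlock_t;

/* generic level: always a spinning lock, wraps the arch type */
typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;
} raw_spinlock_t;

/* API level: on mainline just a raw spinlock; on preempt-rt this
 * is the type that can be substituted by a sleeping lock */
typedef struct spinlock {
	struct raw_spinlock rlock;
} spinlock_t;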
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index d9980d47ab81..e6d90568b65d 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -74,7 +74,7 @@
 # define DBG(args)
 #endif
 
-DEFINE_SPINLOCK(t2_hae_lock);
+DEFINE_RAW_SPINLOCK(t2_hae_lock);
 
 static volatile unsigned int t2_mcheck_any_expected;
 static volatile unsigned int t2_mcheck_last_taken;
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index c0de072b8305..5f2cf23c4648 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v)
 #endif
 
 	if (irq < ACTUAL_NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[irq].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
 		action = irq_desc[irq].action;
 		if (!action)
 			goto unlock;
@@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	} else if (irq == ACTUAL_NR_IRQS) {
 #ifdef CONFIG_SMP
 		seq_puts(p, "IPI: ");
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index acac5302e4ea..8920b2d6e3b8 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *);
  */
 #define do_bad_IRQ(irq,desc)			\
 do {						\
-	spin_lock(&desc->lock);			\
+	raw_spin_lock(&desc->lock);		\
 	handle_bad_irq(irq, desc);		\
-	spin_unlock(&desc->lock);		\
+	raw_spin_unlock(&desc->lock);		\
 } while(0)
 
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c13681ac1ede..c91c64cab922 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
  * Locked value: 1
  */
 
-#define __raw_spin_is_locked(x)	((x)->lock != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x)	((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	smp_mb();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	}
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	smp_mb();
 
@@ -86,7 +86,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * just write zero since the lock is exclusively held.
  */
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	smp_mb();
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	}
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();
 
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x)	((x)->lock == 0)
+#define arch_write_can_lock(x)	((x)->lock == 0)
 
 /*
  * Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
  * currently active. However, we know we won't have any write
  * locks.
  */
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	smp_mb();
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	: "cc");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2 = 1;
 
@@ -215,13 +215,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x)	((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)	((x)->lock < 0x80000000)
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 43e83f6d2ee5..d14d197ae04a 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }
 
 #endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c9a8619f3856..b7cb45bb91e8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 #ifdef CONFIG_FIQ
 		show_fiq_list(p, v);
@@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 	}
 
 	desc = irq_desc + irq;
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	if (iflags & IRQF_VALID)
 		desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 		desc->status &= ~IRQ_NOPROBE;
 	if (!(iflags & IRQF_NOAUTOEN))
 		desc->status &= ~IRQ_NOAUTOEN;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 void __init init_IRQ(void)
@@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
 	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 }
 
 /*
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index feb0e54a91de..038f24d47023 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	BUG_ON(desc->status & IRQ_INPROGRESS);
 
@@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_mask;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 
@@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 	 * Maybe this function should go to kernel/irq/chip.c? */
 	note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
 	if (desc->status & IRQ_DISABLED)
@@ -97,7 +97,7 @@ out_mask:
 	/* ack unconditionally to unmask lower prio irqs */
 	desc->chip->ack(irq);
 
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 #define handle_irq handle_prio_irq
 #endif
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
index 09904d22309f..9604f7758f9a 100644
--- a/arch/avr32/kernel/irq.c
+++ b/arch/avr32/kernel/irq.c
@@ -42,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -57,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 	unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
 
 	return 0;
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index b0c7f0ee4b03..1942ccfedbe0 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,84 +17,84 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __raw_spin_is_locked_asm(&lock->lock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__raw_spin_lock_asm(&lock->lock);
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __raw_spin_trylock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
-static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
 {
 	return __raw_uncached_fetch_asm(&rw->lock) > 0;
 }
 
-static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
 {
 	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	__raw_read_lock_asm(&rw->lock);
+	arch_read_lock_asm(&rw->lock);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return __raw_read_trylock_asm(&rw->lock);
+	return arch_read_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	__raw_read_unlock_asm(&rw->lock);
+	arch_read_unlock_asm(&rw->lock);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	__raw_write_lock_asm(&rw->lock);
+	arch_write_lock_asm(&rw->lock);
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return __raw_write_trylock_asm(&rw->lock);
+	return arch_write_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	__raw_write_unlock_asm(&rw->lock);
+	arch_write_unlock_asm(&rw->lock);
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif
 
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index be75762c0610..1a33608c958b 100644
--- a/arch/blackfin/include/asm/spinlock_types.h
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -15,14 +15,14 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED	{ RW_LOCK_BIAS }
 
 #endif
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index db9f9c91f11f..64cff54a8a58 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq)
 
 static struct irq_desc bad_irq_desc = {
 	.handle_irq = handle_bad_irq,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
 };
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	unsigned long flags;
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 	skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 78cb3d38f899..9636bace00e8 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp)
 	if (fp->ipend & ~0x3F) {
 		for (i = 0; i < (NR_IRQS - 1); i++) {
 			if (!in_atomic)
-				spin_lock_irqsave(&irq_desc[i].lock, flags);
+				raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 
 			action = irq_desc[i].action;
 			if (!action)
@@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp)
 			verbose_printk("\n");
 unlock:
 			if (!in_atomic)
-				spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+				raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 		}
 	}
 
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 367a53ea10c5..f171a6600fbc 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	return *(volatile signed char *)(&(x)->slock) <= 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ volatile ("move.d %1,%0" \
 			  : "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 			  : "memory");
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	cris_spin_lock((void *)&lock->slock);
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 /*
@@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
  *
  */
 
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
 {
 	return (int)(x)->lock > 0;
 }
 
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
 {
 	return (x)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock == 0);
 	rw->lock--;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = 0;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	rw->lock++;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = RW_LOCK_BIAS;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock != 0) {
 		rw->lock--;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return ret;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock == RW_LOCK_BIAS) {
 		rw->lock = 0;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return 1;
 }
 
 #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_ARCH_SPINLOCK_H */
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 0ca7d9892cc6..b5ce0724a88f 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
 	return 0;
 }
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index af3e824b91b3..62d1aba615dc 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (action) {
 			seq_printf(p, "%3d: ", i);
@@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_putc(p, '\n');
 		}
 
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
 	}
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index 5c913d472119..c25dc2c2b1da 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_puts(p, " CPU0");
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_printf(p, ", %s", action->name);
 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
 	return 0;
 }
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
  * @addr: Address to start counting from
  *
  * Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
  */
 static __inline__ void
 __clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516d..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
17#include <asm/intrinsics.h> 17#include <asm/intrinsics.h>
18#include <asm/system.h> 18#include <asm/system.h>
19 19
20#define __raw_spin_lock_init(x) ((x)->lock = 0) 20#define arch_spin_lock_init(x) ((x)->lock = 0)
21 21
22/* 22/*
23 * Ticket locks are conceptually two parts, one indicating the current head of 23 * Ticket locks are conceptually two parts, one indicating the current head of
@@ -38,7 +38,7 @@
38#define TICKET_BITS 15 38#define TICKET_BITS 15
39#define TICKET_MASK ((1 << TICKET_BITS) - 1) 39#define TICKET_MASK ((1 << TICKET_BITS) - 1)
40 40
41static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) 41static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
42{ 42{
43 int *p = (int *)&lock->lock, ticket, serve; 43 int *p = (int *)&lock->lock, ticket, serve;
44 44
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
58 } 58 }
59} 59}
60 60
61static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) 61static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
62{ 62{
63 int tmp = ACCESS_ONCE(lock->lock); 63 int tmp = ACCESS_ONCE(lock->lock);
64 64
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
67 return 0; 67 return 0;
68} 68}
69 69
70static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) 70static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
71{ 71{
72 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; 72 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
73 73
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
75 ACCESS_ONCE(*p) = (tmp + 2) & ~1; 75 ACCESS_ONCE(*p) = (tmp + 2) & ~1;
76} 76}
77 77
78static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) 78static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
79{ 79{
80 int *p = (int *)&lock->lock, ticket; 80 int *p = (int *)&lock->lock, ticket;
81 81
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
89 } 89 }
90} 90}
91 91
92static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) 92static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
93{ 93{
94 long tmp = ACCESS_ONCE(lock->lock); 94 long tmp = ACCESS_ONCE(lock->lock);
95 95
96 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); 96 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
97} 97}
98 98
99static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) 99static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
100{ 100{
101 long tmp = ACCESS_ONCE(lock->lock); 101 long tmp = ACCESS_ONCE(lock->lock);
102 102
103 return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; 103 return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
104} 104}
105 105
106static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 106static inline int arch_spin_is_locked(arch_spinlock_t *lock)
107{ 107{
108 return __ticket_spin_is_locked(lock); 108 return __ticket_spin_is_locked(lock);
109} 109}
110 110
111static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 111static inline int arch_spin_is_contended(arch_spinlock_t *lock)
112{ 112{
113 return __ticket_spin_is_contended(lock); 113 return __ticket_spin_is_contended(lock);
114} 114}
115#define __raw_spin_is_contended __raw_spin_is_contended 115#define arch_spin_is_contended arch_spin_is_contended
116 116
117static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) 117static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
118{ 118{
119 __ticket_spin_lock(lock); 119 __ticket_spin_lock(lock);
120} 120}
121 121
122static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) 122static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
123{ 123{
124 return __ticket_spin_trylock(lock); 124 return __ticket_spin_trylock(lock);
125} 125}
126 126
127static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) 127static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
128{ 128{
129 __ticket_spin_unlock(lock); 129 __ticket_spin_unlock(lock);
130} 130}
131 131
132static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, 132static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
133 unsigned long flags) 133 unsigned long flags)
134{ 134{
135 __raw_spin_lock(lock); 135 arch_spin_lock(lock);
136} 136}
137 137
138static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 138static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
139{ 139{
140 __ticket_spin_unlock_wait(lock); 140 __ticket_spin_unlock_wait(lock);
141} 141}
142 142
143#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) 143#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
144#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) 144#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
145 145
146#ifdef ASM_SUPPORTED 146#ifdef ASM_SUPPORTED
147 147
148static __always_inline void 148static __always_inline void
149__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) 149arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
150{ 150{
151 __asm__ __volatile__ ( 151 __asm__ __volatile__ (
152 "tbit.nz p6, p0 = %1,%2\n" 152 "tbit.nz p6, p0 = %1,%2\n"
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
169 : "p6", "p7", "r2", "memory"); 169 : "p6", "p7", "r2", "memory");
170} 170}
171 171
172#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) 172#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
173 173
174#else /* !ASM_SUPPORTED */ 174#else /* !ASM_SUPPORTED */
175 175
176#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) 176#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
177 177
178#define __raw_read_lock(rw) \ 178#define arch_read_lock(rw) \
179do { \ 179do { \
180 raw_rwlock_t *__read_lock_ptr = (rw); \ 180 arch_rwlock_t *__read_lock_ptr = (rw); \
181 \ 181 \
182 while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ 182 while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
183 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ 183 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -188,16 +188,16 @@ do { \
188 188
189#endif /* !ASM_SUPPORTED */ 189#endif /* !ASM_SUPPORTED */
190 190
191#define __raw_read_unlock(rw) \ 191#define arch_read_unlock(rw) \
192do { \ 192do { \
193 raw_rwlock_t *__read_lock_ptr = (rw); \ 193 arch_rwlock_t *__read_lock_ptr = (rw); \
194 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ 194 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
195} while (0) 195} while (0)
196 196
197#ifdef ASM_SUPPORTED 197#ifdef ASM_SUPPORTED
198 198
199static __always_inline void 199static __always_inline void
200__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) 200arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
201{ 201{
202 __asm__ __volatile__ ( 202 __asm__ __volatile__ (
203 "tbit.nz p6, p0 = %1, %2\n" 203 "tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
221 : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); 221 : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
222} 222}
223 223
224#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) 224#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
225 225
226#define __raw_write_trylock(rw) \ 226#define arch_write_trylock(rw) \
227({ \ 227({ \
228 register long result; \ 228 register long result; \
229 \ 229 \
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
235 (result == 0); \ 235 (result == 0); \
236}) 236})
237 237
238static inline void __raw_write_unlock(raw_rwlock_t *x) 238static inline void arch_write_unlock(arch_rwlock_t *x)
239{ 239{
240 u8 *y = (u8 *)x; 240 u8 *y = (u8 *)x;
241 barrier(); 241 barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
244 244
245#else /* !ASM_SUPPORTED */ 245#else /* !ASM_SUPPORTED */
246 246
247#define __raw_write_lock_flags(l, flags) __raw_write_lock(l) 247#define arch_write_lock_flags(l, flags) arch_write_lock(l)
248 248
249#define __raw_write_lock(l) \ 249#define arch_write_lock(l) \
250({ \ 250({ \
251 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ 251 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
252 __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ 252 __u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
257 } while (ia64_val); \ 257 } while (ia64_val); \
258}) 258})
259 259
260#define __raw_write_trylock(rw) \ 260#define arch_write_trylock(rw) \
261({ \ 261({ \
262 __u64 ia64_val; \ 262 __u64 ia64_val; \
263 __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ 263 __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
265 (ia64_val == 0); \ 265 (ia64_val == 0); \
266}) 266})
267 267
268static inline void __raw_write_unlock(raw_rwlock_t *x) 268static inline void arch_write_unlock(arch_rwlock_t *x)
269{ 269{
270 barrier(); 270 barrier();
271 x->write_lock = 0; 271 x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
273 273
274#endif /* !ASM_SUPPORTED */ 274#endif /* !ASM_SUPPORTED */
275 275
276static inline int __raw_read_trylock(raw_rwlock_t *x) 276static inline int arch_read_trylock(arch_rwlock_t *x)
277{ 277{
278 union { 278 union {
279 raw_rwlock_t lock; 279 arch_rwlock_t lock;
280 __u32 word; 280 __u32 word;
281 } old, new; 281 } old, new;
282 old.lock = new.lock = *x; 282 old.lock = new.lock = *x;
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
285 return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; 285 return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
286} 286}
287 287
288#define _raw_spin_relax(lock) cpu_relax() 288#define arch_spin_relax(lock) cpu_relax()
289#define _raw_read_relax(lock) cpu_relax() 289#define arch_read_relax(lock) cpu_relax()
290#define _raw_write_relax(lock) cpu_relax() 290#define arch_write_relax(lock) cpu_relax()
291 291
292#endif /* _ASM_IA64_SPINLOCK_H */ 292#endif /* _ASM_IA64_SPINLOCK_H */
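
A note on the ia64 rwlock fallback above: the !ASM_SUPPORTED arch_read_lock() spins on a fetchadd, optimistically bumping the lock word and backing out whenever the old value was negative, since the sign bit doubles as the writer flag. A minimal user-space sketch of that idea, assuming C11 atomics in place of ia64_fetchadd (all demo_* names are illustrative, not kernel API):

#include <stdatomic.h>

typedef struct { atomic_int lock; } demo_rwlock_t;

static void demo_read_lock(demo_rwlock_t *rw)
{
	/* a negative old value means the writer bit (bit 31) was set */
	while (atomic_fetch_add_explicit(&rw->lock, 1,
					 memory_order_acquire) < 0) {
		atomic_fetch_sub_explicit(&rw->lock, 1,
					  memory_order_release);
		while (atomic_load_explicit(&rw->lock,
					    memory_order_relaxed) < 0)
			;	/* spin until the writer is gone */
	}
}

static void demo_read_unlock(demo_rwlock_t *rw)
{
	atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}
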
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4a..e2b42a52a6d3 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned int lock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int read_counter : 31; 15 volatile unsigned int read_counter : 31;
16 volatile unsigned int write_lock : 1; 16 volatile unsigned int write_lock : 1;
17} raw_rwlock_t; 17} arch_rwlock_t;
18 18
19#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } 19#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
20 20
21#endif 21#endif
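
This layout is what makes the arch_read_trylock() in spinlock.h above work: the 31-bit read_counter and the 1-bit write_lock share one 32-bit word, so a single compare-and-swap can admit a reader only while no writer is present. A hedged sketch of the same union trick, with a GCC builtin standing in for ia64_cmpxchg4_acq:

typedef struct {
	volatile unsigned int read_counter : 31;
	volatile unsigned int write_lock   :  1;
} demo_rw_t;

static int demo_read_trylock(demo_rw_t *x)
{
	union {
		demo_rw_t lock;
		unsigned int word;
	} old, new;

	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;	/* require no writer */
	new.lock.read_counter++;			/* claim a reader slot */
	return __sync_bool_compare_and_swap((unsigned int *)x,
					    old.word, new.word);
}
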
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..95ac77aeae9b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
793 goto unlock_iosapic_lock; 793 goto unlock_iosapic_lock;
794 } 794 }
795 795
796 spin_lock(&irq_desc[irq].lock); 796 raw_spin_lock(&irq_desc[irq].lock);
797 dest = get_target_cpu(gsi, irq); 797 dest = get_target_cpu(gsi, irq);
798 dmode = choose_dmode(); 798 dmode = choose_dmode();
799 err = register_intr(gsi, irq, dmode, polarity, trigger); 799 err = register_intr(gsi, irq, dmode, polarity, trigger);
800 if (err < 0) { 800 if (err < 0) {
801 spin_unlock(&irq_desc[irq].lock); 801 raw_spin_unlock(&irq_desc[irq].lock);
802 irq = err; 802 irq = err;
803 goto unlock_iosapic_lock; 803 goto unlock_iosapic_lock;
804 } 804 }
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
817 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 817 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
818 cpu_logical_id(dest), dest, irq_to_vector(irq)); 818 cpu_logical_id(dest), dest, irq_to_vector(irq));
819 819
820 spin_unlock(&irq_desc[irq].lock); 820 raw_spin_unlock(&irq_desc[irq].lock);
821 unlock_iosapic_lock: 821 unlock_iosapic_lock:
822 spin_unlock_irqrestore(&iosapic_lock, flags); 822 spin_unlock_irqrestore(&iosapic_lock, flags);
823 return irq; 823 return irq;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
71 } 71 }
72 72
73 if (i < NR_IRQS) { 73 if (i < NR_IRQS) {
74 spin_lock_irqsave(&irq_desc[i].lock, flags); 74 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
75 action = irq_desc[i].action; 75 action = irq_desc[i].action;
76 if (!action) 76 if (!action)
77 goto skip; 77 goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
91 91
92 seq_putc(p, '\n'); 92 seq_putc(p, '\n');
93skip: 93skip:
94 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 94 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
95 } else if (i == NR_IRQS) 95 } else if (i == NR_IRQS)
96 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 96 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
97 return 0; 97 return 0;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..70e4bad23432 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
345 345
346 desc = irq_desc + irq; 346 desc = irq_desc + irq;
347 cfg = irq_cfg + irq; 347 cfg = irq_cfg + irq;
348 spin_lock(&desc->lock); 348 raw_spin_lock(&desc->lock);
349 if (!cfg->move_cleanup_count) 349 if (!cfg->move_cleanup_count)
350 goto unlock; 350 goto unlock;
351 351
@@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
358 spin_unlock_irqrestore(&vector_lock, flags); 358 spin_unlock_irqrestore(&vector_lock, flags);
359 cfg->move_cleanup_count--; 359 cfg->move_cleanup_count--;
360 unlock: 360 unlock:
361 spin_unlock(&desc->lock); 361 raw_spin_unlock(&desc->lock);
362 } 362 }
363 return IRQ_HANDLED; 363 return IRQ_HANDLED;
364} 364}
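
The irq_desc changes in this file and in the per-arch irq files below are one mechanical pattern: irq_desc[].lock is now a raw_spinlock_t, so every locker moves to the raw_* API, which keeps genuine spinning semantics even in configurations where ordinary spinlocks can become sleeping locks. The resulting shape, as a kernel-context sketch (demo_poke_hw() is a hypothetical helper, not part of the patch):

static void demo_poke_hw(void);

static void demo_touch_irq(struct irq_desc *desc)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	demo_poke_hw();		/* device access under the raw lock */
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
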
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index dded923883b2..179a06489b10 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
24 * We make no fairness assumptions. They have a cost. 24 * We make no fairness assumptions. They have a cost.
25 */ 25 */
26 26
27#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) 27#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
28#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 28#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
29#define __raw_spin_unlock_wait(x) \ 29#define arch_spin_unlock_wait(x) \
30 do { cpu_relax(); } while (__raw_spin_is_locked(x)) 30 do { cpu_relax(); } while (arch_spin_is_locked(x))
31 31
32/** 32/**
33 * __raw_spin_trylock - Try spin lock and return a result 33 * arch_spin_trylock - Try spin lock and return a result
34 * @lock: Pointer to the lock variable 34 * @lock: Pointer to the lock variable
35 * 35 *
36 * __raw_spin_trylock() tries to get the lock and returns a result. 36 * arch_spin_trylock() tries to get the lock and returns a result.
37 * On the m32r, the result value is 1 (= Success) or 0 (= Failure). 37 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
38 */ 38 */
39static inline int __raw_spin_trylock(raw_spinlock_t *lock) 39static inline int arch_spin_trylock(arch_spinlock_t *lock)
40{ 40{
41 int oldval; 41 int oldval;
42 unsigned long tmp1, tmp2; 42 unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
50 * } 50 * }
51 */ 51 */
52 __asm__ __volatile__ ( 52 __asm__ __volatile__ (
53 "# __raw_spin_trylock \n\t" 53 "# arch_spin_trylock \n\t"
54 "ldi %1, #0; \n\t" 54 "ldi %1, #0; \n\t"
55 "mvfc %2, psw; \n\t" 55 "mvfc %2, psw; \n\t"
56 "clrpsw #0x40 -> nop; \n\t" 56 "clrpsw #0x40 -> nop; \n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
69 return (oldval > 0); 69 return (oldval > 0);
70} 70}
71 71
72static inline void __raw_spin_lock(raw_spinlock_t *lock) 72static inline void arch_spin_lock(arch_spinlock_t *lock)
73{ 73{
74 unsigned long tmp0, tmp1; 74 unsigned long tmp0, tmp1;
75 75
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
84 * } 84 * }
85 */ 85 */
86 __asm__ __volatile__ ( 86 __asm__ __volatile__ (
87 "# __raw_spin_lock \n\t" 87 "# arch_spin_lock \n\t"
88 ".fillinsn \n" 88 ".fillinsn \n"
89 "1: \n\t" 89 "1: \n\t"
90 "mvfc %1, psw; \n\t" 90 "mvfc %1, psw; \n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
111 ); 111 );
112} 112}
113 113
114static inline void __raw_spin_unlock(raw_spinlock_t *lock) 114static inline void arch_spin_unlock(arch_spinlock_t *lock)
115{ 115{
116 mb(); 116 mb();
117 lock->slock = 1; 117 lock->slock = 1;
@@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
140 * read_can_lock - would read_trylock() succeed? 140 * read_can_lock - would read_trylock() succeed?
141 * @lock: the rwlock in question. 141 * @lock: the rwlock in question.
142 */ 142 */
143#define __raw_read_can_lock(x) ((int)(x)->lock > 0) 143#define arch_read_can_lock(x) ((int)(x)->lock > 0)
144 144
145/** 145/**
146 * write_can_lock - would write_trylock() succeed? 146 * write_can_lock - would write_trylock() succeed?
147 * @lock: the rwlock in question. 147 * @lock: the rwlock in question.
148 */ 148 */
149#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) 149#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
150 150
151static inline void __raw_read_lock(raw_rwlock_t *rw) 151static inline void arch_read_lock(arch_rwlock_t *rw)
152{ 152{
153 unsigned long tmp0, tmp1; 153 unsigned long tmp0, tmp1;
154 154
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
199 ); 199 );
200} 200}
201 201
202static inline void __raw_write_lock(raw_rwlock_t *rw) 202static inline void arch_write_lock(arch_rwlock_t *rw)
203{ 203{
204 unsigned long tmp0, tmp1, tmp2; 204 unsigned long tmp0, tmp1, tmp2;
205 205
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
252 ); 252 );
253} 253}
254 254
255static inline void __raw_read_unlock(raw_rwlock_t *rw) 255static inline void arch_read_unlock(arch_rwlock_t *rw)
256{ 256{
257 unsigned long tmp0, tmp1; 257 unsigned long tmp0, tmp1;
258 258
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
274 ); 274 );
275} 275}
276 276
277static inline void __raw_write_unlock(raw_rwlock_t *rw) 277static inline void arch_write_unlock(arch_rwlock_t *rw)
278{ 278{
279 unsigned long tmp0, tmp1, tmp2; 279 unsigned long tmp0, tmp1, tmp2;
280 280
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
298 ); 298 );
299} 299}
300 300
301static inline int __raw_read_trylock(raw_rwlock_t *lock) 301static inline int arch_read_trylock(arch_rwlock_t *lock)
302{ 302{
303 atomic_t *count = (atomic_t*)lock; 303 atomic_t *count = (atomic_t*)lock;
304 if (atomic_dec_return(count) >= 0) 304 if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
307 return 0; 307 return 0;
308} 308}
309 309
310static inline int __raw_write_trylock(raw_rwlock_t *lock) 310static inline int arch_write_trylock(arch_rwlock_t *lock)
311{ 311{
312 atomic_t *count = (atomic_t *)lock; 312 atomic_t *count = (atomic_t *)lock;
313 if (atomic_sub_and_test(RW_LOCK_BIAS, count)) 313 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
316 return 0; 316 return 0;
317} 317}
318 318
319#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 319#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
320#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 320#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
321 321
322#define _raw_spin_relax(lock) cpu_relax() 322#define arch_spin_relax(lock) cpu_relax()
323#define _raw_read_relax(lock) cpu_relax() 323#define arch_read_relax(lock) cpu_relax()
324#define _raw_write_relax(lock) cpu_relax() 324#define arch_write_relax(lock) cpu_relax()
325 325
326#endif /* _ASM_M32R_SPINLOCK_H */ 326#endif /* _ASM_M32R_SPINLOCK_H */
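
The m32r rwlock is the classic RW_LOCK_BIAS counter: the word starts at the bias, each reader subtracts 1, and a writer subtracts the whole bias, so a writer drives the count negative for readers and only an untouched count lets a writer in. A user-space sketch of the two trylock paths above, assuming GCC atomic builtins in place of atomic_t (DEMO_RW_BIAS mirrors RW_LOCK_BIAS):

#define DEMO_RW_BIAS	0x01000000

static int demo_read_trylock(int *count)
{
	if (__atomic_sub_fetch(count, 1, __ATOMIC_ACQUIRE) >= 0)
		return 1;			/* reader admitted */
	__atomic_add_fetch(count, 1, __ATOMIC_RELEASE);	/* writer held it: undo */
	return 0;
}

static int demo_write_trylock(int *count)
{
	if (__atomic_sub_fetch(count, DEMO_RW_BIAS, __ATOMIC_ACQUIRE) == 0)
		return 1;			/* no readers, no writer */
	__atomic_add_fetch(count, DEMO_RW_BIAS, __ATOMIC_RELEASE);
	return 0;
}
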
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 83f52105c0e4..92e27672661f 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -7,17 +7,17 @@
7 7
8typedef struct { 8typedef struct {
9 volatile int slock; 9 volatile int slock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 1 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
13 13
14typedef struct { 14typedef struct {
15 volatile int lock; 15 volatile int lock;
16} raw_rwlock_t; 16} arch_rwlock_t;
17 17
18#define RW_LOCK_BIAS 0x01000000 18#define RW_LOCK_BIAS 0x01000000
19#define RW_LOCK_BIAS_STR "0x01000000" 19#define RW_LOCK_BIAS_STR "0x01000000"
20 20
21#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 21#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
22 22
23#endif /* _ASM_M32R_SPINLOCK_TYPES_H */ 23#endif /* _ASM_M32R_SPINLOCK_TYPES_H */
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 8dfd31e87c4c..3c71f776872c 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v)
40 } 40 }
41 41
42 if (i < NR_IRQS) { 42 if (i < NR_IRQS) {
43 spin_lock_irqsave(&irq_desc[i].lock, flags); 43 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
44 action = irq_desc[i].action; 44 action = irq_desc[i].action;
45 if (!action) 45 if (!action)
46 goto skip; 46 goto skip;
@@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v)
59 59
60 seq_putc(p, '\n'); 60 seq_putc(p, '\n');
61skip: 61skip:
62 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 62 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
63 } 63 }
64 return 0; 64 return 0;
65} 65}
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 7d5ddd62d4d2..0f06034d1fe0 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v)
68 } 68 }
69 69
70 if (i < nr_irq) { 70 if (i < nr_irq) {
71 spin_lock_irqsave(&irq_desc[i].lock, flags); 71 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
72 action = irq_desc[i].action; 72 action = irq_desc[i].action;
73 if (!action) 73 if (!action)
74 goto skip; 74 goto skip;
@@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p, void *v)
89 89
90 seq_putc(p, '\n'); 90 seq_putc(p, '\n');
91skip: 91skip:
92 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 92 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
93 } 93 }
94 return 0; 94 return 0;
95} 95}
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5b60a09a0f08..21ef9efbde43 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
34 * becomes equal to the initial value of the tail. 34 * becomes equal to the initial value of the tail.
35 */ 35 */
36 36
37static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 37static inline int arch_spin_is_locked(arch_spinlock_t *lock)
38{ 38{
39 unsigned int counters = ACCESS_ONCE(lock->lock); 39 unsigned int counters = ACCESS_ONCE(lock->lock);
40 40
41 return ((counters >> 14) ^ counters) & 0x1fff; 41 return ((counters >> 14) ^ counters) & 0x1fff;
42} 42}
43 43
44#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 44#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
45#define __raw_spin_unlock_wait(x) \ 45#define arch_spin_unlock_wait(x) \
46 while (__raw_spin_is_locked(x)) { cpu_relax(); } 46 while (arch_spin_is_locked(x)) { cpu_relax(); }
47 47
48static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 48static inline int arch_spin_is_contended(arch_spinlock_t *lock)
49{ 49{
50 unsigned int counters = ACCESS_ONCE(lock->lock); 50 unsigned int counters = ACCESS_ONCE(lock->lock);
51 51
52 return (((counters >> 14) - counters) & 0x1fff) > 1; 52 return (((counters >> 14) - counters) & 0x1fff) > 1;
53} 53}
54#define __raw_spin_is_contended __raw_spin_is_contended 54#define arch_spin_is_contended arch_spin_is_contended
55 55
56static inline void __raw_spin_lock(raw_spinlock_t *lock) 56static inline void arch_spin_lock(arch_spinlock_t *lock)
57{ 57{
58 int my_ticket; 58 int my_ticket;
59 int tmp; 59 int tmp;
60 60
61 if (R10000_LLSC_WAR) { 61 if (R10000_LLSC_WAR) {
62 __asm__ __volatile__ ( 62 __asm__ __volatile__ (
63 " .set push # __raw_spin_lock \n" 63 " .set push # arch_spin_lock \n"
64 " .set noreorder \n" 64 " .set noreorder \n"
65 " \n" 65 " \n"
66 "1: ll %[ticket], %[ticket_ptr] \n" 66 "1: ll %[ticket], %[ticket_ptr] \n"
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
94 [my_ticket] "=&r" (my_ticket)); 94 [my_ticket] "=&r" (my_ticket));
95 } else { 95 } else {
96 __asm__ __volatile__ ( 96 __asm__ __volatile__ (
97 " .set push # __raw_spin_lock \n" 97 " .set push # arch_spin_lock \n"
98 " .set noreorder \n" 98 " .set noreorder \n"
99 " \n" 99 " \n"
100 " ll %[ticket], %[ticket_ptr] \n" 100 " ll %[ticket], %[ticket_ptr] \n"
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
134 smp_llsc_mb(); 134 smp_llsc_mb();
135} 135}
136 136
137static inline void __raw_spin_unlock(raw_spinlock_t *lock) 137static inline void arch_spin_unlock(arch_spinlock_t *lock)
138{ 138{
139 int tmp; 139 int tmp;
140 140
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
142 142
143 if (R10000_LLSC_WAR) { 143 if (R10000_LLSC_WAR) {
144 __asm__ __volatile__ ( 144 __asm__ __volatile__ (
145 " # __raw_spin_unlock \n" 145 " # arch_spin_unlock \n"
146 "1: ll %[ticket], %[ticket_ptr] \n" 146 "1: ll %[ticket], %[ticket_ptr] \n"
147 " addiu %[ticket], %[ticket], 1 \n" 147 " addiu %[ticket], %[ticket], 1 \n"
148 " ori %[ticket], %[ticket], 0x2000 \n" 148 " ori %[ticket], %[ticket], 0x2000 \n"
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
153 [ticket] "=&r" (tmp)); 153 [ticket] "=&r" (tmp));
154 } else { 154 } else {
155 __asm__ __volatile__ ( 155 __asm__ __volatile__ (
156 " .set push # __raw_spin_unlock \n" 156 " .set push # arch_spin_unlock \n"
157 " .set noreorder \n" 157 " .set noreorder \n"
158 " \n" 158 " \n"
159 " ll %[ticket], %[ticket_ptr] \n" 159 " ll %[ticket], %[ticket_ptr] \n"
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
174 } 174 }
175} 175}
176 176
177static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) 177static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
178{ 178{
179 int tmp, tmp2, tmp3; 179 int tmp, tmp2, tmp3;
180 180
181 if (R10000_LLSC_WAR) { 181 if (R10000_LLSC_WAR) {
182 __asm__ __volatile__ ( 182 __asm__ __volatile__ (
183 " .set push # __raw_spin_trylock \n" 183 " .set push # arch_spin_trylock \n"
184 " .set noreorder \n" 184 " .set noreorder \n"
185 " \n" 185 " \n"
186 "1: ll %[ticket], %[ticket_ptr] \n" 186 "1: ll %[ticket], %[ticket_ptr] \n"
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
204 [now_serving] "=&r" (tmp3)); 204 [now_serving] "=&r" (tmp3));
205 } else { 205 } else {
206 __asm__ __volatile__ ( 206 __asm__ __volatile__ (
207 " .set push # __raw_spin_trylock \n" 207 " .set push # arch_spin_trylock \n"
208 " .set noreorder \n" 208 " .set noreorder \n"
209 " \n" 209 " \n"
210 " ll %[ticket], %[ticket_ptr] \n" 210 " ll %[ticket], %[ticket_ptr] \n"
@@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
248 * read_can_lock - would read_trylock() succeed? 248 * read_can_lock - would read_trylock() succeed?
249 * @lock: the rwlock in question. 249 * @lock: the rwlock in question.
250 */ 250 */
251#define __raw_read_can_lock(rw) ((rw)->lock >= 0) 251#define arch_read_can_lock(rw) ((rw)->lock >= 0)
252 252
253/* 253/*
254 * write_can_lock - would write_trylock() succeed? 254 * write_can_lock - would write_trylock() succeed?
255 * @lock: the rwlock in question. 255 * @lock: the rwlock in question.
256 */ 256 */
257#define __raw_write_can_lock(rw) (!(rw)->lock) 257#define arch_write_can_lock(rw) (!(rw)->lock)
258 258
259static inline void __raw_read_lock(raw_rwlock_t *rw) 259static inline void arch_read_lock(arch_rwlock_t *rw)
260{ 260{
261 unsigned int tmp; 261 unsigned int tmp;
262 262
263 if (R10000_LLSC_WAR) { 263 if (R10000_LLSC_WAR) {
264 __asm__ __volatile__( 264 __asm__ __volatile__(
265 " .set noreorder # __raw_read_lock \n" 265 " .set noreorder # arch_read_lock \n"
266 "1: ll %1, %2 \n" 266 "1: ll %1, %2 \n"
267 " bltz %1, 1b \n" 267 " bltz %1, 1b \n"
268 " addu %1, 1 \n" 268 " addu %1, 1 \n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
275 : "memory"); 275 : "memory");
276 } else { 276 } else {
277 __asm__ __volatile__( 277 __asm__ __volatile__(
278 " .set noreorder # __raw_read_lock \n" 278 " .set noreorder # arch_read_lock \n"
279 "1: ll %1, %2 \n" 279 "1: ll %1, %2 \n"
280 " bltz %1, 2f \n" 280 " bltz %1, 2f \n"
281 " addu %1, 1 \n" 281 " addu %1, 1 \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
301/* Note the use of sub, not subu, which will make the kernel die with an 301/* Note the use of sub, not subu, which will make the kernel die with an
302 overflow exception if we ever try to unlock an rwlock that is already 302 overflow exception if we ever try to unlock an rwlock that is already
303 unlocked or is being held by a writer. */ 303 unlocked or is being held by a writer. */
304static inline void __raw_read_unlock(raw_rwlock_t *rw) 304static inline void arch_read_unlock(arch_rwlock_t *rw)
305{ 305{
306 unsigned int tmp; 306 unsigned int tmp;
307 307
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
309 309
310 if (R10000_LLSC_WAR) { 310 if (R10000_LLSC_WAR) {
311 __asm__ __volatile__( 311 __asm__ __volatile__(
312 "1: ll %1, %2 # __raw_read_unlock \n" 312 "1: ll %1, %2 # arch_read_unlock \n"
313 " sub %1, 1 \n" 313 " sub %1, 1 \n"
314 " sc %1, %0 \n" 314 " sc %1, %0 \n"
315 " beqzl %1, 1b \n" 315 " beqzl %1, 1b \n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
318 : "memory"); 318 : "memory");
319 } else { 319 } else {
320 __asm__ __volatile__( 320 __asm__ __volatile__(
321 " .set noreorder # __raw_read_unlock \n" 321 " .set noreorder # arch_read_unlock \n"
322 "1: ll %1, %2 \n" 322 "1: ll %1, %2 \n"
323 " sub %1, 1 \n" 323 " sub %1, 1 \n"
324 " sc %1, %0 \n" 324 " sc %1, %0 \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
335 } 335 }
336} 336}
337 337
338static inline void __raw_write_lock(raw_rwlock_t *rw) 338static inline void arch_write_lock(arch_rwlock_t *rw)
339{ 339{
340 unsigned int tmp; 340 unsigned int tmp;
341 341
342 if (R10000_LLSC_WAR) { 342 if (R10000_LLSC_WAR) {
343 __asm__ __volatile__( 343 __asm__ __volatile__(
344 " .set noreorder # __raw_write_lock \n" 344 " .set noreorder # arch_write_lock \n"
345 "1: ll %1, %2 \n" 345 "1: ll %1, %2 \n"
346 " bnez %1, 1b \n" 346 " bnez %1, 1b \n"
347 " lui %1, 0x8000 \n" 347 " lui %1, 0x8000 \n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
354 : "memory"); 354 : "memory");
355 } else { 355 } else {
356 __asm__ __volatile__( 356 __asm__ __volatile__(
357 " .set noreorder # __raw_write_lock \n" 357 " .set noreorder # arch_write_lock \n"
358 "1: ll %1, %2 \n" 358 "1: ll %1, %2 \n"
359 " bnez %1, 2f \n" 359 " bnez %1, 2f \n"
360 " lui %1, 0x8000 \n" 360 " lui %1, 0x8000 \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
377 smp_llsc_mb(); 377 smp_llsc_mb();
378} 378}
379 379
380static inline void __raw_write_unlock(raw_rwlock_t *rw) 380static inline void arch_write_unlock(arch_rwlock_t *rw)
381{ 381{
382 smp_mb(); 382 smp_mb();
383 383
384 __asm__ __volatile__( 384 __asm__ __volatile__(
385 " # __raw_write_unlock \n" 385 " # arch_write_unlock \n"
386 " sw $0, %0 \n" 386 " sw $0, %0 \n"
387 : "=m" (rw->lock) 387 : "=m" (rw->lock)
388 : "m" (rw->lock) 388 : "m" (rw->lock)
389 : "memory"); 389 : "memory");
390} 390}
391 391
392static inline int __raw_read_trylock(raw_rwlock_t *rw) 392static inline int arch_read_trylock(arch_rwlock_t *rw)
393{ 393{
394 unsigned int tmp; 394 unsigned int tmp;
395 int ret; 395 int ret;
396 396
397 if (R10000_LLSC_WAR) { 397 if (R10000_LLSC_WAR) {
398 __asm__ __volatile__( 398 __asm__ __volatile__(
399 " .set noreorder # __raw_read_trylock \n" 399 " .set noreorder # arch_read_trylock \n"
400 " li %2, 0 \n" 400 " li %2, 0 \n"
401 "1: ll %1, %3 \n" 401 "1: ll %1, %3 \n"
402 " bltz %1, 2f \n" 402 " bltz %1, 2f \n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
413 : "memory"); 413 : "memory");
414 } else { 414 } else {
415 __asm__ __volatile__( 415 __asm__ __volatile__(
416 " .set noreorder # __raw_read_trylock \n" 416 " .set noreorder # arch_read_trylock \n"
417 " li %2, 0 \n" 417 " li %2, 0 \n"
418 "1: ll %1, %3 \n" 418 "1: ll %1, %3 \n"
419 " bltz %1, 2f \n" 419 " bltz %1, 2f \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
433 return ret; 433 return ret;
434} 434}
435 435
436static inline int __raw_write_trylock(raw_rwlock_t *rw) 436static inline int arch_write_trylock(arch_rwlock_t *rw)
437{ 437{
438 unsigned int tmp; 438 unsigned int tmp;
439 int ret; 439 int ret;
440 440
441 if (R10000_LLSC_WAR) { 441 if (R10000_LLSC_WAR) {
442 __asm__ __volatile__( 442 __asm__ __volatile__(
443 " .set noreorder # __raw_write_trylock \n" 443 " .set noreorder # arch_write_trylock \n"
444 " li %2, 0 \n" 444 " li %2, 0 \n"
445 "1: ll %1, %3 \n" 445 "1: ll %1, %3 \n"
446 " bnez %1, 2f \n" 446 " bnez %1, 2f \n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
457 : "memory"); 457 : "memory");
458 } else { 458 } else {
459 __asm__ __volatile__( 459 __asm__ __volatile__(
460 " .set noreorder # __raw_write_trylock \n" 460 " .set noreorder # arch_write_trylock \n"
461 " li %2, 0 \n" 461 " li %2, 0 \n"
462 "1: ll %1, %3 \n" 462 "1: ll %1, %3 \n"
463 " bnez %1, 2f \n" 463 " bnez %1, 2f \n"
@@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
480 return ret; 480 return ret;
481} 481}
482 482
483#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 483#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
484#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 484#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
485 485
486#define _raw_spin_relax(lock) cpu_relax() 486#define arch_spin_relax(lock) cpu_relax()
487#define _raw_read_relax(lock) cpu_relax() 487#define arch_read_relax(lock) cpu_relax()
488#define _raw_write_relax(lock) cpu_relax() 488#define arch_write_relax(lock) cpu_relax()
489 489
490#endif /* _ASM_SPINLOCK_H */ 490#endif /* _ASM_SPINLOCK_H */
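
The MIPS ticket lock keeps the serving counter and the next-ticket counter in a single word (see the field layout in spinlock_types.h just below), so lock state can be judged from one plain load. A sketch of the two predicates used above, as standalone helpers on a sampled value (demo_* names are illustrative):

static inline int demo_is_locked(unsigned int counters)
{
	/* held while the ticket field differs from serving_now */
	return ((counters >> 14) ^ counters) & 0x1fff;
}

static inline int demo_is_contended(unsigned int counters)
{
	/* more than one ticket ahead of serving_now: waiters are queued */
	return (((counters >> 14) - counters) & 0x1fff) > 1;
}
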
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index adeedaa116c1..ee197c2f9c98 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -12,14 +12,14 @@ typedef struct {
12 * bits 15..28: ticket 12 * bits 15..28: ticket
13 */ 13 */
14 unsigned int lock; 14 unsigned int lock;
15} raw_spinlock_t; 15} arch_spinlock_t;
16 16
17#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 17#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
18 18
19typedef struct { 19typedef struct {
20 volatile unsigned int lock; 20 volatile unsigned int lock;
21} raw_rwlock_t; 21} arch_rwlock_t;
22 22
23#define __RAW_RW_LOCK_UNLOCKED { 0 } 23#define __ARCH_RW_LOCK_UNLOCKED { 0 }
24 24
25#endif 25#endif
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7b845ba9dff4..8b0b4181219f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v)
99 } 99 }
100 100
101 if (i < NR_IRQS) { 101 if (i < NR_IRQS) {
102 spin_lock_irqsave(&irq_desc[i].lock, flags); 102 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
103 action = irq_desc[i].action; 103 action = irq_desc[i].action;
104 if (!action) 104 if (!action)
105 goto skip; 105 goto skip;
@@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v)
118 118
119 seq_putc(p, '\n'); 119 seq_putc(p, '\n');
120skip: 120skip:
121 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 121 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
122 } else if (i == NR_IRQS) { 122 } else if (i == NR_IRQS) {
123 seq_putc(p, '\n'); 123 seq_putc(p, '\n');
124 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 124 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 6d39e222b170..6153b6a05ccf 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask)
159 159
160 if (current_cpu_type() == CPU_VR4111 || 160 if (current_cpu_type() == CPU_VR4111 ||
161 current_cpu_type() == CPU_VR4121) { 161 current_cpu_type() == CPU_VR4121) {
162 spin_lock_irqsave(&desc->lock, flags); 162 raw_spin_lock_irqsave(&desc->lock, flags);
163 icu1_set(MPIUINTREG, mask); 163 icu1_set(MPIUINTREG, mask);
164 spin_unlock_irqrestore(&desc->lock, flags); 164 raw_spin_unlock_irqrestore(&desc->lock, flags);
165 } 165 }
166} 166}
167 167
@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask)
174 174
175 if (current_cpu_type() == CPU_VR4111 || 175 if (current_cpu_type() == CPU_VR4111 ||
176 current_cpu_type() == CPU_VR4121) { 176 current_cpu_type() == CPU_VR4121) {
177 spin_lock_irqsave(&desc->lock, flags); 177 raw_spin_lock_irqsave(&desc->lock, flags);
178 icu1_clear(MPIUINTREG, mask); 178 icu1_clear(MPIUINTREG, mask);
179 spin_unlock_irqrestore(&desc->lock, flags); 179 raw_spin_unlock_irqrestore(&desc->lock, flags);
180 } 180 }
181} 181}
182 182
@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask)
189 189
190 if (current_cpu_type() == CPU_VR4111 || 190 if (current_cpu_type() == CPU_VR4111 ||
191 current_cpu_type() == CPU_VR4121) { 191 current_cpu_type() == CPU_VR4121) {
192 spin_lock_irqsave(&desc->lock, flags); 192 raw_spin_lock_irqsave(&desc->lock, flags);
193 icu1_set(MAIUINTREG, mask); 193 icu1_set(MAIUINTREG, mask);
194 spin_unlock_irqrestore(&desc->lock, flags); 194 raw_spin_unlock_irqrestore(&desc->lock, flags);
195 } 195 }
196} 196}
197 197
@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask)
204 204
205 if (current_cpu_type() == CPU_VR4111 || 205 if (current_cpu_type() == CPU_VR4111 ||
206 current_cpu_type() == CPU_VR4121) { 206 current_cpu_type() == CPU_VR4121) {
207 spin_lock_irqsave(&desc->lock, flags); 207 raw_spin_lock_irqsave(&desc->lock, flags);
208 icu1_clear(MAIUINTREG, mask); 208 icu1_clear(MAIUINTREG, mask);
209 spin_unlock_irqrestore(&desc->lock, flags); 209 raw_spin_unlock_irqrestore(&desc->lock, flags);
210 } 210 }
211} 211}
212 212
@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask)
219 219
220 if (current_cpu_type() == CPU_VR4111 || 220 if (current_cpu_type() == CPU_VR4111 ||
221 current_cpu_type() == CPU_VR4121) { 221 current_cpu_type() == CPU_VR4121) {
222 spin_lock_irqsave(&desc->lock, flags); 222 raw_spin_lock_irqsave(&desc->lock, flags);
223 icu1_set(MKIUINTREG, mask); 223 icu1_set(MKIUINTREG, mask);
224 spin_unlock_irqrestore(&desc->lock, flags); 224 raw_spin_unlock_irqrestore(&desc->lock, flags);
225 } 225 }
226} 226}
227 227
@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask)
234 234
235 if (current_cpu_type() == CPU_VR4111 || 235 if (current_cpu_type() == CPU_VR4111 ||
236 current_cpu_type() == CPU_VR4121) { 236 current_cpu_type() == CPU_VR4121) {
237 spin_lock_irqsave(&desc->lock, flags); 237 raw_spin_lock_irqsave(&desc->lock, flags);
238 icu1_clear(MKIUINTREG, mask); 238 icu1_clear(MKIUINTREG, mask);
239 spin_unlock_irqrestore(&desc->lock, flags); 239 raw_spin_unlock_irqrestore(&desc->lock, flags);
240 } 240 }
241} 241}
242 242
@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask)
247 struct irq_desc *desc = irq_desc + ETHERNET_IRQ; 247 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
248 unsigned long flags; 248 unsigned long flags;
249 249
250 spin_lock_irqsave(&desc->lock, flags); 250 raw_spin_lock_irqsave(&desc->lock, flags);
251 icu1_set(MMACINTREG, mask); 251 icu1_set(MMACINTREG, mask);
252 spin_unlock_irqrestore(&desc->lock, flags); 252 raw_spin_unlock_irqrestore(&desc->lock, flags);
253} 253}
254 254
255EXPORT_SYMBOL(vr41xx_enable_macint); 255EXPORT_SYMBOL(vr41xx_enable_macint);
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask)
259 struct irq_desc *desc = irq_desc + ETHERNET_IRQ; 259 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
260 unsigned long flags; 260 unsigned long flags;
261 261
262 spin_lock_irqsave(&desc->lock, flags); 262 raw_spin_lock_irqsave(&desc->lock, flags);
263 icu1_clear(MMACINTREG, mask); 263 icu1_clear(MMACINTREG, mask);
264 spin_unlock_irqrestore(&desc->lock, flags); 264 raw_spin_unlock_irqrestore(&desc->lock, flags);
265} 265}
266 266
267EXPORT_SYMBOL(vr41xx_disable_macint); 267EXPORT_SYMBOL(vr41xx_disable_macint);
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask)
271 struct irq_desc *desc = irq_desc + DSIU_IRQ; 271 struct irq_desc *desc = irq_desc + DSIU_IRQ;
272 unsigned long flags; 272 unsigned long flags;
273 273
274 spin_lock_irqsave(&desc->lock, flags); 274 raw_spin_lock_irqsave(&desc->lock, flags);
275 icu1_set(MDSIUINTREG, mask); 275 icu1_set(MDSIUINTREG, mask);
276 spin_unlock_irqrestore(&desc->lock, flags); 276 raw_spin_unlock_irqrestore(&desc->lock, flags);
277} 277}
278 278
279EXPORT_SYMBOL(vr41xx_enable_dsiuint); 279EXPORT_SYMBOL(vr41xx_enable_dsiuint);
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask)
283 struct irq_desc *desc = irq_desc + DSIU_IRQ; 283 struct irq_desc *desc = irq_desc + DSIU_IRQ;
284 unsigned long flags; 284 unsigned long flags;
285 285
286 spin_lock_irqsave(&desc->lock, flags); 286 raw_spin_lock_irqsave(&desc->lock, flags);
287 icu1_clear(MDSIUINTREG, mask); 287 icu1_clear(MDSIUINTREG, mask);
288 spin_unlock_irqrestore(&desc->lock, flags); 288 raw_spin_unlock_irqrestore(&desc->lock, flags);
289} 289}
290 290
291EXPORT_SYMBOL(vr41xx_disable_dsiuint); 291EXPORT_SYMBOL(vr41xx_disable_dsiuint);
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask)
295 struct irq_desc *desc = irq_desc + FIR_IRQ; 295 struct irq_desc *desc = irq_desc + FIR_IRQ;
296 unsigned long flags; 296 unsigned long flags;
297 297
298 spin_lock_irqsave(&desc->lock, flags); 298 raw_spin_lock_irqsave(&desc->lock, flags);
299 icu2_set(MFIRINTREG, mask); 299 icu2_set(MFIRINTREG, mask);
300 spin_unlock_irqrestore(&desc->lock, flags); 300 raw_spin_unlock_irqrestore(&desc->lock, flags);
301} 301}
302 302
303EXPORT_SYMBOL(vr41xx_enable_firint); 303EXPORT_SYMBOL(vr41xx_enable_firint);
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask)
307 struct irq_desc *desc = irq_desc + FIR_IRQ; 307 struct irq_desc *desc = irq_desc + FIR_IRQ;
308 unsigned long flags; 308 unsigned long flags;
309 309
310 spin_lock_irqsave(&desc->lock, flags); 310 raw_spin_lock_irqsave(&desc->lock, flags);
311 icu2_clear(MFIRINTREG, mask); 311 icu2_clear(MFIRINTREG, mask);
312 spin_unlock_irqrestore(&desc->lock, flags); 312 raw_spin_unlock_irqrestore(&desc->lock, flags);
313} 313}
314 314
315EXPORT_SYMBOL(vr41xx_disable_firint); 315EXPORT_SYMBOL(vr41xx_disable_firint);
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void)
322 if (current_cpu_type() == CPU_VR4122 || 322 if (current_cpu_type() == CPU_VR4122 ||
323 current_cpu_type() == CPU_VR4131 || 323 current_cpu_type() == CPU_VR4131 ||
324 current_cpu_type() == CPU_VR4133) { 324 current_cpu_type() == CPU_VR4133) {
325 spin_lock_irqsave(&desc->lock, flags); 325 raw_spin_lock_irqsave(&desc->lock, flags);
326 icu2_write(MPCIINTREG, PCIINT0); 326 icu2_write(MPCIINTREG, PCIINT0);
327 spin_unlock_irqrestore(&desc->lock, flags); 327 raw_spin_unlock_irqrestore(&desc->lock, flags);
328 } 328 }
329} 329}
330 330
@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void)
338 if (current_cpu_type() == CPU_VR4122 || 338 if (current_cpu_type() == CPU_VR4122 ||
339 current_cpu_type() == CPU_VR4131 || 339 current_cpu_type() == CPU_VR4131 ||
340 current_cpu_type() == CPU_VR4133) { 340 current_cpu_type() == CPU_VR4133) {
341 spin_lock_irqsave(&desc->lock, flags); 341 raw_spin_lock_irqsave(&desc->lock, flags);
342 icu2_write(MPCIINTREG, 0); 342 icu2_write(MPCIINTREG, 0);
343 spin_unlock_irqrestore(&desc->lock, flags); 343 raw_spin_unlock_irqrestore(&desc->lock, flags);
344 } 344 }
345} 345}
346 346
@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void)
354 if (current_cpu_type() == CPU_VR4122 || 354 if (current_cpu_type() == CPU_VR4122 ||
355 current_cpu_type() == CPU_VR4131 || 355 current_cpu_type() == CPU_VR4131 ||
356 current_cpu_type() == CPU_VR4133) { 356 current_cpu_type() == CPU_VR4133) {
357 spin_lock_irqsave(&desc->lock, flags); 357 raw_spin_lock_irqsave(&desc->lock, flags);
358 icu2_write(MSCUINTREG, SCUINT0); 358 icu2_write(MSCUINTREG, SCUINT0);
359 spin_unlock_irqrestore(&desc->lock, flags); 359 raw_spin_unlock_irqrestore(&desc->lock, flags);
360 } 360 }
361} 361}
362 362
@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void)
370 if (current_cpu_type() == CPU_VR4122 || 370 if (current_cpu_type() == CPU_VR4122 ||
371 current_cpu_type() == CPU_VR4131 || 371 current_cpu_type() == CPU_VR4131 ||
372 current_cpu_type() == CPU_VR4133) { 372 current_cpu_type() == CPU_VR4133) {
373 spin_lock_irqsave(&desc->lock, flags); 373 raw_spin_lock_irqsave(&desc->lock, flags);
374 icu2_write(MSCUINTREG, 0); 374 icu2_write(MSCUINTREG, 0);
375 spin_unlock_irqrestore(&desc->lock, flags); 375 raw_spin_unlock_irqrestore(&desc->lock, flags);
376 } 376 }
377} 377}
378 378
@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask)
386 if (current_cpu_type() == CPU_VR4122 || 386 if (current_cpu_type() == CPU_VR4122 ||
387 current_cpu_type() == CPU_VR4131 || 387 current_cpu_type() == CPU_VR4131 ||
388 current_cpu_type() == CPU_VR4133) { 388 current_cpu_type() == CPU_VR4133) {
389 spin_lock_irqsave(&desc->lock, flags); 389 raw_spin_lock_irqsave(&desc->lock, flags);
390 icu2_set(MCSIINTREG, mask); 390 icu2_set(MCSIINTREG, mask);
391 spin_unlock_irqrestore(&desc->lock, flags); 391 raw_spin_unlock_irqrestore(&desc->lock, flags);
392 } 392 }
393} 393}
394 394
@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask)
402 if (current_cpu_type() == CPU_VR4122 || 402 if (current_cpu_type() == CPU_VR4122 ||
403 current_cpu_type() == CPU_VR4131 || 403 current_cpu_type() == CPU_VR4131 ||
404 current_cpu_type() == CPU_VR4133) { 404 current_cpu_type() == CPU_VR4133) {
405 spin_lock_irqsave(&desc->lock, flags); 405 raw_spin_lock_irqsave(&desc->lock, flags);
406 icu2_clear(MCSIINTREG, mask); 406 icu2_clear(MCSIINTREG, mask);
407 spin_unlock_irqrestore(&desc->lock, flags); 407 raw_spin_unlock_irqrestore(&desc->lock, flags);
408 } 408 }
409} 409}
410 410
@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void)
418 if (current_cpu_type() == CPU_VR4122 || 418 if (current_cpu_type() == CPU_VR4122 ||
419 current_cpu_type() == CPU_VR4131 || 419 current_cpu_type() == CPU_VR4131 ||
420 current_cpu_type() == CPU_VR4133) { 420 current_cpu_type() == CPU_VR4133) {
421 spin_lock_irqsave(&desc->lock, flags); 421 raw_spin_lock_irqsave(&desc->lock, flags);
422 icu2_write(MBCUINTREG, BCUINTR); 422 icu2_write(MBCUINTREG, BCUINTR);
423 spin_unlock_irqrestore(&desc->lock, flags); 423 raw_spin_unlock_irqrestore(&desc->lock, flags);
424 } 424 }
425} 425}
426 426
@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void)
434 if (current_cpu_type() == CPU_VR4122 || 434 if (current_cpu_type() == CPU_VR4122 ||
435 current_cpu_type() == CPU_VR4131 || 435 current_cpu_type() == CPU_VR4131 ||
436 current_cpu_type() == CPU_VR4133) { 436 current_cpu_type() == CPU_VR4133) {
437 spin_lock_irqsave(&desc->lock, flags); 437 raw_spin_lock_irqsave(&desc->lock, flags);
438 icu2_write(MBCUINTREG, 0); 438 icu2_write(MBCUINTREG, 0);
439 spin_unlock_irqrestore(&desc->lock, flags); 439 raw_spin_unlock_irqrestore(&desc->lock, flags);
440 } 440 }
441} 441}
442 442
@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
486 486
487 pin = SYSINT1_IRQ_TO_PIN(irq); 487 pin = SYSINT1_IRQ_TO_PIN(irq);
488 488
489 spin_lock_irq(&desc->lock); 489 raw_spin_lock_irq(&desc->lock);
490 490
491 intassign0 = icu1_read(INTASSIGN0); 491 intassign0 = icu1_read(INTASSIGN0);
492 intassign1 = icu1_read(INTASSIGN1); 492 intassign1 = icu1_read(INTASSIGN1);
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
525 intassign1 |= (uint16_t)assign << 9; 525 intassign1 |= (uint16_t)assign << 9;
526 break; 526 break;
527 default: 527 default:
528 spin_unlock_irq(&desc->lock); 528 raw_spin_unlock_irq(&desc->lock);
529 return -EINVAL; 529 return -EINVAL;
530 } 530 }
531 531
@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
533 icu1_write(INTASSIGN0, intassign0); 533 icu1_write(INTASSIGN0, intassign0);
534 icu1_write(INTASSIGN1, intassign1); 534 icu1_write(INTASSIGN1, intassign1);
535 535
536 spin_unlock_irq(&desc->lock); 536 raw_spin_unlock_irq(&desc->lock);
537 537
538 return 0; 538 return 0;
539} 539}
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
546 546
547 pin = SYSINT2_IRQ_TO_PIN(irq); 547 pin = SYSINT2_IRQ_TO_PIN(irq);
548 548
549 spin_lock_irq(&desc->lock); 549 raw_spin_lock_irq(&desc->lock);
550 550
551 intassign2 = icu1_read(INTASSIGN2); 551 intassign2 = icu1_read(INTASSIGN2);
552 intassign3 = icu1_read(INTASSIGN3); 552 intassign3 = icu1_read(INTASSIGN3);
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
593 intassign3 |= (uint16_t)assign << 12; 593 intassign3 |= (uint16_t)assign << 12;
594 break; 594 break;
595 default: 595 default:
596 spin_unlock_irq(&desc->lock); 596 raw_spin_unlock_irq(&desc->lock);
597 return -EINVAL; 597 return -EINVAL;
598 } 598 }
599 599
@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
601 icu1_write(INTASSIGN2, intassign2); 601 icu1_write(INTASSIGN2, intassign2);
602 icu1_write(INTASSIGN3, intassign3); 602 icu1_write(INTASSIGN3, intassign3);
603 603
604 spin_unlock_irq(&desc->lock); 604 raw_spin_unlock_irq(&desc->lock);
605 605
606 return 0; 606 return 0;
607} 607}
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 4c3c58ef5cda..e2d5ed891f37 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v)
215 215
216 /* display information rows, one per active CPU */ 216 /* display information rows, one per active CPU */
217 case 1 ... NR_IRQS - 1: 217 case 1 ... NR_IRQS - 1:
218 spin_lock_irqsave(&irq_desc[i].lock, flags); 218 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
219 219
220 action = irq_desc[i].action; 220 action = irq_desc[i].action;
221 if (action) { 221 if (action) {
@@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v)
235 seq_putc(p, '\n'); 235 seq_putc(p, '\n');
236 } 236 }
237 237
238 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 238 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
239 break; 239 break;
240 240
241 /* polish off with NMI and error counters */ 241 /* polish off with NMI and error counters */
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 8bc9e96699b2..716634d1f546 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -27,19 +27,19 @@
27# define ATOMIC_HASH_SIZE 4 27# define ATOMIC_HASH_SIZE 4
28# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) 28# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
29 29
30extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; 30extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
31 31
32/* Can't use raw_spin_lock_irq because of #include problems, so 32/* Can't use raw_spin_lock_irq because of #include problems, so
33 * this is the substitute */ 33 * this is the substitute */
34#define _atomic_spin_lock_irqsave(l,f) do { \ 34#define _atomic_spin_lock_irqsave(l,f) do { \
35 raw_spinlock_t *s = ATOMIC_HASH(l); \ 35 arch_spinlock_t *s = ATOMIC_HASH(l); \
36 local_irq_save(f); \ 36 local_irq_save(f); \
37 __raw_spin_lock(s); \ 37 arch_spin_lock(s); \
38} while(0) 38} while(0)
39 39
40#define _atomic_spin_unlock_irqrestore(l,f) do { \ 40#define _atomic_spin_unlock_irqrestore(l,f) do { \
41 raw_spinlock_t *s = ATOMIC_HASH(l); \ 41 arch_spinlock_t *s = ATOMIC_HASH(l); \
42 __raw_spin_unlock(s); \ 42 arch_spin_unlock(s); \
43 local_irq_restore(f); \ 43 local_irq_restore(f); \
44} while(0) 44} while(0)
45 45
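
ATOMIC_HASH spreads atomic operations across a small pool of locks keyed by the address of the datum, so unrelated atomics rarely contend on one global lock. A user-space sketch of the same idea (pool size and cache-line constant are assumptions for the demo; pthread spinlocks stand in for arch_spinlock_t):

#include <pthread.h>
#include <stdint.h>

#define DEMO_L1_CACHE_BYTES	16
#define DEMO_HASH_SIZE		4

static pthread_spinlock_t demo_hash[DEMO_HASH_SIZE];

static pthread_spinlock_t *demo_lock_for(const void *addr)
{
	uintptr_t slot = ((uintptr_t)addr / DEMO_L1_CACHE_BYTES)
			 & (DEMO_HASH_SIZE - 1);
	return &demo_hash[slot];
}
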
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fae03e136fa8..74036f436a3b 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
5#include <asm/processor.h> 5#include <asm/processor.h>
6#include <asm/spinlock_types.h> 6#include <asm/spinlock_types.h>
7 7
8static inline int __raw_spin_is_locked(raw_spinlock_t *x) 8static inline int arch_spin_is_locked(arch_spinlock_t *x)
9{ 9{
10 volatile unsigned int *a = __ldcw_align(x); 10 volatile unsigned int *a = __ldcw_align(x);
11 return *a == 0; 11 return *a == 0;
12} 12}
13 13
14#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) 14#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
15#define __raw_spin_unlock_wait(x) \ 15#define arch_spin_unlock_wait(x) \
16 do { cpu_relax(); } while (__raw_spin_is_locked(x)) 16 do { cpu_relax(); } while (arch_spin_is_locked(x))
17 17
18static inline void __raw_spin_lock_flags(raw_spinlock_t *x, 18static inline void arch_spin_lock_flags(arch_spinlock_t *x,
19 unsigned long flags) 19 unsigned long flags)
20{ 20{
21 volatile unsigned int *a; 21 volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
33 mb(); 33 mb();
34} 34}
35 35
36static inline void __raw_spin_unlock(raw_spinlock_t *x) 36static inline void arch_spin_unlock(arch_spinlock_t *x)
37{ 37{
38 volatile unsigned int *a; 38 volatile unsigned int *a;
39 mb(); 39 mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
42 mb(); 42 mb();
43} 43}
44 44
45static inline int __raw_spin_trylock(raw_spinlock_t *x) 45static inline int arch_spin_trylock(arch_spinlock_t *x)
46{ 46{
47 volatile unsigned int *a; 47 volatile unsigned int *a;
48 int ret; 48 int ret;
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
69 69
70/* Note that we have to ensure interrupts are disabled in case we're 70/* Note that we have to ensure interrupts are disabled in case we're
71 * interrupted by some other code that wants to grab the same read lock */ 71 * interrupted by some other code that wants to grab the same read lock */
72static __inline__ void __raw_read_lock(raw_rwlock_t *rw) 72static __inline__ void arch_read_lock(arch_rwlock_t *rw)
73{ 73{
74 unsigned long flags; 74 unsigned long flags;
75 local_irq_save(flags); 75 local_irq_save(flags);
76 __raw_spin_lock_flags(&rw->lock, flags); 76 arch_spin_lock_flags(&rw->lock, flags);
77 rw->counter++; 77 rw->counter++;
78 __raw_spin_unlock(&rw->lock); 78 arch_spin_unlock(&rw->lock);
79 local_irq_restore(flags); 79 local_irq_restore(flags);
80} 80}
81 81
82/* Note that we have to ensure interrupts are disabled in case we're 82/* Note that we have to ensure interrupts are disabled in case we're
83 * interrupted by some other code that wants to grab the same read lock */ 83 * interrupted by some other code that wants to grab the same read lock */
84static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) 84static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
85{ 85{
86 unsigned long flags; 86 unsigned long flags;
87 local_irq_save(flags); 87 local_irq_save(flags);
88 __raw_spin_lock_flags(&rw->lock, flags); 88 arch_spin_lock_flags(&rw->lock, flags);
89 rw->counter--; 89 rw->counter--;
90 __raw_spin_unlock(&rw->lock); 90 arch_spin_unlock(&rw->lock);
91 local_irq_restore(flags); 91 local_irq_restore(flags);
92} 92}
93 93
94/* Note that we have to ensure interrupts are disabled in case we're 94/* Note that we have to ensure interrupts are disabled in case we're
95 * interrupted by some other code that wants to grab the same read lock */ 95 * interrupted by some other code that wants to grab the same read lock */
96static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) 96static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
97{ 97{
98 unsigned long flags; 98 unsigned long flags;
99 retry: 99 retry:
100 local_irq_save(flags); 100 local_irq_save(flags);
101 if (__raw_spin_trylock(&rw->lock)) { 101 if (arch_spin_trylock(&rw->lock)) {
102 rw->counter++; 102 rw->counter++;
103 __raw_spin_unlock(&rw->lock); 103 arch_spin_unlock(&rw->lock);
104 local_irq_restore(flags); 104 local_irq_restore(flags);
105 return 1; 105 return 1;
106 } 106 }
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
111 return 0; 111 return 0;
112 112
113 /* Wait until we have a realistic chance at the lock */ 113 /* Wait until we have a realistic chance at the lock */
114 while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) 114 while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
115 cpu_relax(); 115 cpu_relax();
116 116
117 goto retry; 117 goto retry;
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
119 119
120/* Note that we have to ensure interrupts are disabled in case we're 120/* Note that we have to ensure interrupts are disabled in case we're
121 * interrupted by some other code that wants to read_trylock() this lock */ 121 * interrupted by some other code that wants to read_trylock() this lock */
122static __inline__ void __raw_write_lock(raw_rwlock_t *rw) 122static __inline__ void arch_write_lock(arch_rwlock_t *rw)
123{ 123{
124 unsigned long flags; 124 unsigned long flags;
125retry: 125retry:
126 local_irq_save(flags); 126 local_irq_save(flags);
127 __raw_spin_lock_flags(&rw->lock, flags); 127 arch_spin_lock_flags(&rw->lock, flags);
128 128
129 if (rw->counter != 0) { 129 if (rw->counter != 0) {
130 __raw_spin_unlock(&rw->lock); 130 arch_spin_unlock(&rw->lock);
131 local_irq_restore(flags); 131 local_irq_restore(flags);
132 132
133 while (rw->counter != 0) 133 while (rw->counter != 0)
@@ -141,27 +141,27 @@ retry:
141 local_irq_restore(flags); 141 local_irq_restore(flags);
142} 142}
143 143
144static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) 144static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
145{ 145{
146 rw->counter = 0; 146 rw->counter = 0;
147 __raw_spin_unlock(&rw->lock); 147 arch_spin_unlock(&rw->lock);
148} 148}
149 149
150/* Note that we have to ensure interrupts are disabled in case we're 150/* Note that we have to ensure interrupts are disabled in case we're
151 * interrupted by some other code that wants to read_trylock() this lock */ 151 * interrupted by some other code that wants to read_trylock() this lock */
152static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) 152static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
153{ 153{
154 unsigned long flags; 154 unsigned long flags;
155 int result = 0; 155 int result = 0;
156 156
157 local_irq_save(flags); 157 local_irq_save(flags);
158 if (__raw_spin_trylock(&rw->lock)) { 158 if (arch_spin_trylock(&rw->lock)) {
159 if (rw->counter == 0) { 159 if (rw->counter == 0) {
160 rw->counter = -1; 160 rw->counter = -1;
161 result = 1; 161 result = 1;
162 } else { 162 } else {
163 /* Read-locked. Oh well. */ 163 /* Read-locked. Oh well. */
164 __raw_spin_unlock(&rw->lock); 164 arch_spin_unlock(&rw->lock);
165 } 165 }
166 } 166 }
167 local_irq_restore(flags); 167 local_irq_restore(flags);
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
173 * read_can_lock - would read_trylock() succeed? 173 * read_can_lock - would read_trylock() succeed?
174 * @lock: the rwlock in question. 174 * @lock: the rwlock in question.
175 */ 175 */
176static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) 176static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
177{ 177{
178 return rw->counter >= 0; 178 return rw->counter >= 0;
179} 179}
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
182 * write_can_lock - would write_trylock() succeed? 182 * write_can_lock - would write_trylock() succeed?
183 * @lock: the rwlock in question. 183 * @lock: the rwlock in question.
184 */ 184 */
185static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) 185static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
186{ 186{
187 return !rw->counter; 187 return !rw->counter;
188} 188}
189 189
190#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 190#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
191#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 191#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
192 192
193#define _raw_spin_relax(lock) cpu_relax() 193#define arch_spin_relax(lock) cpu_relax()
194#define _raw_read_relax(lock) cpu_relax() 194#define arch_read_relax(lock) cpu_relax()
195#define _raw_write_relax(lock) cpu_relax() 195#define arch_write_relax(lock) cpu_relax()
196 196
197#endif /* __ASM_SPINLOCK_H */ 197#endif /* __ASM_SPINLOCK_H */
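
parisc has no atomic rwlock primitive, so the rwlock above is a spinlock guarding a signed reader count: positive means readers, 0 free, -1 a writer, and a successful writer keeps the inner spinlock held until arch_write_unlock() drops both. A hedged pthread sketch of the write-trylock path (the kernel's local_irq_save() has no user-space equivalent and is omitted):

#include <pthread.h>

struct demo_rwlock {
	pthread_spinlock_t lock;	/* stands in for arch_spinlock_t */
	int counter;			/* >0 readers, 0 free, -1 writer */
};

static int demo_write_trylock(struct demo_rwlock *rw)
{
	int ok = 0;

	if (pthread_spin_trylock(&rw->lock) == 0) {
		if (rw->counter == 0) {
			rw->counter = -1;
			ok = 1;		/* keep rw->lock held, as above */
		} else {
			pthread_spin_unlock(&rw->lock);
		}
	}
	return ok;
}
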
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 3f72f47cf4b2..8c373aa28a86 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -4,18 +4,18 @@
4typedef struct { 4typedef struct {
5#ifdef CONFIG_PA20 5#ifdef CONFIG_PA20
6 volatile unsigned int slock; 6 volatile unsigned int slock;
7# define __RAW_SPIN_LOCK_UNLOCKED { 1 } 7# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
8#else 8#else
9 volatile unsigned int lock[4]; 9 volatile unsigned int lock[4];
10# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } 10# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
11#endif 11#endif
12} raw_spinlock_t; 12} arch_spinlock_t;
13 13
14typedef struct { 14typedef struct {
15 raw_spinlock_t lock; 15 arch_spinlock_t lock;
16 volatile int counter; 16 volatile int counter;
17} raw_rwlock_t; 17} arch_rwlock_t;
18 18
19#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } 19#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
20 20
21#endif 21#endif
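
The two spinlock layouts exist because the parisc ldcw instruction wants a 16-byte-aligned lock word on pre-PA2.0 machines; lock[4] reserves enough room for __ldcw_align() to pick the aligned word inside it. The rounding amounts to this sketch (assumed alignment rule; demo_* names are illustrative):

#include <stdint.h>

#define DEMO_LDCW_ALIGN	16

static volatile unsigned int *demo_ldcw_align(volatile unsigned int lock[4])
{
	uintptr_t a = (uintptr_t)lock;

	a = (a + DEMO_LDCW_ALIGN - 1) & ~(uintptr_t)(DEMO_LDCW_ALIGN - 1);
	return (volatile unsigned int *)a;
}
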
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 2e7610cb33d5..f47465e8d040 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v)
180 if (i < NR_IRQS) { 180 if (i < NR_IRQS) {
181 struct irqaction *action; 181 struct irqaction *action;
182 182
183 spin_lock_irqsave(&irq_desc[i].lock, flags); 183 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
184 action = irq_desc[i].action; 184 action = irq_desc[i].action;
185 if (!action) 185 if (!action)
186 goto skip; 186 goto skip;
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v)
224 224
225 seq_putc(p, '\n'); 225 seq_putc(p, '\n');
226 skip: 226 skip:
227 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 227 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
228 } 228 }
229 229
230 return 0; 230 return 0;
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index e3eb739fab19..353963d42059 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -12,8 +12,8 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-	[0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+	[0 ... (ATOMIC_HASH_SIZE-1)]  = __ARCH_SPIN_LOCK_UNLOCKED
 };
 #endif
 
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 168fce726201..20de73c36682 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 198266cf9e2d..764094cff681 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
 
-#define __raw_spin_is_locked(x)		((x)->slock != 0)
+#define arch_spin_is_locked(x)		((x)->slock != 0)
 
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
-	return arch_spin_trylock(lock) == 0;
+	return __arch_spin_trylock(lock) == 0;
 }
 
 /*
@@ -96,19 +96,19 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_lppaca()->shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
+extern void __spin_yield(arch_spinlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)	barrier()
 #define __rw_yield(x)	barrier()
 #define SHARED_PROCESSOR	0
 #endif
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(arch_spin_trylock(lock) == 0))
+		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
 		do {
 			HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 }
 
 static inline
-void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;
 
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(arch_spin_trylock(lock) == 0))
+		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
 		local_save_flags(flags_dis);
 		local_irq_restore(flags);
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 	}
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	SYNC_IO;
-	__asm__ __volatile__("# __raw_spin_unlock\n\t"
+	__asm__ __volatile__("# arch_spin_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
 	lock->slock = 0;
 }
 
 #ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
 #else
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 #endif
 
 /*
@@ -166,8 +166,8 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
  * read-locks.
  */
 
-#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
-#define __raw_write_can_lock(rw)	(!(rw)->lock)
+#define arch_read_can_lock(rw)		((rw)->lock >= 0)
+#define arch_write_can_lock(rw)	(!(rw)->lock)
 
 #ifdef CONFIG_PPC64
 #define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
  * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
  */
-static inline long arch_read_trylock(raw_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
 {
 	long tmp;
 
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw)
  * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
  */
-static inline long arch_write_trylock(raw_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
 {
 	long tmp, token;
 
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(raw_rwlock_t *rw)
 	return tmp;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(arch_read_trylock(rw) > 0))
+		if (likely(__arch_read_trylock(rw) > 0))
 			break;
 		do {
 			HMT_low();
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	}
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(arch_write_trylock(rw) == 0))
+		if (likely(__arch_write_trylock(rw) == 0))
 			break;
 		do {
 			HMT_low();
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	}
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return arch_read_trylock(rw) > 0;
+	return __arch_read_trylock(rw) > 0;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return arch_write_trylock(rw) == 0;
+	return __arch_write_trylock(rw) == 0;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	long tmp;
 
@@ -280,19 +280,19 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 			: "cr0", "xer", "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__("# write_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
 	rw->lock = 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	__spin_yield(lock)
-#define _raw_read_relax(lock)	__rw_yield(lock)
-#define _raw_write_relax(lock)	__rw_yield(lock)
+#define arch_spin_relax(lock)	__spin_yield(lock)
+#define arch_read_relax(lock)	__rw_yield(lock)
+#define arch_write_relax(lock)	__rw_yield(lock)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SPINLOCK_H */
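
The shape visible in arch_spin_lock() above is trylock-then-spin: one fast atomic attempt inline, and on failure a low-priority polling loop that optionally confers the CPU's timeslice to the hypervisor when running in shared-processor mode. A compilable toy model of that loop; the stub helpers stand in for HMT_low()/HMT_medium() and __spin_yield(), and shared_processor mirrors the SHARED_PROCESSOR test (this is a sketch of the control flow, not the real primitives):

#include <stdbool.h>

struct sketch_lock { volatile int held; };

static bool try_lock(struct sketch_lock *l)
{
	return __sync_bool_compare_and_swap(&l->held, 0, 1);
}

static void lower_thread_priority(void)   { /* models HMT_low() */ }
static void restore_thread_priority(void) { /* models HMT_medium() */ }
static void yield_to_holder(struct sketch_lock *l) { (void)l; /* models __spin_yield() */ }

static void spin_lock_sketch(struct sketch_lock *l, bool shared_processor)
{
	while (!try_lock(l)) {               /* fast path failed */
		do {
			lower_thread_priority();     /* be polite to SMT siblings */
			if (shared_processor)
				yield_to_holder(l);      /* confer timeslice to the holder */
		} while (l->held);               /* poll without atomic traffic */
		restore_thread_priority();
	}
}
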
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 74236c9f05b1..2351adc4fdc4 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile signed int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f6dca4f4b295..9040330b0530 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -210,7 +210,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc)
 		return 0;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 
 	action = desc->action;
 	if (!action || !action->handler)
@@ -237,7 +237,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	seq_putc(p, '\n');
 
 skip:
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return 0;
 }
@@ -1112,7 +1112,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
 		if (!desc)
 			continue;
 
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 
 		if (desc->action && desc->action->handler) {
 			seq_printf(m, "%5d  ", i);
@@ -1131,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
 			seq_printf(m, "%s\n", p);
 		}
 
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 
 	return 0;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index bf90361bb70f..fd0d29493fd6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,7 +42,7 @@
 #include <asm/mmu.h>
 
 struct rtas_t rtas = {
-	.lock = __RAW_SPIN_LOCK_UNLOCKED
+	.lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);
 
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void)
 
 	local_irq_save(flags);
 	preempt_disable();
-	__raw_spin_lock_flags(&rtas.lock, flags);
+	arch_spin_lock_flags(&rtas.lock, flags);
 	return flags;
 }
 
 static void unlock_rtas(unsigned long flags)
 {
-	__raw_spin_unlock(&rtas.lock);
+	arch_spin_unlock(&rtas.lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
 	return 1;
 }
 
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static u64 timebase = 0;
 
 void __cpuinit rtas_give_timebase(void)
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void)
 
 	local_irq_save(flags);
 	hard_irq_disable();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
 	timebase = get_tb();
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 
 	while (timebase)
 		barrier();
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void)
 {
 	while (!timebase)
 		barrier();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 }
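
rtas_give_timebase()/rtas_take_timebase() form a tiny two-CPU handshake: the donor freezes the timebase, publishes its value, then spins until the taker has consumed it and zeroed the shared variable. A condensed model of just that handshake, with the freeze call, get_tb()/set_tb() and the arch lock stripped away (a sketch, not the platform code; the real loops run with IRQs hard-disabled and use barrier()):

#include <stdint.h>

/* 'slot' plays the role of the static 'timebase' above: nonzero means
 * "value published, not yet consumed". Memory barriers are elided. */
static volatile uint64_t slot;

static void give_timebase_sketch(uint64_t tb_now)
{
	slot = tb_now;        /* publish (under the arch lock in the real code) */
	while (slot)
		;                 /* wait for the taker to consume it */
}

static uint64_t take_timebase_sketch(void)
{
	uint64_t tb;

	while (!slot)
		;                 /* wait for the donor to publish */
	tb = slot;            /* consume the value... */
	slot = 0;             /* ...and signal completion */
	return tb;
}
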
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 79d0fa3a470d..58e14fba11b1 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -25,7 +25,7 @@
 #include <asm/smp.h>
 #include <asm/firmware.h>
 
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
 
@@ -55,7 +55,7 @@ void __spin_yield(raw_spinlock_t *lock)
  * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
  */
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
 {
 	int lock_value;
 	unsigned int holder_cpu, yield_count;
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
 }
 #endif
 
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (lock->slock) {
 		HMT_low();
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 	HMT_medium();
 }
 
-EXPORT_SYMBOL(__raw_spin_unlock_wait);
+EXPORT_SYMBOL(arch_spin_unlock_wait);
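
__spin_yield() reads the holder CPU out of the lock word (the 0x800000yy encoding noted in spinlock.h above) and, if that virtual CPU appears to be preempted, donates the caller's timeslice to it. A rough sketch of the decision logic only; holder_of(), vcpu_is_preempted() and confer_to() are hypothetical stand-ins for the lock-word decode, the lppaca yield-count test and the hypervisor confer call:

#include <stdint.h>

extern int holder_of(uint32_t lock_value);    /* yy from 0x800000yy */
extern int vcpu_is_preempted(int cpu);
extern void confer_to(int cpu);

static void spin_yield_sketch(volatile uint32_t *slock)
{
	uint32_t val = *slock;

	if (val == 0)                     /* lock already free: nothing to do */
		return;
	if (vcpu_is_preempted(holder_of(val)))
		confer_to(holder_of(val));    /* give our timeslice to the holder */
}
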
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index cc0c854291d7..0bac3a3dbecf 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
 	u32 status, enable;
 
 	/* Mask off the cascaded IRQ */
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->chip->mask(virq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	/* Ask the FPGA for IRQ status.  If 'val' is 0, then no irqs
 	 * are pending.  'ffs()' is 1 based */
@@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
 	}
 
 	/* Processing done; can reenable the cascade now */
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->chip->ack(virq);
 	if (!(desc->status & IRQ_DISABLED))
 		desc->chip->unmask(virq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 static int media5200_irq_map(struct irq_host *h, unsigned int virq,
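
media5200_irq_cascade() shows the standard shape of a cascaded interrupt handler: mask the parent line under desc->lock, dispatch the pending children with the lock dropped, then ack and conditionally unmask under the lock again. A generic sketch of that pattern; the toy chip struct and the pending_child()/handle_child() helpers are invented for illustration:

struct toy_chip {
	void (*mask)(unsigned int irq);
	void (*ack)(unsigned int irq);
	void (*unmask)(unsigned int irq);
};

extern void lock_desc(void);     /* models raw_spin_lock(&desc->lock) */
extern void unlock_desc(void);
extern int pending_child(void);  /* returns <0 when nothing is pending */
extern void handle_child(int child_irq);

static void cascade_sketch(unsigned int virq, struct toy_chip *chip, int disabled)
{
	int child;

	lock_desc();
	chip->mask(virq);            /* quiesce the parent line */
	unlock_desc();

	while ((child = pending_child()) >= 0)
		handle_child(child);     /* dispatch with the lock dropped */

	lock_desc();
	chip->ack(virq);             /* clear the parent... */
	if (!disabled)
		chip->unmask(virq);      /* ...and let it fire again */
	unlock_desc();
}
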
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 7267effc8078..6829cf7e2bda 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -237,7 +237,7 @@ extern int noirqdebug;
 
 static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
 {
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
 
@@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_eoi;
 
 	desc->status &= ~IRQ_PENDING;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
 
 	desc->status &= ~IRQ_INPROGRESS;
 out_eoi:
 	desc->chip->eoi(irq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 static int iic_host_map(struct irq_host *h, unsigned int virq,
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 07762259c60a..86c4b29eea89 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs()
 		struct irq_desc *desc = irq_to_desc(irq);
 
 		if (desc && desc->chip && desc->chip->startup) {
-			spin_lock_irqsave(&desc->lock, flags);
+			raw_spin_lock_irqsave(&desc->lock, flags);
 			desc->chip->startup(irq);
-			spin_unlock_irqrestore(&desc->lock, flags);
+			raw_spin_unlock_irqrestore(&desc->lock, flags);
 		}
 	}
 }
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index a4619347aa7e..242f8095c2df 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
 }
 
 #ifdef CONFIG_SMP
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static unsigned long timebase;
 
 static void __devinit pas_give_timebase(void)
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void)
 
 	local_irq_save(flags);
 	hard_irq_disable();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	mtspr(SPRN_TBCTL, TBCTL_FREEZE);
 	isync();
 	timebase = get_tb();
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 
 	while (timebase)
 		barrier();
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void)
 	while (!timebase)
 		smp_rmb();
 
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 }
 
 struct smp_ops_t pas_smp_ops = {
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 7d01b58f3989..b9b9e11609ec 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -906,7 +906,7 @@ void xics_migrate_irqs_away(void)
 		    || desc->chip->set_affinity == NULL)
 			continue;
 
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 
 		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
 		if (status) {
@@ -930,7 +930,7 @@ void xics_migrate_irqs_away(void)
 		cpumask_setall(irq_to_desc(virq)->affinity);
 		desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 #endif
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 62e50258cdef..c6e11b077108 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	u32 intr_index;
 	u32 have_shift = 0;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
 		if (desc->chip->mask_ack)
 			desc->chip->mask_ack(irq);
@@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		break;
 	}
 unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 static int __devinit fsl_of_msi_probe(struct of_device *dev,
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 7d10074b3304..6f220a913e42 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
 	int src;
 	int subvirq;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	if (desc->status & IRQ_LEVEL)
 		desc->chip->mask(virq);
 	else
 		desc->chip->mask_ack(virq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	msr = mfdcr(uic->dcrbase + UIC_MSR);
 	if (!msr) /* spurious interrupt */
@@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
 		generic_handle_irq(subvirq);
 
 uic_irq_ret:
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	if (desc->status & IRQ_LEVEL)
 		desc->chip->ack(virq);
 	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
 		desc->chip->unmask(virq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 static struct uic * __init uic_init_one(struct device_node *node)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7ab..a587907d77f3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) \
-		 _raw_spin_relax(lock); } while (0)
+#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) \
+		 arch_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void arch_spin_lock_wait(arch_spinlock_t *);
+extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int arch_spin_trylock_retry(arch_spinlock_t *);
+extern void arch_spin_relax(arch_spinlock_t *lock);
 
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return;
-	_raw_spin_lock_wait(lp);
+	arch_spin_lock_wait(lp);
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
 					 unsigned long flags)
 {
 	int old;
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return;
-	_raw_spin_lock_wait_flags(lp, flags);
+	arch_spin_lock_wait_flags(lp, flags);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return 1;
-	return _raw_spin_trylock_retry(lp);
+	return arch_spin_trylock_retry(lp);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
 	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
@@ -113,22 +113,22 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
 
-extern void _raw_read_lock_wait(raw_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		_raw_read_lock_wait(rw);
 }
 
-static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
 		_raw_read_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int old, cmp;
 
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	} while (cmp != old);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
 		_raw_write_lock_wait(rw);
 }
 
-static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
 		_raw_write_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
@@ -181,14 +181,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	return _raw_read_trylock_retry(rw);
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
 		return 1;
 	return _raw_write_trylock_retry(rw);
 }
 
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
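
On s390 the lock word doubles as an owner tag: a free lock holds 0 and a held lock holds the bitwise complement of the owner's CPU number, so one compare-and-swap both acquires the lock and records who has it (the complement keeps CPU 0's tag distinct from the unlocked value). A compact model, with a GCC builtin standing in for _raw_compare_and_swap(), which wraps the s390 cs instruction:

#include <stdbool.h>

/* Model of the owner-tagged lock word: 0 = free, ~cpu = held by cpu. */
struct tagged_lock { volatile unsigned int owner_cpu; };

static bool tagged_trylock(struct tagged_lock *lp, unsigned int my_cpu)
{
	/* ~my_cpu is never 0, so even CPU 0 gets a nonzero owner tag */
	return __sync_val_compare_and_swap(&lp->owner_cpu, 0, ~my_cpu) == 0;
}

static void tagged_unlock(struct tagged_lock *lp)
{
	lp->owner_cpu = 0;       /* the real code releases via cs as well */
}

static unsigned int tagged_holder(struct tagged_lock *lp)
{
	return ~lp->owner_cpu;   /* recover the CPU number for directed yield */
}
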
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 654abc40de04..9c76656a0af0 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f7e0d30250b7..10754a375668 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
 		_raw_yield();
 }
 
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
 			_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
 
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
 			_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,32 +80,32 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
 		local_irq_restore(flags);
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	unsigned int cpu = ~smp_processor_id();
 	int count;
 
 	for (count = spin_retry; count > 0; count--) {
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return 1;
 	}
 	return 0;
 }
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void _raw_spin_relax(raw_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
 	if (cpu != 0)
 		_raw_yield_cpu(~cpu);
 }
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
 
-void _raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
-void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		local_irq_disable();
@@ -145,13 +145,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait_flags);
 
-int _raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
-void _raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
 	int count = spin_retry;
 
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
 			return;
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
-void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	int count = spin_retry;
 
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		local_irq_disable();
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -197,12 +197,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait_flags);
 
-int _raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
 			return 1;
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index a28c9f0053fd..bdc0f3b6c56a 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
-#define __raw_spin_is_locked(x)		((x)->lock <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-	do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x)		((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
  *
 * We make no fairness assumptions.  They have a cost.
  */
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 	unsigned long oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_spin_lock	\n\t"
+		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
 		"mov		%0, %1				\n\t"
 		"mov		#0, %0				\n\t"
 		"movco.l	%0, @%2				\n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-		"mov		#1, %0 ! __raw_spin_unlock	\n\t"
+		"mov		#1, %0 ! arch_spin_unlock	\n\t"
 		"mov.l		%0, @%1				\n\t"
 		: "=&z" (tmp)
 		: "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_spin_trylock	\n\t"
+		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
 		"mov		%0, %1				\n\t"
 		"mov		#0, %0				\n\t"
 		"movco.l	%0, @%2				\n\t"
@@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)	((x)->lock > 0)
+#define arch_read_can_lock(x)	((x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __raw_read_lock	\n\t"
+		"movli.l	@%1, %0	! arch_read_lock	\n\t"
 		"cmp/pl		%0				\n\t"
 		"bf		1b				\n\t"
 		"add		#-1, %0				\n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __raw_read_unlock	\n\t"
+		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
 		"add		#1, %0				\n\t"
 		"movco.l	%0, @%1				\n\t"
 		"bf		1b				\n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __raw_write_lock	\n\t"
+		"movli.l	@%1, %0	! arch_write_lock	\n\t"
 		"cmp/hs		%2, %0				\n\t"
 		"bf		1b				\n\t"
 		"sub		%2, %0				\n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__ (
-		"mov.l		%1, @%0 ! __raw_write_unlock	\n\t"
+		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
 		:
 		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
 		: "t", "memory"
 	);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_read_trylock	\n\t"
+		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
 		"mov		%0, %1				\n\t"
 		"cmp/pl		%0				\n\t"
 		"bf		2f				\n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	return (oldval > 0);
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_write_trylock	\n\t"
+		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
 		"mov		%0, %1				\n\t"
 		"cmp/hs		%3, %0				\n\t"
 		"bf		2f				\n\t"
@@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SH_SPINLOCK_H */
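
All the SH primitives above are built on the movli.l/movco.l pair, a load-linked/store-conditional: movli.l loads the word and sets a reservation, movco.l stores only if the reservation survived, and the code branches back to retry on failure. The same acquire loop restated with a C11 compare-exchange in place of the LL/SC reservation (a readability sketch, not the arch code; 1 means unlocked here, matching __ARCH_SPIN_LOCK_UNLOCKED below):

#include <stdatomic.h>
#include <stdbool.h>

static void llsc_spin_lock(atomic_uint *lock)
{
	unsigned int old;

	do {
		old = atomic_load_explicit(lock, memory_order_relaxed); /* movli.l */
	} while (old == 0 ||                                /* still held: spin */
		 !atomic_compare_exchange_weak(lock, &old, 0)); /* movco.l failed */
}

static bool llsc_spin_trylock(atomic_uint *lock)
{
	unsigned int old = 1;                  /* expect unlocked */
	return atomic_compare_exchange_strong(lock, &old, 0);
}

static void llsc_spin_unlock(atomic_uint *lock)
{
	atomic_store(lock, 1);                 /* mov #1 / mov.l in the diff */
}
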
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index b4d244e7b60c..9b7560db06ca 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED		{ 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED		{ 1 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
 #define RW_LOCK_BIAS			0x01000000
-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
 
 #endif
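
The SH rwlock is a biased counter: it starts at RW_LOCK_BIAS, each reader takes one unit, and a writer takes the whole bias, so the word reads 0 when write-locked and equals the bias only when completely free. A small, deliberately non-atomic model of the counting rules (the real primitives wrap these updates in the movli.l/movco.l retry loops shown above):

#include <stdbool.h>

#define BIAS 0x01000000  /* mirrors RW_LOCK_BIAS */

struct biased_rwlock { unsigned int lock; /* starts at BIAS */ };

static bool model_read_trylock(struct biased_rwlock *rw)
{
	if (rw->lock == 0)        /* write-locked: bias fully consumed */
		return false;
	rw->lock -= 1;            /* one reader takes one unit of the bias */
	return true;
}

static bool model_write_trylock(struct biased_rwlock *rw)
{
	if (rw->lock != BIAS)     /* any reader or writer present */
		return false;
	rw->lock -= BIAS;         /* writer takes the whole bias, leaving 0 */
	return true;
}

static void model_read_unlock(struct biased_rwlock *rw)  { rw->lock += 1; }
static void model_write_unlock(struct biased_rwlock *rw) { rw->lock = BIAS; }
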
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index e1913f28f418..d2d41d046657 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc)
 		return 0;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	for_each_online_cpu(j)
 		any_count |= kstat_irqs_cpu(i, j);
 	action = desc->action;
@@ -97,7 +97,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	seq_putc(p, '\n');
 out:
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 #endif
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff636..7f9b9dba38a6 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
10 10
11#include <asm/psr.h> 11#include <asm/psr.h>
12 12
13#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) 13#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
14 14
15#define __raw_spin_unlock_wait(lock) \ 15#define arch_spin_unlock_wait(lock) \
16 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 16 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
17 17
18static inline void __raw_spin_lock(raw_spinlock_t *lock) 18static inline void arch_spin_lock(arch_spinlock_t *lock)
19{ 19{
20 __asm__ __volatile__( 20 __asm__ __volatile__(
21 "\n1:\n\t" 21 "\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
35 : "g2", "memory", "cc"); 35 : "g2", "memory", "cc");
36} 36}
37 37
38static inline int __raw_spin_trylock(raw_spinlock_t *lock) 38static inline int arch_spin_trylock(arch_spinlock_t *lock)
39{ 39{
40 unsigned int result; 40 unsigned int result;
41 __asm__ __volatile__("ldstub [%1], %0" 41 __asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
45 return (result == 0); 45 return (result == 0);
46} 46}
47 47
48static inline void __raw_spin_unlock(raw_spinlock_t *lock) 48static inline void arch_spin_unlock(arch_spinlock_t *lock)
49{ 49{
50 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); 50 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
51} 51}
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
65 * Sort of like atomic_t's on Sparc, but even more clever. 65 * Sort of like atomic_t's on Sparc, but even more clever.
66 * 66 *
67 * ------------------------------------ 67 * ------------------------------------
68 * | 24-bit counter | wlock | raw_rwlock_t 68 * | 24-bit counter | wlock | arch_rwlock_t
69 * ------------------------------------ 69 * ------------------------------------
70 * 31 8 7 0 70 * 31 8 7 0
71 * 71 *
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
76 * 76 *
77 * Unfortunately this scheme limits us to ~16,000,000 cpus. 77 * Unfortunately this scheme limits us to ~16,000,000 cpus.
78 */ 78 */
79static inline void arch_read_lock(raw_rwlock_t *rw) 79static inline void __arch_read_lock(arch_rwlock_t *rw)
80{ 80{
81 register raw_rwlock_t *lp asm("g1"); 81 register arch_rwlock_t *lp asm("g1");
82 lp = rw; 82 lp = rw;
83 __asm__ __volatile__( 83 __asm__ __volatile__(
84 "mov %%o7, %%g4\n\t" 84 "mov %%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_lock(lock); \
+	__arch_read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_unlock(lock); \
+	__arch_read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	*(volatile __u32 *)&lp->lock = ~0U;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val;
 
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (val == 0);
 }
 
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	register int res asm("o0");
 	lp = rw;
 	__asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
 	return res;
 }
 
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
 ({	unsigned long flags; \
 	int res; \
 	local_irq_save(flags); \
-	res = arch_read_trylock(lock); \
+	res = __arch_read_trylock(lock); \
 	local_irq_restore(flags); \
 	res; \
 })
 
-#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags)   __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags)  __raw_write_lock(rw)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
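The sparc32 hunks above keep a two-level structure after the rename: the double-underscored helpers (__arch_read_lock(), __arch_read_unlock(), __arch_read_trylock()) are the bare primitives, and the unprefixed arch_* macros wrap each call in local_irq_save()/local_irq_restore() so the inner sequence always runs with interrupts masked; the lock itself is held past the wrapper. A minimal kernel-style sketch of the same wrapping pattern (the toy_* names are illustrative, not from the patch):

typedef struct { unsigned int lock; } toy_lock_t;

static inline void __toy_lock(toy_lock_t *l)
{
	/* bare primitive: caller guarantees interrupts are disabled */
	while (xchg(&l->lock, 1))
		cpu_relax();
}

#define toy_lock(l)				\
do {						\
	unsigned long flags;			\
	local_irq_save(flags);			\
	__toy_lock(l);				\
	local_irq_restore(flags);		\
} while (0)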
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e514783582..073936a8b275 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
  * the spinner sections must be pre-V9 branches.
  */
 
-#define __raw_spin_is_locked(lp)	((lp)->lock != 0)
+#define arch_spin_is_locked(lp)	((lp)->lock != 0)
 
-#define __raw_spin_unlock_wait(lp)	\
+#define arch_spin_unlock_wait(lp)	\
 	do {	rmb();			\
 	} while((lp)->lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long result;
 
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0UL);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stb		%%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long tmp1, tmp2;
 
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
 {
 	int tmp1, tmp2;
 
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
 	return tmp1;
 }
 
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
 
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stw		%%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
 
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
 	return result;
 }
 
-#define __raw_read_lock(p)	arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p)	arch_read_trylock(p)
-#define __raw_read_unlock(p)	arch_read_unlock(p)
-#define __raw_write_lock(p)	arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p)	arch_write_unlock(p)
-#define __raw_write_trylock(p)	arch_write_trylock(p)
+#define arch_read_lock(p)	arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p)	arch_read_trylock(p)
+#define arch_read_unlock(p)	arch_read_unlock(p)
+#define arch_write_lock(p)	arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p)	arch_write_unlock(p)
+#define arch_write_trylock(p)	arch_write_trylock(p)
 
-#define __raw_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw)	(!(rw)->lock)
+#define arch_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw)	(!(rw)->lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* !(__ASSEMBLY__) */
 
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585b..9c454fdeaad8 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index ce996f97855f..8d6882bb480a 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
@@ -785,14 +785,14 @@ void fixup_irqs(void)
 	for (irq = 0; irq < NR_IRQS; irq++) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&irq_desc[irq].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
 		if (irq_desc[irq].action &&
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
 					irq_desc[irq].affinity);
 		}
-		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}
 
 	tick_ops->disable_irq();
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 039270b9b73b..89474ba0741e 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS)
 		seq_putc(p, '\n');
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index efb38994859c..dd59a85a918f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 9357473c8da0..b1e70d51e40c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
 			   phys_addr_t phys, pgprot_t flags);
 };
 
-struct raw_spinlock;
+struct arch_spinlock;
 struct pv_lock_ops {
-	int (*spin_is_locked)(struct raw_spinlock *lock);
-	int (*spin_is_contended)(struct raw_spinlock *lock);
-	void (*spin_lock)(struct raw_spinlock *lock);
-	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct raw_spinlock *lock);
-	void (*spin_unlock)(struct raw_spinlock *lock);
+	int (*spin_is_locked)(struct arch_spinlock *lock);
+	int (*spin_is_contended)(struct arch_spinlock *lock);
+	void (*spin_lock)(struct arch_spinlock *lock);
+	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+	int (*spin_trylock)(struct arch_spinlock *lock);
+	void (*spin_unlock)(struct arch_spinlock *lock);
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4e77853321db..3089f70c0c52 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -58,7 +58,7 @@
 #if (NR_CPUS < 256)
 #define TICKET_SHIFT 8
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	: "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, new;
 
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 #else
 #define TICKET_SHIFT 16
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	: "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
@@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (int)(lock)->lock > 0;
 }
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return (lock)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
 		     "jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw) : "memory");
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
 		     "jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "addl %1, %0"
 		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 /* The {read|write|spin}_lock() on x86 are full memory barriers. */
 static inline void smp_mb__after_lock(void) { }
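The x86 hunks above rename the ticket-lock implementation without touching its logic: one half of slock is the next-ticket counter, the other half the now-serving counter; __ticket_spin_lock() takes a ticket with an atomic add and spins until served, and unlock advances the serving half. A rough user-space C11 equivalent of the NR_CPUS >= 256 variant (illustrative only; the kernel increments the 16-bit subword with incw so it wraps cleanly, which a whole-word fetch-add does not):

#include <stdatomic.h>
#include <sched.h>

#define TICKET_SHIFT 16	/* high half: next ticket, low half: now serving */

typedef struct { atomic_uint slock; } ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	/* take a ticket: fetch-add 1 into the high half */
	unsigned int me = atomic_fetch_add(&lock->slock, 1u << TICKET_SHIFT)
			  >> TICKET_SHIFT;

	/* spin until the serving counter reaches our ticket */
	while ((atomic_load(&lock->slock) & 0xffffu) != me)
		sched_yield();	/* stand-in for cpu_relax() */
}

static void ticket_unlock(ticket_lock_t *lock)
{
	/* only the owner advances "now serving" */
	atomic_fetch_add(&lock->slock, 1u);
}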
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 845f81c87091..dcb48b2edc11 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,16 +5,16 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
 	unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
 
 #endif /* _ASM_X86_SPINLOCK_TYPES_H */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d5d498fbee4b..11a5851f1f50 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			continue;
 
 		cfg = irq_cfg(irq);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 
 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
@@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		}
 		__get_cpu_var(vector_irq)[vector] = -1;
 unlock:
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 	}
 
 	irq_exit();
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b8ce165dde5d..0a0aa1cec8f1 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -188,7 +188,7 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
@@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void)
 	/* racy, but better than risking deadlock. */
 	raw_local_irq_save(flags);
 	cpu = smp_processor_id();
-	if (!__raw_spin_trylock(&die_lock)) {
+	if (!arch_spin_trylock(&die_lock)) {
 		if (cpu == die_owner)
 			/* nested oops. should stop eventually */;
 		else
-			__raw_spin_lock(&die_lock);
+			arch_spin_lock(&die_lock);
 	}
 	die_nest_count++;
 	die_owner = cpu;
@@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 	die_nest_count--;
 	if (!die_nest_count)
 		/* Nest count reaches zero, release the lock. */
-		__raw_spin_unlock(&die_lock);
+		arch_spin_unlock(&die_lock);
 	raw_local_irq_restore(flags);
 	oops_exit();
 
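oops_begin() above is a textbook use of the bare arch_* ops: in a crash path the full spinlock machinery may itself be wedged, so the code trylocks first, spins only when another CPU owns die_lock, and lets the CPU that already owns it (a nested oops) fall through rather than deadlock. The shape of the pattern as a kernel-style sketch (illustrative names, not from the patch):

static arch_spinlock_t crash_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int crash_owner = -1;

static void crash_lock_acquire(int cpu)
{
	if (!arch_spin_trylock(&crash_lock)) {
		if (cpu != crash_owner)
			arch_spin_lock(&crash_lock);
		/* else: nested entry on the owning CPU, keep going */
	}
	crash_owner = cpu;
}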
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 664bcb7384ac..91fd0c70a18a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc)
 		return 0;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	for_each_online_cpu(j)
 		any_count |= kstat_irqs_cpu(i, j);
 	action = desc->action;
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	seq_putc(p, '\n');
 out:
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -294,12 +294,12 @@ void fixup_irqs(void)
 			continue;
 
 		/* interrupt's are disabled at this point */
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 
 		affinity = desc->affinity;
 		if (!irq_has_action(irq) ||
 		    cpumask_equal(affinity, cpu_online_mask)) {
-			spin_unlock(&desc->lock);
+			raw_spin_unlock(&desc->lock);
 			continue;
 		}
 
@@ -326,7 +326,7 @@ void fixup_irqs(void)
 		if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
 			desc->chip->unmask(irq);
 
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 
 		if (break_affinity && set_affinity)
 			printk("Broke affinity for irq %i\n", irq);
@@ -356,10 +356,10 @@ void fixup_irqs(void)
 			irq = __get_cpu_var(vector_irq)[vector];
 
 			desc = irq_to_desc(irq);
-			spin_lock(&desc->lock);
+			raw_spin_lock(&desc->lock);
 			if (desc->chip->retrigger)
 				desc->chip->retrigger(irq);
-			spin_unlock(&desc->lock);
+			raw_spin_unlock(&desc->lock);
 		}
 	}
 }
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 3a7c5a44082e..676b8c77a976 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,9 +8,9 @@
 #include <asm/paravirt.h>
 
 static inline void
-default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 struct pv_lock_ops pv_lock_ops = {
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index eed156851f5d..0aa5fed8b9e6 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 static __cpuinitdata cycles_t last_tsc;
 static __cpuinitdata cycles_t max_warp;
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void)
 		 * previous TSC that was measured (possibly on
 		 * another CPU) and update the previous TSC timestamp.
 		 */
-		__raw_spin_lock(&sync_lock);
+		arch_spin_lock(&sync_lock);
 		prev = last_tsc;
 		rdtsc_barrier();
 		now = get_cycles();
 		rdtsc_barrier();
 		last_tsc = now;
-		__raw_spin_unlock(&sync_lock);
+		arch_spin_unlock(&sync_lock);
 
 		/*
 		 * Be nice every now and then (and also check whether
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void)
 		 * we saw a time-warp of the TSC going backwards:
 		 */
 		if (unlikely(prev > now)) {
-			__raw_spin_lock(&sync_lock);
+			arch_spin_lock(&sync_lock);
 			max_warp = max(max_warp, prev - now);
 			nr_warps++;
-			__raw_spin_unlock(&sync_lock);
+			arch_spin_unlock(&sync_lock);
 		}
 	}
 	WARN(!(now-start),
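Note that check_tsc_warp() stays on the bare arch_spin_* calls rather than moving up to raw_spin_*: as the comment in the hunk says, this code wants the fastest, inlined, non-debug lock so that no lockdep or preemption bookkeeping lands inside the timed window. The pattern, reduced to a kernel-style sketch (only the arch_* names come from the patch):

static arch_spinlock_t timing_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void timed_critical_section(void)
{
	/* bare arch_* ops: no lockdep, no preempt accounting, no
	 * debug checks, so nothing extra inside the measured region */
	arch_spin_lock(&timing_lock);
	/* ... take timestamps ... */
	arch_spin_unlock(&timing_lock);
}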
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 36a5141108df..24ded31b5aec 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -120,14 +120,14 @@ struct xen_spinlock {
 	unsigned short spinners;	/* count of waiting cpus */
 };
 
-static int xen_spin_is_locked(struct raw_spinlock *lock)
+static int xen_spin_is_locked(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
 	return xl->lock != 0;
 }
 
-static int xen_spin_is_contended(struct raw_spinlock *lock)
+static int xen_spin_is_contended(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
 	return xl->spinners != 0;
 }
 
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static int xen_spin_trylock(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
 	__get_cpu_var(lock_spinners) = prev;
 }
 
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
 	return ret;
 }
 
-static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
 	spin_time_accum_total(start_spin);
 }
 
-static void xen_spin_lock(struct raw_spinlock *lock)
+static void xen_spin_lock(struct arch_spinlock *lock)
 {
 	__xen_spin_lock(lock, false);
 }
 
-static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
 {
 	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
 }
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
 	}
 }
 
-static void xen_spin_unlock(struct raw_spinlock *lock)
+static void xen_spin_unlock(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a1badb32fcda..8cd38484e130 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index c8946465e63a..ecc44a8e2b44 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,19 +15,19 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
-	__raw_spin_lock(s);			\
+	arch_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
-	__raw_spin_unlock(s);				\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
 
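The _atomic_spin_* helpers above implement the classic hashed-lock fallback: architectures without native atomic memory operations guard each word with one lock out of a small array, selected by hashing the word's address, so unrelated addresses rarely contend. A self-contained C11 sketch of the idea (array size, line size and names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

#define HASH_SIZE	4	/* as in ATOMIC_HASH_SIZE above */
#define LINE_BYTES	64	/* stand-in for L1_CACHE_BYTES */

static _Atomic int hash_locks[HASH_SIZE];	/* 0 = unlocked */

static _Atomic int *lock_for(const void *addr)
{
	return &hash_locks[((uintptr_t)addr / LINE_BYTES) % HASH_SIZE];
}

static void emulated_set_bit(unsigned int nr, unsigned long *word)
{
	_Atomic int *l = lock_for(word);
	int expect = 0;

	while (!atomic_compare_exchange_weak_explicit(l, &expect, 1,
			memory_order_acquire, memory_order_relaxed))
		expect = 0;		/* spin until we flip 0 -> 1 */

	*word |= 1UL << nr;		/* plain RMW, now exclusive */

	atomic_store_explicit(l, 0, memory_order_release);
}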
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index af634e95871d..5d86fb2309d2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -169,7 +169,7 @@ struct hrtimer_clock_base {
  * @max_hang_time:	Maximum time spent in hrtimer_interrupt
  */
 struct hrtimer_cpu_base {
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t				expires_next;
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 8ed0abf06f89..5ed8b9c50355 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -170,7 +170,7 @@ extern struct cred init_cred;
 	.alloc_lock	= __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),		\
 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
 	.fs_excl	= ATOMIC_INIT(0),				\
-	.pi_lock	= __SPIN_LOCK_UNLOCKED(tsk.pi_lock),		\
+	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),	\
 	.timer_slack_ns = 50000, /* 50 usec default slack */		\
 	.pids = {							\
 		[PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),		\
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a287cfc0b1a6..451481c082b5 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
 	unsigned int		irq_count;	/* For detecting broken IRQs */
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int		irqs_unhandled;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
 	unsigned int		node;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 64a53f74c9a9..da7bdc23f279 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -681,7 +681,7 @@ struct perf_event_context {
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	/*
 	 * Protect the list of events. Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6ac..8227f717c70f 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
 	struct list_head prio_list;
 	struct list_head node_list;
 #ifdef CONFIG_DEBUG_PI_LIST
-	spinlock_t *lock;
+	raw_spinlock_t *rawlock;
+	spinlock_t *spinlock;
 #endif
 };
 
@@ -91,9 +92,11 @@ struct plist_node {
 };
 
 #ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock)	.lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock)		.spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)	.rawlock = _lock
 #else
 # define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
 #endif
 
 #define _PLIST_HEAD_INIT(head)				\
@@ -107,11 +110,22 @@ struct plist_node {
  */
 #define PLIST_HEAD_INIT(head, _lock)			\
 {							\
 	_PLIST_HEAD_INIT(head),				\
 	PLIST_HEAD_LOCK_INIT(&(_lock))			\
 }
 
 /**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head:	struct plist_head variable name
+ * @_lock:	lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock)		\
+{							\
+	_PLIST_HEAD_INIT(head),				\
+	PLIST_HEAD_LOCK_INIT_RAW(&(_lock))		\
+}
+
+/**
  * PLIST_NODE_INIT - static struct plist_node initializer
  * @node:	struct plist_node variable name
  * @__prio:	initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
 #define PLIST_NODE_INIT(node, __prio)			\
 {							\
 	.prio  = (__prio),				\
 	.plist = { _PLIST_HEAD_INIT((node).plist) },	\
 }
 
 /**
  * plist_head_init - dynamic struct plist_head initializer
  * @head:	&struct plist_head pointer
- * @lock:	list spinlock, remembered for debugging
+ * @lock:	spinlock protecting the list (debugging)
  */
 static inline void
 plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
 	INIT_LIST_HEAD(&head->prio_list);
 	INIT_LIST_HEAD(&head->node_list);
 #ifdef CONFIG_DEBUG_PI_LIST
-	head->lock = lock;
+	head->spinlock = lock;
+	head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head:	&struct plist_head pointer
+ * @lock:	raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+	INIT_LIST_HEAD(&head->prio_list);
+	INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+	head->rawlock = lock;
+	head->spinlock = NULL;
 #endif
 }
 
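The plist debug change replaces the single lock pointer with a pair, exactly one of which is set by each initializer, so CONFIG_DEBUG_PI_LIST checks can assert against whichever lock type actually protects the list. Dynamic initialization then picks the flavor per call site, e.g. (kernel-style sketch; DEFINE_RAW_SPINLOCK is assumed from the new raw API introduced elsewhere in this series):

static DEFINE_RAW_SPINLOCK(pi_lock);
static struct plist_head pi_list;

static void init_pi(void)
{
	/* debug builds record &pi_lock in head->rawlock */
	plist_head_init_raw(&pi_list, &pi_lock);
}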
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d530..281d8fd775e8 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
  * @owner:	the mutex owner
  */
 struct rt_mutex {
-	spinlock_t		wait_lock;
+	raw_spinlock_t		wait_lock;
 	struct plist_head	wait_list;
 	struct task_struct	*owner;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
 #endif
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
-	{ .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+	, .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
 	, .owner = NULL \
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
 
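__RT_MUTEX_INITIALIZER now composes the two pieces changed earlier in this patch: a __RAW_SPIN_LOCK_UNLOCKED wait_lock plus a PLIST_HEAD_INIT_RAW wait list whose debug pointer refers to that same raw lock. A static definition therefore stays a one-liner, e.g. (illustrative name):

static struct rt_mutex my_rtm = __RT_MUTEX_INITIALIZER(my_rtm);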
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 000000000000..71e0b00b6f2c
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+  extern void __rwlock_init(rwlock_t *lock, const char *name,
+			    struct lock_class_key *key);
+# define rwlock_init(lock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__rwlock_init((lock), #lock, &__key);			\
+} while (0)
+#else
+# define rwlock_init(lock)					\
+	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
+#else
+# define do_raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
+		arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
+		arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)		arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock)	__cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock)	__cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock)	_raw_write_lock(lock)
+#define read_lock(lock)		_raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = _raw_read_lock_irqsave(lock);	\
+	} while (0)
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = _raw_write_lock_irqsave(lock);	\
+	} while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_read_lock_irqsave(lock, flags);	\
+	} while (0)
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_write_lock_irqsave(lock, flags);	\
+	} while (0)
+
+#endif
+
+#define read_lock_irq(lock)		_raw_read_lock_irq(lock)
+#define read_lock_bh(lock)		_raw_read_lock_bh(lock)
+#define write_lock_irq(lock)		_raw_write_lock_irq(lock)
+#define write_lock_bh(lock)		_raw_write_lock_bh(lock)
+#define read_unlock(lock)		_raw_read_unlock(lock)
+#define write_unlock(lock)		_raw_write_unlock(lock)
+#define read_unlock_irq(lock)		_raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock)		_raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags)			\
+	do {							\
+		typecheck(unsigned long, flags);		\
+		_raw_read_unlock_irqrestore(lock, flags);	\
+	} while (0)
+#define read_unlock_bh(lock)		_raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags)			\
+	do {							\
+		typecheck(unsigned long, flags);		\
+		_raw_write_unlock_irqrestore(lock, flags);	\
+	} while (0)
+#define write_unlock_bh(lock)		_raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags)			\
+({								\
+	local_irq_save(flags);					\
+	write_trylock(lock) ?					\
+	1 : ({ local_irq_restore(flags); 0; });			\
+})
+
+#endif /* __LINUX_RWLOCK_H */
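The consumer-facing API defined by this new header is unchanged by the rename; only the arch_* layer underneath is new. Typical kernel-style usage (illustrative names):

static DEFINE_RWLOCK(table_lock);
static int table[16];

static int table_get(int i)
{
	unsigned long flags;
	int v;

	read_lock_irqsave(&table_lock, flags);	/* many readers may hold it */
	v = table[i];
	read_unlock_irqrestore(&table_lock, flags);
	return v;
}

static void table_set(int i, int v)
{
	write_lock_irq(&table_lock);		/* writers are exclusive */
	table[i] = v;
	write_unlock_irq(&table_lock);
}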
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 000000000000..9c9f0495d37c
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,282 @@
1#ifndef __LINUX_RWLOCK_API_SMP_H
2#define __LINUX_RWLOCK_API_SMP_H
3
4#ifndef __LINUX_SPINLOCK_API_SMP_H
5# error "please don't include this file directly"
6#endif
7
8/*
9 * include/linux/rwlock_api_smp.h
10 *
11 * spinlock API declarations on SMP (and debug)
12 * (implemented in kernel/spinlock.c)
13 *
14 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
15 * Released under the General Public License (GPL).
16 */
17
18void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
21void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
22void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
23void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
24unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
25 __acquires(lock);
26unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
27 __acquires(lock);
28int __lockfunc _raw_read_trylock(rwlock_t *lock);
29int __lockfunc _raw_write_trylock(rwlock_t *lock);
30void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
31void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
32void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
33void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
34void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
35void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
36void __lockfunc
37_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
38 __releases(lock);
39void __lockfunc
40_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
41 __releases(lock);
42
43#ifdef CONFIG_INLINE_READ_LOCK
44#define _raw_read_lock(lock) __raw_read_lock(lock)
45#endif
46
47#ifdef CONFIG_INLINE_WRITE_LOCK
48#define _raw_write_lock(lock) __raw_write_lock(lock)
49#endif
50
51#ifdef CONFIG_INLINE_READ_LOCK_BH
52#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
53#endif
54
55#ifdef CONFIG_INLINE_WRITE_LOCK_BH
56#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
57#endif
58
59#ifdef CONFIG_INLINE_READ_LOCK_IRQ
60#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
61#endif
62
63#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
64#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
65#endif
66
67#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
68#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
69#endif
70
71#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
72#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
73#endif
74
75#ifdef CONFIG_INLINE_READ_TRYLOCK
76#define _raw_read_trylock(lock) __raw_read_trylock(lock)
77#endif
78
79#ifdef CONFIG_INLINE_WRITE_TRYLOCK
80#define _raw_write_trylock(lock) __raw_write_trylock(lock)
81#endif
82
83#ifdef CONFIG_INLINE_READ_UNLOCK
84#define _raw_read_unlock(lock) __raw_read_unlock(lock)
85#endif
86
87#ifdef CONFIG_INLINE_WRITE_UNLOCK
88#define _raw_write_unlock(lock) __raw_write_unlock(lock)
89#endif
90
91#ifdef CONFIG_INLINE_READ_UNLOCK_BH
92#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
93#endif
94
95#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
96#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
97#endif
98
99#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
100#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
101#endif
102
103#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
104#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
105#endif
106
107#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
108#define _raw_read_unlock_irqrestore(lock, flags) \
109 __raw_read_unlock_irqrestore(lock, flags)
110#endif
111
112#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+#define _raw_write_unlock_irqrestore(lock, flags)	\
+	__raw_write_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __raw_read_trylock(rwlock_t *lock)
+{
+	preempt_disable();
+	if (do_raw_read_trylock(lock)) {
+		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+static inline int __raw_write_trylock(rwlock_t *lock)
+{
+	preempt_disable();
+	if (do_raw_write_trylock(lock)) {
+		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __raw_read_lock(rwlock_t *lock)
+{
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+			     do_raw_read_lock_flags, &flags);
+	return flags;
+}
+
+static inline void __raw_read_lock_irq(rwlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline void __raw_read_lock_bh(rwlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+			     do_raw_write_lock_flags, &flags);
+	return flags;
+}
+
+static inline void __raw_write_lock_irq(rwlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock_bh(rwlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock(rwlock_t *lock)
+{
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+#endif /* CONFIG_PREEMPT */
+
+static inline void __raw_write_unlock(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_write_unlock(lock);
+	preempt_enable();
+}
+
+static inline void __raw_read_unlock(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_read_unlock(lock);
+	preempt_enable();
+}
+
+static inline void
+__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_read_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __raw_read_unlock_irq(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_read_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __raw_read_unlock_bh(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_read_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
+					     unsigned long flags)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_write_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __raw_write_unlock_irq(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_write_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __raw_write_unlock_bh(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_write_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
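
The rwlock fast paths above follow one invariant: preemption is disabled before the lock is attempted and re-enabled only on the failure path, so a successful acquire always leaves the caller non-preemptible. A minimal caller-side sketch of the API this header backs (illustrative only; example_lock and try_fast_path() are made up and not part of the patch):

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(example_lock);     /* hypothetical lock */

    static int try_fast_path(void)
    {
            if (!read_trylock(&example_lock))
                    return -EBUSY;  /* trylock already re-enabled preemption */
            /* read-side critical section runs with preemption off */
            read_unlock(&example_lock);
            return 0;
    }
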
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 000000000000..bd31808c7d8e
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ *				  and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+	arch_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC		0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname)					\
+	(rwlock_t)	{	.raw_lock = __ARCH_RW_LOCK_UNLOCKED,	\
+				.magic = RWLOCK_MAGIC,			\
+				.owner = SPINLOCK_OWNER_INIT,		\
+				.owner_cpu = -1,			\
+				RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname)					\
+	(rwlock_t)	{	.raw_lock = __ARCH_RW_LOCK_UNLOCKED,	\
+				RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define RW_LOCK_UNLOCKED	__RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
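
Note the split between the two initializers: DEFINE_RWLOCK()/__RW_LOCK_UNLOCKED() hand lockdep a named dep_map at compile time, while locks embedded in runtime-allocated objects should keep going through rwlock_init() so a lock class key is created at the init site. A short sketch under that assumption (struct foo is hypothetical):

    #include <linux/spinlock.h>

    struct foo {                    /* hypothetical container */
            rwlock_t lock;
    };

    static void foo_init(struct foo *f)
    {
            rwlock_init(&f->lock);  /* registers a lock class with lockdep */
    }
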
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d388494f45d..5c858f38e81a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1409,7 +1409,7 @@ struct task_struct {
 #endif
 
 	/* Protection of the PI data structures: */
-	spinlock_t		pi_lock;
+	raw_spinlock_t		pi_lock;
 
 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task */
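
pi_lock guards the priority-inheritance bookkeeping that rtmutexes and the scheduler touch from non-sleepable contexts, which is why it moves to raw_spinlock_t: a raw lock stays a true spinning lock even in configurations where spinlock_t may become preemptible. Callers switch to the raw_ API accordingly; a sketch (assumes CONFIG_RT_MUTEXES; the helper name is made up):

    #include <linux/sched.h>
    #include <linux/plist.h>

    /* Hypothetical helper: check for PI waiters under the raw pi_lock. */
    static bool task_has_pi_waiters(struct task_struct *p)
    {
            bool ret;

            raw_spin_lock_irq(&p->pi_lock);
            ret = !plist_head_empty(&p->pi_waiters);
            raw_spin_unlock_irq(&p->pi_lock);
            return ret;
    }
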
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 71dccfeb0d88..86088213334a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
  *
  * on SMP builds:
  *
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
  *                        initializers
  *
  * linux/spinlock_types.h:
  *                        defines the generic type and initializers
  *
- * asm/spinlock.h:        contains the __raw_spin_*()/etc. lowlevel
+ * asm/spinlock.h:        contains the arch_spin_*()/etc. lowlevel
  *                        implementations, mostly inline assembly code
  *
  * (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
  *                        defines the generic type and initializers
  *
  * linux/spinlock_up.h:
- *                        contains the __raw_spin_*()/etc. version of UP
+ *                        contains the arch_spin_*()/etc. version of UP
  *                        builds. (which are NOPs on non-debug, non-preempt
  *                        builds)
  *
@@ -75,12 +75,12 @@
 #define __lockfunc __attribute__((section(".spinlock.text")))
 
 /*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
  */
 #include <linux/spinlock_types.h>
 
 /*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
  */
 #ifdef CONFIG_SMP
 # include <asm/spinlock.h>
@@ -89,45 +89,31 @@
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void __spin_lock_init(spinlock_t *lock, const char *name,
+ extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 			   struct lock_class_key *key);
-# define spin_lock_init(lock)					\
+# define raw_spin_lock_init(lock)				\
 do {								\
 	static struct lock_class_key __key;			\
 								\
-	__spin_lock_init((lock), #lock, &__key);		\
+	__raw_spin_lock_init((lock), #lock, &__key);		\
 } while (0)
 
 #else
-# define spin_lock_init(lock)					\
-	do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock)				\
+	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
-#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __rwlock_init(rwlock_t *lock, const char *name,
-			   struct lock_class_key *key);
-# define rwlock_init(lock)					\
-do {								\
-	static struct lock_class_key __key;			\
-								\
-	__rwlock_init((lock), #lock, &__key);			\
-} while (0)
-#else
-# define rwlock_init(lock)					\
-	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
-#endif
-
-#define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
 
 #ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
 #else
 
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
 #else
-#define spin_is_contended(lock)	(((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#define raw_spin_is_contended(lock)	(((void)(lock), 0))
+#endif /*arch_spin_is_contended*/
 #endif
 
 /* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #endif
 
 /**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
  */
-#define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
 #else
-# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
-		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
-		__raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
-		__raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+{
+	arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+	arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+{
+	arch_spin_unlock(&lock->raw_lock);
+}
 #endif
 
-#define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock)		__raw_write_can_lock(&(rwlock)->raw_lock)
-
 /*
- * Define the various spin_lock and rw_lock methods.  Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods.  Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
  */
-#define spin_trylock(lock)		__cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock)		__cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock)		__cond_lock(lock, _write_trylock(lock))
+#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
 
-#define spin_lock(lock)			_spin_lock(lock)
+#define raw_spin_lock(lock)	_raw_spin_lock(lock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock)				\
+# define raw_spin_lock_nested(lock, subclass) \
+	_raw_spin_lock_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock)			\
 	 do {								\
 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
-		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
+		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
 	 } while (0)
 #else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
 #endif
 
-#define write_lock(lock)		_write_lock(lock)
-#define read_lock(lock)			_read_lock(lock)
-
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
-#define spin_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		flags = _spin_lock_irqsave(lock);	\
-	} while (0)
-#define read_lock_irqsave(lock, flags)			\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		flags = _read_lock_irqsave(lock);	\
-	} while (0)
-#define write_lock_irqsave(lock, flags)			\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		flags = _write_lock_irqsave(lock);	\
+		flags = _raw_spin_lock_irqsave(lock);	\
 	} while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
 	do {								\
 		typecheck(unsigned long, flags);		\
-		flags = _spin_lock_irqsave_nested(lock, subclass);	\
+		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
 	} while (0)
 #else
-#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
 	do {								\
 		typecheck(unsigned long, flags);		\
-		flags = _spin_lock_irqsave(lock);	\
+		flags = _raw_spin_lock_irqsave(lock);	\
 	} while (0)
 #endif
 
 #else
 
-#define spin_lock_irqsave(lock, flags)			\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		_spin_lock_irqsave(lock, flags);	\
-	} while (0)
-#define read_lock_irqsave(lock, flags)			\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		_read_lock_irqsave(lock, flags);	\
-	} while (0)
-#define write_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		_write_lock_irqsave(lock, flags);	\
+		_raw_spin_lock_irqsave(lock, flags);	\
 	} while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass)	\
-	spin_lock_irqsave(lock, flags)
 
-#endif
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
+	raw_spin_lock_irqsave(lock, flags)
 
-#define spin_lock_irq(lock)		_spin_lock_irq(lock)
-#define spin_lock_bh(lock)		_spin_lock_bh(lock)
-#define read_lock_irq(lock)		_read_lock_irq(lock)
-#define read_lock_bh(lock)		_read_lock_bh(lock)
-#define write_lock_irq(lock)		_write_lock_irq(lock)
-#define write_lock_bh(lock)		_write_lock_bh(lock)
-#define spin_unlock(lock)		_spin_unlock(lock)
-#define read_unlock(lock)		_read_unlock(lock)
-#define write_unlock(lock)		_write_unlock(lock)
-#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
-#define read_unlock_irq(lock)		_read_unlock_irq(lock)
-#define write_unlock_irq(lock)		_write_unlock_irq(lock)
-
-#define spin_unlock_irqrestore(lock, flags)		\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		_spin_unlock_irqrestore(lock, flags);	\
-	} while (0)
-#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
+#endif
 
-#define read_unlock_irqrestore(lock, flags)		\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		_read_unlock_irqrestore(lock, flags);	\
-	} while (0)
-#define read_unlock_bh(lock)		_read_unlock_bh(lock)
+#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
 
-#define write_unlock_irqrestore(lock, flags)		\
-	do {						\
+#define raw_spin_unlock_irqrestore(lock, flags)		\
+	do {							\
 		typecheck(unsigned long, flags);	\
-		_write_unlock_irqrestore(lock, flags);	\
+		_raw_spin_unlock_irqrestore(lock, flags);	\
 	} while (0)
-#define write_unlock_bh(lock)		_write_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
 
-#define spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) \
+	__cond_lock(lock, _raw_spin_trylock_bh(lock))
 
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
 ({ \
 	local_irq_disable(); \
-	spin_trylock(lock) ? \
+	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_enable(); 0;  }); \
 })
 
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
 ({ \
 	local_irq_save(flags); \
-	spin_trylock(lock) ? \
+	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-#define write_trylock_irqsave(lock, flags) \
-({ \
-	local_irq_save(flags); \
-	write_trylock(lock) ? \
-	1 : ({ local_irq_restore(flags); 0; }); \
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+	return &lock->rlock;
+}
+
+#define spin_lock_init(_lock)				\
+do {							\
+	spinlock_check(_lock);				\
+	raw_spin_lock_init(&(_lock)->rlock);		\
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+	raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+	return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass)			\
+do {								\
+	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+	raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags)				\
+do {								\
+	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+do {									\
+	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+	raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+	raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+	return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+	return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags)			\
+({ \
+	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+	raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+	return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+	return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+	return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+	assert_raw_spin_locked(&lock->rlock);
+}
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock)	(!spin_is_locked(lock))
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
 #endif /* __LINUX_SPINLOCK_H */
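
The net effect of this rewrite is a strict three-level layering: the spin_* API is now a thin static-inline shim over raw_spin_* on the embedded rlock, which in turn bottoms out in the per-architecture arch_spin_* primitives. Existing callers compile unchanged; a sketch of ordinary usage (the lock and function are made up):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock */

    static void demo(void)
    {
            unsigned long flags;

            /* forwards to raw_spin_lock_irqsave() on demo_lock.rlock */
            spin_lock_irqsave(&demo_lock, flags);
            /* critical section */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
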
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459bc..e253ccd7a604 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
 
 int in_lock_functions(unsigned long addr);
 
-#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
+#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))
 
-void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
-							__acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
-							__acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
-							__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
-							__acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-							__acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-							__acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock)		__releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock)		__releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-							__releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-							__releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-							__releases(lock);
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+							__acquires(lock);
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+							__acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+							__acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+							__acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
+							__acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+							__releases(lock);
 
 #ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
 #endif
 
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
 	return 0;
 }
 
-static inline int __read_trylock(rwlock_t *lock)
-{
-	preempt_disable();
-	if (_raw_read_trylock(lock)) {
-		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-	preempt_enable();
-	return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
-	preempt_disable();
-	if (_raw_write_trylock(lock)) {
-		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-	preempt_enable();
-	return 0;
-}
-
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
-static inline void __read_lock(rwlock_t *lock)
-{
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 {
 	unsigned long flags;
 
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	/*
 	 * On lockdep we dont want the hand-coded irq-enable of
-	 * _raw_spin_lock_flags() code, because lockdep assumes
+	 * do_raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
 #ifdef CONFIG_LOCKDEP
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 #else
-	_raw_spin_lock_flags(lock, &flags);
+	do_raw_spin_lock_flags(lock, &flags);
 #endif
 	return flags;
 }
 
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
 {
 	local_irq_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
-			     _raw_read_lock_flags, &flags);
-	return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
-	local_irq_disable();
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
-	local_bh_disable();
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
-			     _raw_write_lock_flags, &flags);
-	return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
-	local_irq_disable();
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
-static inline void __write_lock_bh(rwlock_t *lock)
-{
-	local_bh_disable();
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline void __write_lock(rwlock_t *lock)
-{
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
 #endif /* CONFIG_PREEMPT */
 
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
-	preempt_enable();
-}
-
-static inline void __write_unlock(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
+	do_raw_spin_unlock(lock);
 	preempt_enable();
 }
 
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
 					    unsigned long flags)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
 
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
 }
 
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	local_irq_restore(flags);
-	preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	local_irq_enable();
-	preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
+	do_raw_spin_unlock(lock);
 	preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
-					     unsigned long flags)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	local_irq_restore(flags);
-	preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	local_irq_enable();
-	preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
 	}
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
 	return 0;
 }
 
+#include <linux/rwlock_api_smp.h>
+
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
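
All of the contended slow paths above funnel through LOCK_CONTENDED() from linux/lockdep.h. As a rough sketch (paraphrased from that era's lockdep.h, not part of this patch), with CONFIG_LOCK_STAT enabled it expands to approximately:

    /* LOCK_CONTENDED(_lock, try, lock), approximate expansion: */
    if (!try(_lock)) {
            /* record the contention event for the lock statistics */
            lock_contended(&(_lock)->dep_map, _RET_IP_);
            lock(_lock);
    }
    lock_acquired(&(_lock)->dep_map, _RET_IP_);

Without CONFIG_LOCK_STAT it degenerates to a plain lock(_lock) call, so passing the trylock argument costs nothing in that configuration.
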
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d3164576..af1f47229e70 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
 
 #define in_lock_functions(ADDR)		0
 
-#define assert_spin_locked(lock)	do { (void)(lock); } while (0)
+#define assert_raw_spin_locked(lock)	do { (void)(lock); } while (0)
 
 /*
  * In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
   do { preempt_enable(); __release(lock); (void)(lock); } while (0)
 
 #define __UNLOCK_BH(lock) \
-  do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+  do { preempt_enable_no_resched(); local_bh_enable(); \
+	__release(lock); (void)(lock); } while (0)
 
 #define __UNLOCK_IRQ(lock) \
   do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
 #define __UNLOCK_IRQRESTORE(lock, flags) \
   do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
 
-#define _spin_lock(lock)			__LOCK(lock)
-#define _spin_lock_nested(lock, subclass)	__LOCK(lock)
-#define _read_lock(lock)			__LOCK(lock)
-#define _write_lock(lock)			__LOCK(lock)
-#define _spin_lock_bh(lock)			__LOCK_BH(lock)
-#define _read_lock_bh(lock)			__LOCK_BH(lock)
-#define _write_lock_bh(lock)			__LOCK_BH(lock)
-#define _spin_lock_irq(lock)			__LOCK_IRQ(lock)
-#define _read_lock_irq(lock)			__LOCK_IRQ(lock)
-#define _write_lock_irq(lock)			__LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
-#define _read_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
-#define _write_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock)			({ __LOCK(lock); 1; })
-#define _read_trylock(lock)			({ __LOCK(lock); 1; })
-#define _write_trylock(lock)			({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock)			({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock)			__UNLOCK(lock)
-#define _read_unlock(lock)			__UNLOCK(lock)
-#define _write_unlock(lock)			__UNLOCK(lock)
-#define _spin_unlock_bh(lock)			__UNLOCK_BH(lock)
-#define _write_unlock_bh(lock)			__UNLOCK_BH(lock)
-#define _read_unlock_bh(lock)			__UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock)			__UNLOCK_IRQ(lock)
-#define _read_unlock_irq(lock)			__UNLOCK_IRQ(lock)
-#define _write_unlock_irq(lock)			__UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
-#define _read_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
-#define _write_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_spin_lock(lock)			__LOCK(lock)
+#define _raw_spin_lock_nested(lock, subclass)	__LOCK(lock)
+#define _raw_read_lock(lock)			__LOCK(lock)
+#define _raw_write_lock(lock)			__LOCK(lock)
+#define _raw_spin_lock_bh(lock)			__LOCK_BH(lock)
+#define _raw_read_lock_bh(lock)			__LOCK_BH(lock)
+#define _raw_write_lock_bh(lock)		__LOCK_BH(lock)
+#define _raw_spin_lock_irq(lock)		__LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock)		__LOCK_IRQ(lock)
+#define _raw_write_lock_irq(lock)		__LOCK_IRQ(lock)
+#define _raw_spin_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
+#define _raw_write_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
+#define _raw_spin_trylock(lock)			({ __LOCK(lock); 1; })
+#define _raw_read_trylock(lock)			({ __LOCK(lock); 1; })
+#define _raw_write_trylock(lock)		({ __LOCK(lock); 1; })
+#define _raw_spin_trylock_bh(lock)		({ __LOCK_BH(lock); 1; })
+#define _raw_spin_unlock(lock)			__UNLOCK(lock)
+#define _raw_read_unlock(lock)			__UNLOCK(lock)
+#define _raw_write_unlock(lock)			__UNLOCK(lock)
+#define _raw_spin_unlock_bh(lock)		__UNLOCK_BH(lock)
+#define _raw_write_unlock_bh(lock)		__UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock)		__UNLOCK_BH(lock)
+#define _raw_spin_unlock_irq(lock)		__UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock)		__UNLOCK_IRQ(lock)
+#define _raw_write_unlock_irq(lock)		__UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+	__UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+	__UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+	__UNLOCK_IRQRESTORE(lock, flags)
 
 #endif /* __LINUX_SPINLOCK_API_UP_H */
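
On UP, "taking" a lock is purely a matter of preemption and IRQ/BH state, which is why every variant above maps onto the same handful of __LOCK*() helpers defined earlier in this header. From memory of this header (unchanged by the patch), approximately:

    #define __LOCK(lock) \
      do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)

    #define __LOCK_IRQ(lock) \
      do { local_irq_disable(); __LOCK(lock); } while (0)

The (void)(lock) keeps the lock variable "used" so UP builds don't warn, and __acquire() feeds sparse's lock-context tracking.
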
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a2..851b7783720d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,8 +17,8 @@
 
 #include <linux/lockdep.h>
 
-typedef struct {
-	raw_spinlock_t raw_lock;
+typedef struct raw_spinlock {
+	arch_spinlock_t raw_lock;
 #ifdef CONFIG_GENERIC_LOCKBREAK
 	unsigned int break_lock;
 #endif
@@ -29,26 +29,10 @@ typedef struct {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
-} spinlock_t;
+} raw_spinlock_t;
 
 #define SPINLOCK_MAGIC		0xdead4ead
 
-typedef struct {
-	raw_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-	unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned int magic, owner_cpu;
-	void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC		0xdeaf1eed
-
 #define SPINLOCK_OWNER_INIT	((void *)-1L)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
 # define SPIN_DEP_MAP_INIT(lockname)
 #endif
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname)		\
+	.magic = SPINLOCK_MAGIC,		\
+	.owner_cpu = -1,			\
+	.owner = SPINLOCK_OWNER_INIT,
 #else
-# define RW_DEP_MAP_INIT(lockname)
+# define SPIN_DEBUG_INIT(lockname)
 #endif
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname)				\
-	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
-				.magic = SPINLOCK_MAGIC,		\
-				.owner = SPINLOCK_OWNER_INIT,		\
-				.owner_cpu = -1,			\
-				SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname)					\
-	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
-				.magic = RWLOCK_MAGIC,			\
-				.owner = SPINLOCK_OWNER_INIT,		\
-				.owner_cpu = -1,			\
-				RW_DEP_MAP_INIT(lockname) }
-#else
-# define __SPIN_LOCK_UNLOCKED(lockname)				\
-	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
-				SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname)					\
-	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
-				RW_DEP_MAP_INIT(lockname) }
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname)	\
+	{					\
+	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
+	SPIN_DEBUG_INIT(lockname)		\
+	SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
+	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x)	raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+	union {
+		struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+		struct {
+			u8 __padding[LOCK_PADSIZE];
+			struct lockdep_map dep_map;
+		};
 #endif
+	};
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+	{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+	(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
 
 /*
- * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
- * are hence deprecated.
- * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
- * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
  */
 #define SPIN_LOCK_UNLOCKED	__SPIN_LOCK_UNLOCKED(old_style_spin_init)
-#define RW_LOCK_UNLOCKED	__RW_LOCK_UNLOCKED(old_style_rw_init)
 
 #define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
 
 #endif /* __LINUX_SPINLOCK_TYPES_H */
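
The union in the new spinlock_t is the subtle part: under CONFIG_DEBUG_LOCK_ALLOC an anonymous struct padded with LOCK_PADSIZE bytes places dep_map at exactly the offset it has inside struct raw_spinlock, so lockdep annotations can reach lock->dep_map without knowing which view they were handed. A build-time check in the same spirit could look like this (illustrative sketch, not in the patch; only meaningful when CONFIG_DEBUG_LOCK_ALLOC is set):

    #include <linux/kernel.h>
    #include <linux/spinlock_types.h>

    static inline void spinlock_layout_check(void)
    {
            /* the two dep_map members must overlay exactly */
            BUILD_BUG_ON(offsetof(struct spinlock, dep_map) !=
                         offsetof(struct raw_spinlock, dep_map));
    }
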
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198e..c09b6407ae1b 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,22 +16,22 @@
 
 typedef struct {
 	volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 #else
 
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
 
 #endif
 
 typedef struct {
 	/* no debug version on UP */
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED { }
+#define __ARCH_RW_LOCK_UNLOCKED { }
 
 #endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215b..b14f6a91e19f 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-#define __raw_spin_is_locked(x)		((x)->slock == 0)
+#define arch_spin_is_locked(x)		((x)->slock == 0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	char oldval = lock->slock;
 
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return oldval > 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	lock->slock = 1;
 }
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 /*
  * Read-write spinlocks. No debug version.
  */
-#define __raw_read_lock(lock)		do { (void)(lock); } while (0)
-#define __raw_write_lock(lock)		do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock)	({ (void)(lock); 1; })
-#define __raw_write_trylock(lock)	({ (void)(lock); 1; })
-#define __raw_read_unlock(lock)		do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock)	do { (void)(lock); } while (0)
+#define arch_read_lock(lock)		do { (void)(lock); } while (0)
+#define arch_write_lock(lock)		do { (void)(lock); } while (0)
+#define arch_read_trylock(lock)		({ (void)(lock); 1; })
+#define arch_write_trylock(lock)	({ (void)(lock); 1; })
+#define arch_read_unlock(lock)		do { (void)(lock); } while (0)
+#define arch_write_unlock(lock)		do { (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
-#define __raw_spin_is_locked(lock)	((void)(lock), 0)
+#define arch_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
-# define __raw_spin_lock(lock)		do { (void)(lock); } while (0)
-# define __raw_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
-# define __raw_spin_unlock(lock)	do { (void)(lock); } while (0)
-# define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
+# define arch_spin_lock(lock)		do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock)		do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock)	({ (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
-#define __raw_spin_is_contended(lock)	(((void)(lock), 0))
+#define arch_spin_is_contended(lock)	(((void)(lock), 0))
 
-#define __raw_read_can_lock(lock)	(((void)(lock), 1))
-#define __raw_write_can_lock(lock)	(((void)(lock), 1))
+#define arch_read_can_lock(lock)	(((void)(lock), 1))
+#define arch_write_can_lock(lock)	(((void)(lock), 1))
 
-#define __raw_spin_unlock_wait(lock) \
-	do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+#define arch_spin_unlock_wait(lock) \
+	do { cpu_relax(); } while (arch_spin_is_locked(lock))
 
 #endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/kernel/exit.c b/kernel/exit.c
index 6f50ef55a6f3..5962d7ccf243 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -933,7 +933,7 @@ NORET_TYPE void do_exit(long code)
 	 * an exiting task cleaning up the robust pi futexes.
 	 */
 	smp_mb();
-	spin_unlock_wait(&tsk->pi_lock);
+	raw_spin_unlock_wait(&tsk->pi_lock);
 
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
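
raw_spin_unlock_wait() never acquires pi_lock; it only spins until the lock is observed unlocked (see the arch_spin_unlock_wait() fallback in spinlock_up.h above), which is why the explicit smp_mb() stays: it orders this task's exit state against a concurrent lock holder. Conceptually the wait is just:

    /* conceptual shape of raw_spin_unlock_wait(&tsk->pi_lock) */
    while (raw_spin_is_locked(&tsk->pi_lock))
            cpu_relax();
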
diff --git a/kernel/fork.c b/kernel/fork.c
index 1415dc4598ae..9bd91447e052 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -939,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 
 static void rt_mutex_init_task(struct task_struct *p)
 {
-	spin_lock_init(&p->pi_lock);
+	raw_spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init(&p->pi_waiters, &p->pi_lock);
+	plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
 	p->pi_blocked_on = NULL;
 #endif
 }
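
plist heads debug-check list operations against the lock that protects them, so once pi_lock is raw the head must be initialized with the raw-aware variant used here. The same pairing in isolation (sketch; lock and head names are made up, and the plist_head_init_raw() signature is assumed from this series):

    #include <linux/plist.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(waiters_lock);       /* hypothetical */
    static struct plist_head waiters;

    static void waiters_init(void)
    {
            plist_head_init_raw(&waiters, &waiters_lock);
    }
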
diff --git a/kernel/futex.c b/kernel/futex.c
index d73ef1f3e55d..8e3c3ffe1b9a 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -403,9 +403,9 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 	 * and has cleaned up the pi_state already
 	 */
 	if (pi_state->owner) {
-		spin_lock_irq(&pi_state->owner->pi_lock);
+		raw_spin_lock_irq(&pi_state->owner->pi_lock);
 		list_del_init(&pi_state->list);
-		spin_unlock_irq(&pi_state->owner->pi_lock);
+		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 
 		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
 	}
@@ -470,18 +470,18 @@ void exit_pi_state_list(struct task_struct *curr)
 	 * pi_state_list anymore, but we have to be careful
 	 * versus waiters unqueueing themselves:
 	 */
-	spin_lock_irq(&curr->pi_lock);
+	raw_spin_lock_irq(&curr->pi_lock);
 	while (!list_empty(head)) {
 
 		next = head->next;
 		pi_state = list_entry(next, struct futex_pi_state, list);
 		key = pi_state->key;
 		hb = hash_futex(&key);
-		spin_unlock_irq(&curr->pi_lock);
+		raw_spin_unlock_irq(&curr->pi_lock);
 
 		spin_lock(&hb->lock);
 
-		spin_lock_irq(&curr->pi_lock);
+		raw_spin_lock_irq(&curr->pi_lock);
 		/*
 		 * We dropped the pi-lock, so re-check whether this
 		 * task still owns the PI-state:
@@ -495,15 +495,15 @@ void exit_pi_state_list(struct task_struct *curr)
495 WARN_ON(list_empty(&pi_state->list)); 495 WARN_ON(list_empty(&pi_state->list));
496 list_del_init(&pi_state->list); 496 list_del_init(&pi_state->list);
497 pi_state->owner = NULL; 497 pi_state->owner = NULL;
498 spin_unlock_irq(&curr->pi_lock); 498 raw_spin_unlock_irq(&curr->pi_lock);
499 499
500 rt_mutex_unlock(&pi_state->pi_mutex); 500 rt_mutex_unlock(&pi_state->pi_mutex);
501 501
502 spin_unlock(&hb->lock); 502 spin_unlock(&hb->lock);
503 503
504 spin_lock_irq(&curr->pi_lock); 504 raw_spin_lock_irq(&curr->pi_lock);
505 } 505 }
506 spin_unlock_irq(&curr->pi_lock); 506 raw_spin_unlock_irq(&curr->pi_lock);
507} 507}
508 508
509static int 509static int
@@ -558,7 +558,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
558 * change of the task flags, we do this protected by 558 * change of the task flags, we do this protected by
559 * p->pi_lock: 559 * p->pi_lock:
560 */ 560 */
561 spin_lock_irq(&p->pi_lock); 561 raw_spin_lock_irq(&p->pi_lock);
562 if (unlikely(p->flags & PF_EXITING)) { 562 if (unlikely(p->flags & PF_EXITING)) {
563 /* 563 /*
564 * The task is on the way out. When PF_EXITPIDONE is 564 * The task is on the way out. When PF_EXITPIDONE is
@@ -567,7 +567,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
567 */ 567 */
568 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; 568 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
569 569
570 spin_unlock_irq(&p->pi_lock); 570 raw_spin_unlock_irq(&p->pi_lock);
571 put_task_struct(p); 571 put_task_struct(p);
572 return ret; 572 return ret;
573 } 573 }
@@ -586,7 +586,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
586 WARN_ON(!list_empty(&pi_state->list)); 586 WARN_ON(!list_empty(&pi_state->list));
587 list_add(&pi_state->list, &p->pi_state_list); 587 list_add(&pi_state->list, &p->pi_state_list);
588 pi_state->owner = p; 588 pi_state->owner = p;
589 spin_unlock_irq(&p->pi_lock); 589 raw_spin_unlock_irq(&p->pi_lock);
590 590
591 put_task_struct(p); 591 put_task_struct(p);
592 592
@@ -760,7 +760,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
760 if (!pi_state) 760 if (!pi_state)
761 return -EINVAL; 761 return -EINVAL;
762 762
763 spin_lock(&pi_state->pi_mutex.wait_lock); 763 raw_spin_lock(&pi_state->pi_mutex.wait_lock);
764 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); 764 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
765 765
766 /* 766 /*
@@ -789,23 +789,23 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
789 else if (curval != uval) 789 else if (curval != uval)
790 ret = -EINVAL; 790 ret = -EINVAL;
791 if (ret) { 791 if (ret) {
792 spin_unlock(&pi_state->pi_mutex.wait_lock); 792 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
793 return ret; 793 return ret;
794 } 794 }
795 } 795 }
796 796
797 spin_lock_irq(&pi_state->owner->pi_lock); 797 raw_spin_lock_irq(&pi_state->owner->pi_lock);
798 WARN_ON(list_empty(&pi_state->list)); 798 WARN_ON(list_empty(&pi_state->list));
799 list_del_init(&pi_state->list); 799 list_del_init(&pi_state->list);
800 spin_unlock_irq(&pi_state->owner->pi_lock); 800 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
801 801
802 spin_lock_irq(&new_owner->pi_lock); 802 raw_spin_lock_irq(&new_owner->pi_lock);
803 WARN_ON(!list_empty(&pi_state->list)); 803 WARN_ON(!list_empty(&pi_state->list));
804 list_add(&pi_state->list, &new_owner->pi_state_list); 804 list_add(&pi_state->list, &new_owner->pi_state_list);
805 pi_state->owner = new_owner; 805 pi_state->owner = new_owner;
806 spin_unlock_irq(&new_owner->pi_lock); 806 raw_spin_unlock_irq(&new_owner->pi_lock);
807 807
808 spin_unlock(&pi_state->pi_mutex.wait_lock); 808 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
809 rt_mutex_unlock(&pi_state->pi_mutex); 809 rt_mutex_unlock(&pi_state->pi_mutex);
810 810
811 return 0; 811 return 0;
@@ -1010,7 +1010,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1010 plist_add(&q->list, &hb2->chain); 1010 plist_add(&q->list, &hb2->chain);
1011 q->lock_ptr = &hb2->lock; 1011 q->lock_ptr = &hb2->lock;
1012#ifdef CONFIG_DEBUG_PI_LIST 1012#ifdef CONFIG_DEBUG_PI_LIST
1013 q->list.plist.lock = &hb2->lock; 1013 q->list.plist.spinlock = &hb2->lock;
1014#endif 1014#endif
1015 } 1015 }
1016 get_futex_key_refs(key2); 1016 get_futex_key_refs(key2);
@@ -1046,7 +1046,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1046 1046
1047 q->lock_ptr = &hb->lock; 1047 q->lock_ptr = &hb->lock;
1048#ifdef CONFIG_DEBUG_PI_LIST 1048#ifdef CONFIG_DEBUG_PI_LIST
1049 q->list.plist.lock = &hb->lock; 1049 q->list.plist.spinlock = &hb->lock;
1050#endif 1050#endif
1051 1051
1052 wake_up_state(q->task, TASK_NORMAL); 1052 wake_up_state(q->task, TASK_NORMAL);
@@ -1394,7 +1394,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1394 1394
1395 plist_node_init(&q->list, prio); 1395 plist_node_init(&q->list, prio);
1396#ifdef CONFIG_DEBUG_PI_LIST 1396#ifdef CONFIG_DEBUG_PI_LIST
1397 q->list.plist.lock = &hb->lock; 1397 q->list.plist.spinlock = &hb->lock;
1398#endif 1398#endif
1399 plist_add(&q->list, &hb->chain); 1399 plist_add(&q->list, &hb->chain);
1400 q->task = current; 1400 q->task = current;
@@ -1529,18 +1529,18 @@ retry:
1529 * itself. 1529 * itself.
1530 */ 1530 */
1531 if (pi_state->owner != NULL) { 1531 if (pi_state->owner != NULL) {
1532 spin_lock_irq(&pi_state->owner->pi_lock); 1532 raw_spin_lock_irq(&pi_state->owner->pi_lock);
1533 WARN_ON(list_empty(&pi_state->list)); 1533 WARN_ON(list_empty(&pi_state->list));
1534 list_del_init(&pi_state->list); 1534 list_del_init(&pi_state->list);
1535 spin_unlock_irq(&pi_state->owner->pi_lock); 1535 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1536 } 1536 }
1537 1537
1538 pi_state->owner = newowner; 1538 pi_state->owner = newowner;
1539 1539
1540 spin_lock_irq(&newowner->pi_lock); 1540 raw_spin_lock_irq(&newowner->pi_lock);
1541 WARN_ON(!list_empty(&pi_state->list)); 1541 WARN_ON(!list_empty(&pi_state->list));
1542 list_add(&pi_state->list, &newowner->pi_state_list); 1542 list_add(&pi_state->list, &newowner->pi_state_list);
1543 spin_unlock_irq(&newowner->pi_lock); 1543 raw_spin_unlock_irq(&newowner->pi_lock);
1544 return 0; 1544 return 0;
1545 1545
1546 /* 1546 /*
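Several of the futex hunks above repeat the same dance: the (now raw) pi_lock nests inside the hash-bucket lock, so to hold both the code must drop pi_lock, take hb->lock, retake pi_lock, and then re-validate whatever it observed before the drop. A hedged sketch of that pattern, with pthread mutexes standing in for both lock types (hypothetical names, not the futex code):

#include <pthread.h>
#include <stdbool.h>

struct pi_like {
	pthread_mutex_t inner;	/* plays curr->pi_lock */
	pthread_mutex_t outer;	/* plays hb->lock      */
	void *owner;
};

/* Acquire outer then inner, re-checking state invalidated by the drop. */
static bool lock_both(struct pi_like *s, void *self)
{
	pthread_mutex_lock(&s->inner);
	if (s->owner != self) {
		pthread_mutex_unlock(&s->inner);
		return false;
	}
	/* inner nests inside outer: must drop it before taking outer */
	pthread_mutex_unlock(&s->inner);

	pthread_mutex_lock(&s->outer);
	pthread_mutex_lock(&s->inner);
	if (s->owner != self) {
		/* raced while the locks were juggled: give up */
		pthread_mutex_unlock(&s->inner);
		pthread_mutex_unlock(&s->outer);
		return false;
	}
	return true;	/* both held, ownership re-validated */
}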
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d2f9239dc6ba..0086628b6e97 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 	for (;;) {
 		base = timer->base;
 		if (likely(base != NULL)) {
-			spin_lock_irqsave(&base->cpu_base->lock, *flags);
+			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 			if (likely(base == timer->base))
 				return base;
 			/* The timer has migrated to another CPU: */
-			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 		}
 		cpu_relax();
 	}
@@ -208,13 +208,13 @@ again:
 
 		/* See the comment in lock_timer_base() */
 		timer->base = NULL;
-		spin_unlock(&base->cpu_base->lock);
-		spin_lock(&new_base->cpu_base->lock);
+		raw_spin_unlock(&base->cpu_base->lock);
+		raw_spin_lock(&new_base->cpu_base->lock);
 
 		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
 			cpu = this_cpu;
-			spin_unlock(&new_base->cpu_base->lock);
-			spin_lock(&base->cpu_base->lock);
+			raw_spin_unlock(&new_base->cpu_base->lock);
+			raw_spin_lock(&base->cpu_base->lock);
 			timer->base = base;
 			goto again;
 		}
@@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
 	struct hrtimer_clock_base *base = timer->base;
 
-	spin_lock_irqsave(&base->cpu_base->lock, *flags);
+	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 
 	return base;
 }
@@ -628,12 +628,12 @@ static void retrigger_next_event(void *arg)
 	base = &__get_cpu_var(hrtimer_bases);
 
 	/* Adjust CLOCK_REALTIME offset */
-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	base->clock_base[CLOCK_REALTIME].offset =
 		timespec_to_ktime(realtime_offset);
 
 	hrtimer_force_reprogram(base, 0);
-	spin_unlock(&base->lock);
+	raw_spin_unlock(&base->lock);
 }
 
 /*
@@ -694,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
 		if (wakeup) {
-			spin_unlock(&base->cpu_base->lock);
+			raw_spin_unlock(&base->cpu_base->lock);
 			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-			spin_lock(&base->cpu_base->lock);
+			raw_spin_lock(&base->cpu_base->lock);
 		} else
 			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 
@@ -790,7 +790,7 @@ static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
 static inline
 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 }
 
 /**
@@ -1123,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void)
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&cpu_base->lock, flags);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!hrtimer_hres_active()) {
 		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1140,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&cpu_base->lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
 	if (mindelta.tv64 < 0)
 		mindelta.tv64 = 0;
@@ -1222,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 	 * they get migrated to another cpu, therefore its safe to unlock
 	 * the timer base.
 	 */
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);
 	trace_hrtimer_expire_entry(timer, now);
 	restart = fn(timer);
 	trace_hrtimer_expire_exit(timer);
-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1261,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 retry:
 	expires_next.tv64 = KTIME_MAX;
 
-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1317,7 +1317,7 @@ retry:
 	 * against it.
 	 */
 	cpu_base->expires_next = expires_next;
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);
 
 	/* Reprogramming necessary ? */
 	if (expires_next.tv64 == KTIME_MAX ||
@@ -1457,7 +1457,7 @@ void hrtimer_run_queues(void)
 			gettime = 0;
 		}
 
-		spin_lock(&cpu_base->lock);
+		raw_spin_lock(&cpu_base->lock);
 
 		while ((node = base->first)) {
 			struct hrtimer *timer;
@@ -1469,7 +1469,7 @@ void hrtimer_run_queues(void)
 
 			__run_hrtimer(timer, &base->softirq_time);
 		}
-		spin_unlock(&cpu_base->lock);
+		raw_spin_unlock(&cpu_base->lock);
 	}
 }
 
@@ -1625,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	spin_lock_init(&cpu_base->lock);
+	raw_spin_lock_init(&cpu_base->lock);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1683,16 +1683,16 @@ static void migrate_hrtimers(int scpu)
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
 	 */
-	spin_lock(&new_base->lock);
-	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock(&new_base->lock);
+	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
 				     &new_base->clock_base[i]);
 	}
 
-	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
+	raw_spin_unlock(&old_base->lock);
+	raw_spin_unlock(&new_base->lock);
 
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
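lock_hrtimer_base() above is the canonical lock-and-revalidate loop: the lock lives behind timer->base, which migration can change between reading the pointer and acquiring the lock. A sketch with hypothetical types (a real version would also need an atomic load of t->base):

#include <pthread.h>

struct clock_base {
	pthread_mutex_t lock;
};

struct timer_like {
	struct clock_base *base;	/* may be switched by migration */
};

static struct clock_base *lock_timer_base(struct timer_like *t)
{
	for (;;) {
		struct clock_base *b = t->base;

		pthread_mutex_lock(&b->lock);
		if (b == t->base)
			return b;	/* pointer stable: this is the right lock */
		/* migrated between the load and the lock: drop and retry */
		pthread_mutex_unlock(&b->lock);
	}
}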
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 366eedf949c0..dbcbf6a33a08 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -96,7 +96,7 @@ static int task_bp_pinned(struct task_struct *tsk)
 
 	list = &ctx->event_list;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	raw_spin_lock_irqsave(&ctx->lock, flags);
 
 	/*
 	 * The current breakpoint counter is not included in the list
@@ -107,7 +107,7 @@ static int task_bp_pinned(struct task_struct *tsk)
 			count++;
 	}
 
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
 	return count;
 }
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 1de9700f416e..2295a31ef110 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
 	 * flush such a longstanding irq before considering it as spurious.
 	 */
 	for_each_irq_desc_reverse(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			/*
 			 * An old-style architecture might still have
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
 				desc->chip->set_type(i, IRQ_TYPE_PROBE);
 			desc->chip->startup(i);
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 
 	/* Wait for longstanding interrupts to trigger. */
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
 	 * happened in the previous stage, it may have masked itself)
 	 */
 	for_each_irq_desc_reverse(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
 			if (desc->chip->startup(i))
 				desc->status |= IRQ_PENDING;
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 
 	/*
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
 	 * Now filter out any obviously spurious interrupts
 	 */
 	for_each_irq_desc(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		status = desc->status;
 
 		if (status & IRQ_AUTODETECT) {
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
 			if (i < 32)
 				mask |= 1 << i;
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 
 	return mask;
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val)
 	int i;
 
 	for_each_irq_desc(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		status = desc->status;
 
 		if (status & IRQ_AUTODETECT) {
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val)
 			desc->status = status & ~IRQ_AUTODETECT;
 			desc->chip->shutdown(i);
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 	mutex_unlock(&probing_active);
 
@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
 	unsigned int status;
 
 	for_each_irq_desc(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		status = desc->status;
 
 		if (status & IRQ_AUTODETECT) {
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
 			desc->status = status & ~IRQ_AUTODETECT;
 			desc->chip->shutdown(i);
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 	mutex_unlock(&probing_active);
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ba566c261adc..ecc3fa28f666 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq)
 	}
 
 	/* Ensure we don't have left over values from a previous use of this irq */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status = IRQ_DISABLED;
 	desc->chip = &no_irq_chip;
 	desc->handle_irq = handle_bad_irq;
@@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq)
 	cpumask_clear(desc->pending_mask);
 #endif
 #endif
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 /**
@@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int irq)
 		return;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (desc->action) {
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
 			irq);
 		return;
@@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	desc->chip = &no_irq_chip;
 	desc->name = NULL;
 	clear_kstat_irqs(desc);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 
@@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
 	if (!chip)
 		chip = &no_irq_chip;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	irq_chip_set_defaults(chip);
 	desc->chip = chip;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return 0;
 }
@@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsigned int type)
 	if (type == IRQ_TYPE_NONE)
 		return 0;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	ret = __irq_set_trigger(desc, irq, type);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(set_irq_type);
@@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void *data)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->handler_data = data;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 EXPORT_SYMBOL(set_irq_data);
@@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->msi_desc = entry;
 	if (entry)
 		entry->irq = irq;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq, void *data)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->chip_data = data;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return 0;
 }
@@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int irq, int nest)
 	if (!desc)
 		return;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (nest)
 		desc->status |= IRQ_NESTED_THREAD;
 	else
 		desc->status &= ~IRQ_NESTED_THREAD;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL_GPL(set_irq_nested_thread);
 
@@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq)
 
 	might_sleep();
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 
 	kstat_incr_irqs_this_cpu(irq, desc);
 
@@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq)
 		goto out_unlock;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 
 	action_ret = action->thread_fn(action->irq, action->dev_id);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
 out_unlock:
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_nested_irq);
 
@@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
@@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_unlock;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 /**
@@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc, irq);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_unlock;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
 	if (unlikely(desc->status & IRQ_ONESHOT))
@@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
 		desc->chip->unmask(irq);
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_level_irq);
 
@@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out;
@@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 
 	desc->status |= IRQ_INPROGRESS;
 	desc->status &= ~IRQ_PENDING;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 out:
 	desc->chip->eoi(irq);
 
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 /**
@@ -530,7 +530,7 @@ out:
 void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
 
@@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		}
 
 		desc->status &= ~IRQ_PENDING;
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 		action_ret = handle_IRQ_event(irq, action);
 		if (!noirqdebug)
 			note_interrupt(irq, desc, action_ret);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 
 	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
 
 	desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 /**
@@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 	}
 
 	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 
 	/* Uninstall? */
 	if (handle == handle_bad_irq) {
@@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		desc->depth = 0;
 		desc->chip->startup(irq);
 	}
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int irq)
 		return;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_NOPROBE;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 void __init set_irq_probe(unsigned int irq)
@@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int irq)
 		return;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status &= ~IRQ_NOPROBE;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
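Every flow handler converted above follows one pattern: desc->lock guards only the status flags and is dropped across the handler call, with IRQ_INPROGRESS keeping a second CPU out in the meantime. A sketch of that pattern with hypothetical types (pthread mutex standing in for the raw spinlock):

#include <pthread.h>

#define IRQ_INPROGRESS	0x1

struct desc_like {
	pthread_mutex_t lock;
	unsigned int status;
	void (*handler)(int irq);
};

static void handle_irq_pattern(int irq, struct desc_like *desc)
{
	pthread_mutex_lock(&desc->lock);
	if (desc->status & IRQ_INPROGRESS) {
		/* already running on another CPU */
		pthread_mutex_unlock(&desc->lock);
		return;
	}
	desc->status |= IRQ_INPROGRESS;
	pthread_mutex_unlock(&desc->lock);	/* never call out with it held */

	desc->handler(irq);

	pthread_mutex_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
	pthread_mutex_unlock(&desc->lock);
}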
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 17c71bb565c6..814940e7f485 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = {
 	.chip = &no_irq_chip,
 	.handle_irq = handle_bad_irq,
 	.depth = 1,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 };
 
 void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
@@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 {
 	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
 
-	spin_lock_init(&desc->lock);
+	raw_spin_lock_init(&desc->lock);
 	desc->irq = irq;
 #ifdef CONFIG_SMP
 	desc->node = node;
@@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 /*
  * Protect the sparse_irqs:
  */
-DEFINE_SPINLOCK(sparse_irq_lock);
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);
 
 struct irq_desc **irq_desc_ptrs __read_mostly;
 
@@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.chip = &no_irq_chip,
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
-		.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 	}
 };
 
@@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 	if (desc)
 		return desc;
 
-	spin_lock_irqsave(&sparse_irq_lock, flags);
+	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 
 	/* We have to check it to avoid races with another CPU */
 	desc = irq_desc_ptrs[irq];
@@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 	irq_desc_ptrs[irq] = desc;
 
 out_unlock:
-	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	return desc;
 }
@@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.chip = &no_irq_chip,
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
-		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
 	}
 };
 
@@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq)
 		return 1;
 	}
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
 	/*
@@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq)
 	for (;;) {
 		irqreturn_t action_ret;
 
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 
 		action_ret = handle_IRQ_event(irq, action);
 		if (!noirqdebug)
 			note_interrupt(irq, desc, action_ret);
 
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 		if (likely(!(desc->status & IRQ_PENDING)))
 			break;
 		desc->status &= ~IRQ_PENDING;
@@ -536,7 +536,7 @@ out:
 	 * disabled while the handler was running.
 	 */
 	desc->chip->end(irq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	return 1;
 }
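The conversion swaps the static initializers as well as the calls. The post-conversion idioms, as a kernel-style fragment (sketch only; the surrounding file and Kbuild context are assumed): DEFINE_RAW_SPINLOCK() for a standalone lock, __RAW_SPIN_LOCK_UNLOCKED() for one embedded in a struct, raw_spin_lock_init() at runtime.

#include <linux/spinlock.h>

/* standalone: declare and initialize in one shot */
static DEFINE_RAW_SPINLOCK(my_lock);

/* embedded: designated initializer inside a static object */
static struct {
	raw_spinlock_t lock;
	unsigned int depth;
} my_desc = {
	.lock  = __RAW_SPIN_LOCK_UNLOCKED(my_desc.lock),
	.depth = 1,
};

/* runtime: for dynamically allocated objects */
static void my_desc_setup(raw_spinlock_t *lock)
{
	raw_spin_lock_init(lock);
}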
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 1b5d742c6a77..b2821f070a3d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
-extern spinlock_t sparse_irq_lock;
+extern raw_spinlock_t sparse_irq_lock;
 
 #ifdef CONFIG_SPARSE_IRQ
 /* irq_desc_ptrs allocated at boot time */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7305b297d1eb..eb6078ca60c7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
 			cpu_relax();
 
 		/* Ok, that indicated we're done: double-check carefully. */
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		status = desc->status;
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 		/* Oops, that failed? */
 	} while (status & IRQ_INPROGRESS);
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT) {
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
 	if (!ret)
 		irq_set_thread_affinity(desc);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
 }
@@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq)
 		return;
 
 	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	__disable_irq(desc, irq, false);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
@@ -308,9 +308,9 @@ void enable_irq(unsigned int irq)
 		return;
 
 	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	__enable_irq(desc, irq, false);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(enable_irq);
@@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (on) {
 		if (desc->wake_depth++ == 0) {
 			ret = set_irq_wake_real(irq, on);
@@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 		}
 	}
 
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(set_irq_wake);
@@ -484,12 +484,12 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
 	chip_bus_lock(irq, desc);
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
 		desc->status &= ~IRQ_MASKED;
 		desc->chip->unmask(irq);
 	}
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 	chip_bus_sync_unlock(irq, desc);
 }
 
@@ -514,9 +514,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 		return;
 	}
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	cpumask_copy(mask, desc->affinity);
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 
 	set_cpus_allowed_ptr(current, mask);
 	free_cpumask_var(mask);
@@ -545,7 +545,7 @@ static int irq_thread(void *data)
 
 		atomic_inc(&desc->threads_active);
 
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		if (unlikely(desc->status & IRQ_DISABLED)) {
 			/*
 			 * CHECKME: We might need a dedicated
@@ -555,9 +555,9 @@ static int irq_thread(void *data)
 			 * retriggers the interrupt itself --- tglx
 			 */
 			desc->status |= IRQ_PENDING;
-			spin_unlock_irq(&desc->lock);
+			raw_spin_unlock_irq(&desc->lock);
 		} else {
-			spin_unlock_irq(&desc->lock);
+			raw_spin_unlock_irq(&desc->lock);
 
 			action->thread_fn(action->irq, action->dev_id);
 
@@ -679,7 +679,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	/*
 	 * The following block of code has to be executed atomically
 	 */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	old_ptr = &desc->action;
 	old = *old_ptr;
 	if (old) {
@@ -775,7 +775,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		__enable_irq(desc, irq, false);
 	}
 
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	/*
 	 * Strictly no need to wake it up, but hung_task complains
@@ -802,7 +802,7 @@ mismatch:
 	ret = -EBUSY;
 
 out_thread:
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (new->thread) {
 		struct task_struct *t = new->thread;
 
@@ -844,7 +844,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	if (!desc)
 		return NULL;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 
 	/*
 	 * There can be multiple actions per IRQ descriptor, find the right
@@ -856,7 +856,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 
 	if (!action) {
 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 		return NULL;
 	}
@@ -884,7 +884,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		desc->chip->disable(irq);
 	}
 
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
 
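synchronize_irq() above combines a cheap unlocked spin with a locked re-read: busy-wait until IRQ_INPROGRESS looks clear, then take the lock for a stable snapshot and repeat if the handler restarted in between. A sketch with hypothetical types:

#include <pthread.h>

#define IRQ_INPROGRESS	0x1

struct desc_like {
	pthread_mutex_t lock;
	volatile unsigned int status;
};

static void wait_for_handler(struct desc_like *desc)
{
	unsigned int status;

	do {
		/* cheap spin without the lock */
		while (desc->status & IRQ_INPROGRESS)
			;	/* a cpu_relax() equivalent */

		/* stable snapshot under the lock */
		pthread_mutex_lock(&desc->lock);
		status = desc->status;
		pthread_mutex_unlock(&desc->lock);

		/* raced: handler started again, spin once more */
	} while (status & IRQ_INPROGRESS);
}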
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index fcb6c96f2627..241962280836 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -27,7 +27,7 @@ void move_masked_irq(int irq)
 	if (!desc->chip->set_affinity)
 		return;
 
-	assert_spin_locked(&desc->lock);
+	assert_raw_spin_locked(&desc->lock);
 
 	/*
 	 * If there was a valid mask to work with, please
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 3fd30197da2e..26bac9d8f860 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 			"for migration.\n", irq);
 		return false;
 	}
-	spin_lock_init(&desc->lock);
+	raw_spin_lock_init(&desc->lock);
 	desc->node = node;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
@@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 
 	irq = old_desc->irq;
 
-	spin_lock_irqsave(&sparse_irq_lock, flags);
+	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 
 	/* We have to check it to avoid races with another CPU */
 	desc = irq_desc_ptrs[irq];
@@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	}
 
 	irq_desc_ptrs[irq] = desc;
-	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	/* free the old one */
 	free_one_irq_desc(old_desc, desc);
@@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	return desc;
 
 out_unlock:
-	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	return desc;
 }
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index a0bb09e79867..0d4005d85b03 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -28,9 +28,9 @@ void suspend_device_irqs(void)
 	for_each_irq_desc(irq, desc) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		__disable_irq(desc, irq, true);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 
 	for_each_irq_desc(irq, desc)
@@ -56,9 +56,9 @@ void resume_device_irqs(void)
 		if (!(desc->status & IRQ_SUSPENDED))
 			continue;
 
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		__enable_irq(desc, irq, true);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 EXPORT_SYMBOL_GPL(resume_device_irqs);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 0832145fea97..6f50eccc79c0 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -179,7 +179,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
 	unsigned long flags;
 	int ret = 1;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	for (action = desc->action ; action; action = action->next) {
 		if ((action != new_action) && action->name &&
 				!strcmp(new_action->name, action->name)) {
@@ -187,7 +187,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index e49ea1c5232d..89fb90ae534f 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	int ok = 0, work = 0;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	/* Already running on another processor */
 	if (desc->status & IRQ_INPROGRESS) {
 		/*
@@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct irq_desc *desc)
 		 */
 		if (desc->action && (desc->action->flags & IRQF_SHARED))
 			desc->status |= IRQ_PENDING;
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 		return ok;
 	}
 	/* Honour the normal IRQ locking */
 	desc->status |= IRQ_INPROGRESS;
 	action = desc->action;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	while (action) {
 		/* Only shared IRQ handlers are safe to call */
@@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
 	}
 	local_irq_disable();
 	/* Now clean up the flags */
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	action = desc->action;
 
 	/*
@@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct irq_desc *desc)
 		 * Perform real IRQ processing for the IRQ we deferred
 		 */
 		work = 1;
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 		handle_IRQ_event(irq, action);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 		desc->status &= ~IRQ_PENDING;
 	}
 	desc->status &= ~IRQ_INPROGRESS;
@@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
 	 */
 	if (work && desc->chip && desc->chip->end)
 		desc->chip->end(irq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	return ok;
 }
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 429540c70d3f..5feaddcdbe49 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
-	__raw_spin_lock(&lockdep_lock);
+	arch_spin_lock(&lockdep_lock);
 	/*
 	 * Make sure that if another CPU detected a bug while
 	 * walking the graph we dont change it (while the other
@@ -85,7 +85,7 @@ static int graph_lock(void)
 	 * dropped already)
 	 */
 	if (!debug_locks) {
-		__raw_spin_unlock(&lockdep_lock);
+		arch_spin_unlock(&lockdep_lock);
 		return 0;
 	}
 	/* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +95,11 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
 		return DEBUG_LOCKS_WARN_ON(1);
 
 	current->lockdep_recursion--;
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 	return 0;
 }
 
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
 {
 	int ret = debug_locks_off();
 
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 
 	return ret;
 }
@@ -1170,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 	this.class = class;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&lockdep_lock);
+	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_forward_deps(&this);
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -1197,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	this.class = class;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&lockdep_lock);
+	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_backward_deps(&this);
-	__raw_spin_unlock(&lockdep_lock);
+	arch_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 6b2d735846a5..57d527a16f9d 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock)
 								\
 		DEBUG_LOCKS_WARN_ON(in_interrupt());		\
 		local_irq_save(flags);				\
-		__raw_spin_lock(&(lock)->raw_lock);		\
+		arch_spin_lock(&(lock)->rlock.raw_lock);	\
 		DEBUG_LOCKS_WARN_ON(l->magic != l);		\
 	} while (0)
 
 #define spin_unlock_mutex(lock, flags)				\
 	do {							\
-		__raw_spin_unlock(&(lock)->raw_lock);		\
+		arch_spin_unlock(&(lock)->rlock.raw_lock);	\
 		local_irq_restore(flags);			\
 		preempt_check_resched();			\
 	} while (0)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e73e53c7582f..9052d6c8c9fd 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 		 * if so. If we locked the right context, then it
 		 * can't get swapped on us any more.
 		 */
-		spin_lock_irqsave(&ctx->lock, *flags);
+		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			goto retry;
 		}
 
 		if (!atomic_inc_not_zero(&ctx->refcount)) {
-			spin_unlock_irqrestore(&ctx->lock, *flags);
+			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 			ctx = NULL;
 		}
 	}
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		++ctx->pin_count;
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 	return ctx;
 }
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	raw_spin_lock_irqsave(&ctx->lock, flags);
 	--ctx->pin_count;
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	put_ctx(ctx);
 }
 
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Protect the list operation against NMI by disabling the
 	 * events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info)
 	}
 
 	perf_enable();
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 
@@ -488,12 +488,12 @@ retry:
 	task_oncpu_function_call(task, __perf_event_remove_from_context,
 				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the context is active we need to retry the smp call.
 	 */
 	if (ctx->nr_active && !list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -504,7 +504,7 @@ retry:
 	 */
 	if (!list_empty(&event->group_entry))
 		list_del_event(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	/*
 	 * If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *info)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event)
 retry:
 	task_oncpu_function_call(task, __perf_event_disable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * If the event is still active, we need to retry the cross-call.
 	 */
 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event)
 		event->state = PERF_EVENT_STATE_OFF;
 	}
 
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -820,7 +820,7 @@ static void __perf_install_in_context(void *info)
 unlock:
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -856,12 +856,12 @@ retry:
 	task_oncpu_function_call(task, __perf_install_in_context,
 				 event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * we need to retry the smp call.
 	 */
 	if (ctx->is_active && list_empty(&event->group_entry)) {
-		spin_unlock_irq(&ctx->lock);
+		raw_spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
 
@@ -872,7 +872,7 @@ retry:
 	 */
 	if (list_empty(&event->group_entry))
 		add_event_to_ctx(event, ctx);
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -917,7 +917,7 @@ static void __perf_event_enable(void *info)
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -959,7 +959,7 @@ static void __perf_event_enable(void *info)
 	}
 
 unlock:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -985,7 +985,7 @@ void perf_event_enable(struct perf_event *event)
 		return;
 	}
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->state >= PERF_EVENT_STATE_INACTIVE)
 		goto out;
 
@@ -1000,10 +1000,10 @@ void perf_event_enable(struct perf_event *event)
 	event->state = PERF_EVENT_STATE_OFF;
 
 retry:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 	task_oncpu_function_call(task, __perf_event_enable, event);
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 
 	/*
 	 * If the context is active and the event is still off,
@@ -1020,7 +1020,7 @@ void perf_event_enable(struct perf_event *event)
 	__perf_event_mark_enabled(event, ctx);
 
 out:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 0;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	}
 	perf_enable();
 out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 		 * order we take the locks because no other cpu could
 		 * be trying to lock both of these tasks.
 		 */
-		spin_lock(&ctx->lock);
-		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&ctx->lock);
+		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
 		if (context_equiv(ctx, next_ctx)) {
 			/*
 			 * XXX do we need a memory barrier of sorts
@@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct task_struct *task,
 
 			perf_event_sync_stat(ctx, next_ctx);
 		}
-		spin_unlock(&next_ctx->lock);
-		spin_unlock(&ctx->lock);
+		raw_spin_unlock(&next_ctx->lock);
+		raw_spin_unlock(&ctx->lock);
 	}
 	rcu_read_unlock();
 
@@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	struct perf_event *event;
 	int can_add_hw = 1;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 	}
 	perf_enable();
 out:
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 	struct hw_perf_event *hwc;
 	u64 interrupts, freq;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 			perf_enable();
 		}
 	}
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	if (!ctx->nr_events)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	/*
 	 * Rotate the first entry last (works just fine for group events too):
 	 */
@@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	}
 	perf_enable();
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 }
 
 void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
 	__perf_event_task_sched_out(ctx);
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 
 	list_for_each_entry(event, &ctx->group_list, group_entry) {
 		if (!event->attr.enable_on_exec)
@@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 	if (enabled)
 		unclone_ctx(ctx);
 
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	perf_event_task_sched_in(task, smp_processor_id());
 out:
@@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	spin_lock(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	update_context_time(ctx);
 	update_event_times(event);
-	spin_unlock(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
 
 	event->pmu->read(event);
 }
@@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_event *event)
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
 
-		spin_lock_irqsave(&ctx->lock, flags);
+		raw_spin_lock_irqsave(&ctx->lock, flags);
 		update_context_time(ctx);
 		update_event_times(event);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
@@ -1579,7 +1579,7 @@ static void
 __perf_event_init_context(struct perf_event_context *ctx,
 			  struct task_struct *task)
 {
-	spin_lock_init(&ctx->lock);
+	raw_spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->group_list);
 	INIT_LIST_HEAD(&ctx->event_list);
@@ -1649,7 +1649,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
 		unclone_ctx(ctx);
-		spin_unlock_irqrestore(&ctx->lock, flags);
+		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	if (!ctx) {
@@ -1987,7 +1987,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (!value)
 		return -EINVAL;
 
-	spin_lock_irq(&ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (event->attr.freq) {
 		if (value > sysctl_perf_event_sample_rate) {
 			ret = -EINVAL;
@@ -2000,7 +2000,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->hw.sample_period = value;
 	}
unlock:
-	spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock_irq(&ctx->lock);
 
 	return ret;
 }
@@ -4992,7 +4992,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 * reading child->perf_event_ctxp, we wait until it has
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
-	spin_lock(&child_ctx->lock);
+	raw_spin_lock(&child_ctx->lock);
 	child->perf_event_ctxp = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
@@ -5001,7 +5001,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 */
 	unclone_ctx(child_ctx);
 	update_context_time(child_ctx);
-	spin_unlock_irqrestore(&child_ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*
 	 * Report the task dead after unscheduling the events so that we
@@ -5292,11 +5292,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
 	perf_reserved_percpu = val;
 	for_each_online_cpu(cpu) {
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
-		spin_lock_irq(&cpuctx->ctx.lock);
+		raw_spin_lock_irq(&cpuctx->ctx.lock);
 		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
 			  perf_max_events - perf_reserved_percpu);
 		cpuctx->max_pertask = mpt;
-		spin_unlock_irq(&cpuctx->ctx.lock);
+		raw_spin_unlock_irq(&cpuctx->ctx.lock);
 	}
 	spin_unlock(&perf_resource_lock);
 
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 5fcb4fe645e2..ddabb54bb5c8 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -37,8 +37,8 @@ do { \
 	if (rt_trace_on) {					\
 		rt_trace_on = 0;				\
 		console_verbose();				\
-		if (spin_is_locked(&current->pi_lock))		\
-			spin_unlock(&current->pi_lock);		\
+		if (raw_spin_is_locked(&current->pi_lock))	\
+			raw_spin_unlock(&current->pi_lock);	\
 	}							\
 } while (0)
 
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 29bd4baf9e75..a9604815786a 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -138,9 +138,9 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	__rt_mutex_adjust_prio(task);
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 }
 
 /*
@@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	/*
 	 * Task can not go away as we did a get_task() before !
 	 */
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	waiter = task->pi_blocked_on;
 	/*
@@ -231,8 +231,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		goto out_unlock_pi;
 
 	lock = waiter->lock;
-	if (!spin_trylock(&lock->wait_lock)) {
-		spin_unlock_irqrestore(&task->pi_lock, flags);
+	if (!raw_spin_trylock(&lock->wait_lock)) {
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		cpu_relax();
 		goto retry;
 	}
@@ -240,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	/* Deadlock detection */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
-		spin_unlock(&lock->wait_lock);
+		raw_spin_unlock(&lock->wait_lock);
 		ret = deadlock_detect ? -EDEADLK : 0;
 		goto out_unlock_pi;
 	}
@@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	plist_add(&waiter->list_entry, &lock->wait_list);
 
 	/* Release the task */
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 	put_task_struct(task);
 
 	/* Grab the next task */
 	task = rt_mutex_owner(lock);
 	get_task_struct(task);
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	if (waiter == rt_mutex_top_waiter(lock)) {
 		/* Boost the owner */
@@ -277,10 +277,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		__rt_mutex_adjust_prio(task);
 	}
 
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	top_waiter = rt_mutex_top_waiter(lock);
-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 
 	if (!detect_deadlock && waiter != top_waiter)
 		goto out_put_task;
@@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	goto again;
 
 out_unlock_pi:
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
 	put_task_struct(task);
 
@@ -313,9 +313,9 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
 	if (pendowner == task)
 		return 1;
 
-	spin_lock_irqsave(&pendowner->pi_lock, flags);
+	raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
 	if (task->prio >= pendowner->prio) {
-		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
 		return 0;
 	}
 
@@ -325,7 +325,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
 	 * priority.
 	 */
 	if (likely(!rt_mutex_has_waiters(lock))) {
-		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
 		return 1;
 	}
 
@@ -333,7 +333,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
 	next = rt_mutex_top_waiter(lock);
 	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
 	__rt_mutex_adjust_prio(pendowner);
-	spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
 
 	/*
 	 * We are going to steal the lock and a waiter was
@@ -350,10 +350,10 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
 	 * might be task:
 	 */
 	if (likely(next->task != task)) {
-		spin_lock_irqsave(&task->pi_lock, flags);
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
 		plist_add(&next->pi_list_entry, &task->pi_waiters);
 		__rt_mutex_adjust_prio(task);
-		spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 	}
 	return 1;
 }
@@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	unsigned long flags;
 	int chain_walk = 0, res;
 
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
 	waiter->lock = lock;
@@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
 	task->pi_blocked_on = waiter;
 
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	if (waiter == rt_mutex_top_waiter(lock)) {
-		spin_lock_irqsave(&owner->pi_lock, flags);
+		raw_spin_lock_irqsave(&owner->pi_lock, flags);
 		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
 		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
 
 		__rt_mutex_adjust_prio(owner);
 		if (owner->pi_blocked_on)
 			chain_walk = 1;
-		spin_unlock_irqrestore(&owner->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
 	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
 		chain_walk = 1;
@@ -459,12 +459,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	 */
 	get_task_struct(owner);
 
-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
 					 task);
 
-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 
 	return res;
 }
@@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 	struct task_struct *pendowner;
 	unsigned long flags;
 
-	spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock_irqsave(&current->pi_lock, flags);
 
 	waiter = rt_mutex_top_waiter(lock);
 	plist_del(&waiter->list_entry, &lock->wait_list);
@@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 
 	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
 
-	spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
 	/*
 	 * Clear the pi_blocked_on variable and enqueue a possible
@@ -509,7 +509,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 	 * waiter with higher priority than pending-owner->normal_prio
 	 * is blocked on the unboosted (pending) owner.
 	 */
-	spin_lock_irqsave(&pendowner->pi_lock, flags);
+	raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
 
 	WARN_ON(!pendowner->pi_blocked_on);
 	WARN_ON(pendowner->pi_blocked_on != waiter);
@@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 		next = rt_mutex_top_waiter(lock);
 		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
 	}
-	spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
 
 	wake_up_process(pendowner);
 }
@@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mutex *lock,
 	unsigned long flags;
 	int chain_walk = 0;
 
-	spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock_irqsave(&current->pi_lock, flags);
 	plist_del(&waiter->list_entry, &lock->wait_list);
 	waiter->task = NULL;
 	current->pi_blocked_on = NULL;
-	spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
 	if (first && owner != current) {
 
-		spin_lock_irqsave(&owner->pi_lock, flags);
+		raw_spin_lock_irqsave(&owner->pi_lock, flags);
 
 		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
 
@@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mutex *lock,
 		if (owner->pi_blocked_on)
 			chain_walk = 1;
 
-		spin_unlock_irqrestore(&owner->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
 
 	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
@@ -575,11 +575,11 @@ static void remove_waiter(struct rt_mutex *lock,
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(owner);
 
-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 
 	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
 
-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 }
 
 /*
@@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	struct rt_mutex_waiter *waiter;
 	unsigned long flags;
 
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	waiter = task->pi_blocked_on;
 	if (!waiter || waiter->list_entry.prio == task->prio) {
-		spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
 	}
 
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(task);
@@ -672,14 +672,14 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 			break;
 		}
 
-		spin_unlock(&lock->wait_lock);
+		raw_spin_unlock(&lock->wait_lock);
 
 		debug_rt_mutex_print_deadlock(waiter);
 
 		if (waiter->task)
 			schedule_rt_mutex(lock);
 
-		spin_lock(&lock->wait_lock);
+		raw_spin_lock(&lock->wait_lock);
 		set_current_state(state);
 	}
 
@@ -700,11 +700,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	debug_rt_mutex_init_waiter(&waiter);
 	waiter.task = NULL;
 
-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock)) {
-		spin_unlock(&lock->wait_lock);
+		raw_spin_unlock(&lock->wait_lock);
 		return 0;
 	}
 
@@ -731,7 +731,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	 */
 	fixup_rt_mutex_waiters(lock);
 
-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 
 	/* Remove pending timer: */
 	if (unlikely(timeout))
@@ -758,7 +758,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
 	int ret = 0;
 
-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 
 	if (likely(rt_mutex_owner(lock) != current)) {
 
@@ -770,7 +770,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
 		fixup_rt_mutex_waiters(lock);
 	}
 
-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 
 	return ret;
 }
@@ -781,7 +781,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
 static void __sched
 rt_mutex_slowunlock(struct rt_mutex *lock)
 {
-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 
 	debug_rt_mutex_unlock(lock);
 
@@ -789,13 +789,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 
 	if (!rt_mutex_has_waiters(lock)) {
 		lock->owner = NULL;
-		spin_unlock(&lock->wait_lock);
+		raw_spin_unlock(&lock->wait_lock);
 		return;
 	}
 
 	wakeup_next_waiter(lock);
 
-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 
 	/* Undo pi boosting if necessary: */
 	rt_mutex_adjust_prio(current);
@@ -970,8 +970,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
 {
 	lock->owner = NULL;
-	spin_lock_init(&lock->wait_lock);
-	plist_head_init(&lock->wait_list, &lock->wait_lock);
+	raw_spin_lock_init(&lock->wait_lock);
+	plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
 
 	debug_rt_mutex_init(lock, name);
 }
@@ -1032,7 +1032,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 {
 	int ret;
 
-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 
 	mark_rt_mutex_waiters(lock);
 
@@ -1040,7 +1040,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 		/* We got the lock for task. */
 		debug_rt_mutex_lock(lock);
 		rt_mutex_set_owner(lock, task, 0);
-		spin_unlock(&lock->wait_lock);
+		raw_spin_unlock(&lock->wait_lock);
 		rt_mutex_deadlock_account_lock(lock, task);
 		return 1;
 	}
@@ -1056,7 +1056,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 		 */
 		ret = 0;
 	}
-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 
 	debug_rt_mutex_print_deadlock(waiter);
 
@@ -1106,7 +1106,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 {
 	int ret;
 
-	spin_lock(&lock->wait_lock);
+	raw_spin_lock(&lock->wait_lock);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 
@@ -1124,7 +1124,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 	 */
 	fixup_rt_mutex_waiters(lock);
 
-	spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&lock->wait_lock);
 
 	/*
 	 * Readjust priority, when we did not get the lock. We might have been
diff --git a/kernel/sched.c b/kernel/sched.c
index fd05861b2111..18cceeecce35 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
 
 struct rt_bandwidth {
 	/* nests inside the rq lock: */
-	spinlock_t		rt_runtime_lock;
+	raw_spinlock_t		rt_runtime_lock;
 	ktime_t			rt_period;
 	u64			rt_runtime;
 	struct hrtimer		rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 	rt_b->rt_period = ns_to_ktime(period);
 	rt_b->rt_runtime = runtime;
 
-	spin_lock_init(&rt_b->rt_runtime_lock);
+	raw_spin_lock_init(&rt_b->rt_runtime_lock);
 
 	hrtimer_init(&rt_b->rt_period_timer,
 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	if (hrtimer_active(&rt_b->rt_period_timer))
 		return;
 
-	spin_lock(&rt_b->rt_runtime_lock);
+	raw_spin_lock(&rt_b->rt_runtime_lock);
 	for (;;) {
 		unsigned long delta;
 		ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
 				HRTIMER_MODE_ABS_PINNED, 0);
 	}
-	spin_unlock(&rt_b->rt_runtime_lock);
+	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -470,7 +470,7 @@ struct rt_rq {
 	u64 rt_time;
 	u64 rt_runtime;
 	/* Nests inside the rq lock: */
-	spinlock_t rt_runtime_lock;
+	raw_spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
 	unsigned long rt_nr_boosted;
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain;
  */
 struct rq {
 	/* runqueue lock: */
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
  */
 int runqueue_is_locked(int cpu)
 {
-	return spin_is_locked(&cpu_rq(cpu)->lock);
+	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
@@ -893,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 
-	spin_unlock_irq(&rq->lock);
+	raw_spin_unlock_irq(&rq->lock);
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -917,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	next->oncpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	spin_unlock_irq(&rq->lock);
+	raw_spin_unlock_irq(&rq->lock);
 #else
-	spin_unlock(&rq->lock);
+	raw_spin_unlock(&rq->lock);
 #endif
 }
 
@@ -949,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 {
 	for (;;) {
 		struct rq *rq = task_rq(p);
-		spin_lock(&rq->lock);
+		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p)))
 			return rq;
-		spin_unlock(&rq->lock);
+		raw_spin_unlock(&rq->lock);
 	}
 }
 
@@ -981,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
981 struct rq *rq = task_rq(p); 981 struct rq *rq = task_rq(p);
982 982
983 smp_mb(); /* spin-unlock-wait is not a full memory barrier */ 983 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
984 spin_unlock_wait(&rq->lock); 984 raw_spin_unlock_wait(&rq->lock);
985} 985}
986 986
987static void __task_rq_unlock(struct rq *rq) 987static void __task_rq_unlock(struct rq *rq)
988 __releases(rq->lock) 988 __releases(rq->lock)
989{ 989{
990 spin_unlock(&rq->lock); 990 raw_spin_unlock(&rq->lock);
991} 991}
992 992
993static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) 993static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
994 __releases(rq->lock) 994 __releases(rq->lock)
995{ 995{
996 spin_unlock_irqrestore(&rq->lock, *flags); 996 raw_spin_unlock_irqrestore(&rq->lock, *flags);
997} 997}
998 998
999/* 999/*
@@ -1006,7 +1006,7 @@ static struct rq *this_rq_lock(void)
1006 1006
1007 local_irq_disable(); 1007 local_irq_disable();
1008 rq = this_rq(); 1008 rq = this_rq();
1009 spin_lock(&rq->lock); 1009 raw_spin_lock(&rq->lock);
1010 1010
1011 return rq; 1011 return rq;
1012} 1012}
@@ -1053,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
1053 1053
1054 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 1054 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1055 1055
1056 spin_lock(&rq->lock); 1056 raw_spin_lock(&rq->lock);
1057 update_rq_clock(rq); 1057 update_rq_clock(rq);
1058 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 1058 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1059 spin_unlock(&rq->lock); 1059 raw_spin_unlock(&rq->lock);
1060 1060
1061 return HRTIMER_NORESTART; 1061 return HRTIMER_NORESTART;
1062} 1062}
@@ -1069,10 +1069,10 @@ static void __hrtick_start(void *arg)
1069{ 1069{
1070 struct rq *rq = arg; 1070 struct rq *rq = arg;
1071 1071
1072 spin_lock(&rq->lock); 1072 raw_spin_lock(&rq->lock);
1073 hrtimer_restart(&rq->hrtick_timer); 1073 hrtimer_restart(&rq->hrtick_timer);
1074 rq->hrtick_csd_pending = 0; 1074 rq->hrtick_csd_pending = 0;
1075 spin_unlock(&rq->lock); 1075 raw_spin_unlock(&rq->lock);
1076} 1076}
1077 1077
1078/* 1078/*
@@ -1179,7 +1179,7 @@ static void resched_task(struct task_struct *p)
1179{ 1179{
1180 int cpu; 1180 int cpu;
1181 1181
1182 assert_spin_locked(&task_rq(p)->lock); 1182 assert_raw_spin_locked(&task_rq(p)->lock);
1183 1183
1184 if (test_tsk_need_resched(p)) 1184 if (test_tsk_need_resched(p))
1185 return; 1185 return;
@@ -1201,10 +1201,10 @@ static void resched_cpu(int cpu)
1201 struct rq *rq = cpu_rq(cpu); 1201 struct rq *rq = cpu_rq(cpu);
1202 unsigned long flags; 1202 unsigned long flags;
1203 1203
1204 if (!spin_trylock_irqsave(&rq->lock, flags)) 1204 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
1205 return; 1205 return;
1206 resched_task(cpu_curr(cpu)); 1206 resched_task(cpu_curr(cpu));
1207 spin_unlock_irqrestore(&rq->lock, flags); 1207 raw_spin_unlock_irqrestore(&rq->lock, flags);
1208} 1208}
1209 1209
1210#ifdef CONFIG_NO_HZ 1210#ifdef CONFIG_NO_HZ
@@ -1273,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1273#else /* !CONFIG_SMP */ 1273#else /* !CONFIG_SMP */
1274static void resched_task(struct task_struct *p) 1274static void resched_task(struct task_struct *p)
1275{ 1275{
1276 assert_spin_locked(&task_rq(p)->lock); 1276 assert_raw_spin_locked(&task_rq(p)->lock);
1277 set_tsk_need_resched(p); 1277 set_tsk_need_resched(p);
1278} 1278}
1279 1279
@@ -1600,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
1600 struct rq *rq = cpu_rq(cpu); 1600 struct rq *rq = cpu_rq(cpu);
1601 unsigned long flags; 1601 unsigned long flags;
1602 1602
1603 spin_lock_irqsave(&rq->lock, flags); 1603 raw_spin_lock_irqsave(&rq->lock, flags);
1604 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; 1604 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
1605 tg->cfs_rq[cpu]->shares = boost ? 0 : shares; 1605 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
1606 __set_se_shares(tg->se[cpu], shares); 1606 __set_se_shares(tg->se[cpu], shares);
1607 spin_unlock_irqrestore(&rq->lock, flags); 1607 raw_spin_unlock_irqrestore(&rq->lock, flags);
1608 } 1608 }
1609} 1609}
1610 1610
@@ -1706,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1706 if (root_task_group_empty()) 1706 if (root_task_group_empty())
1707 return; 1707 return;
1708 1708
1709 spin_unlock(&rq->lock); 1709 raw_spin_unlock(&rq->lock);
1710 update_shares(sd); 1710 update_shares(sd);
1711 spin_lock(&rq->lock); 1711 raw_spin_lock(&rq->lock);
1712} 1712}
1713 1713
1714static void update_h_load(long cpu) 1714static void update_h_load(long cpu)
@@ -1748,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1748 __acquires(busiest->lock) 1748 __acquires(busiest->lock)
1749 __acquires(this_rq->lock) 1749 __acquires(this_rq->lock)
1750{ 1750{
1751 spin_unlock(&this_rq->lock); 1751 raw_spin_unlock(&this_rq->lock);
1752 double_rq_lock(this_rq, busiest); 1752 double_rq_lock(this_rq, busiest);
1753 1753
1754 return 1; 1754 return 1;
@@ -1769,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
 	int ret = 0;
 
-	if (unlikely(!spin_trylock(&busiest->lock))) {
+	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
 		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_unlock(&this_rq->lock);
+			raw_spin_lock(&busiest->lock);
+			raw_spin_lock_nested(&this_rq->lock,
+					      SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock_nested(&busiest->lock,
+					      SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
@@ -1790,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
 	if (unlikely(!irqs_disabled())) {
 		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
+		raw_spin_unlock(&this_rq->lock);
 		BUG_ON(1);
 	}
 
@@ -1800,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {
-	spin_unlock(&busiest->lock);
+	raw_spin_unlock(&busiest->lock);
 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 #endif
@@ -2023,13 +2025,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 		return;
 	}
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
 	set_task_cpu(p, cpu);
 	p->cpus_allowed = cpumask_of_cpu(cpu);
 	p->rt.nr_cpus_allowed = 1;
 	p->flags |= PF_THREAD_BOUND;
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -2781,10 +2783,10 @@ static inline void post_schedule(struct rq *rq)
 	if (rq->post_schedule) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&rq->lock, flags);
+		raw_spin_lock_irqsave(&rq->lock, flags);
 		if (rq->curr->sched_class->post_schedule)
 			rq->curr->sched_class->post_schedule(rq);
-		spin_unlock_irqrestore(&rq->lock, flags);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 		rq->post_schedule = 0;
 	}
@@ -3066,15 +3068,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 {
 	BUG_ON(!irqs_disabled());
 	if (rq1 == rq2) {
-		spin_lock(&rq1->lock);
+		raw_spin_lock(&rq1->lock);
 		__acquire(rq2->lock);	/* Fake it out ;) */
 	} else {
 		if (rq1 < rq2) {
-			spin_lock(&rq1->lock);
-			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock(&rq1->lock);
+			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
-			spin_lock(&rq2->lock);
-			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+			raw_spin_lock(&rq2->lock);
+			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -3091,9 +3093,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
-	spin_unlock(&rq1->lock);
+	raw_spin_unlock(&rq1->lock);
 	if (rq1 != rq2)
-		spin_unlock(&rq2->lock);
+		raw_spin_unlock(&rq2->lock);
 	else
 		__release(rq2->lock);
 }
@@ -4186,14 +4188,15 @@ redo:
 
 	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-		spin_lock_irqsave(&busiest->lock, flags);
+		raw_spin_lock_irqsave(&busiest->lock, flags);
 
 		/* don't kick the migration_thread, if the curr
 		 * task on busiest cpu can't be moved to this_cpu
 		 */
 		if (!cpumask_test_cpu(this_cpu,
 				      &busiest->curr->cpus_allowed)) {
-			spin_unlock_irqrestore(&busiest->lock, flags);
+			raw_spin_unlock_irqrestore(&busiest->lock,
+						    flags);
 			all_pinned = 1;
 			goto out_one_pinned;
 		}
@@ -4203,7 +4206,7 @@ redo:
 			busiest->push_cpu = this_cpu;
 			active_balance = 1;
 		}
-		spin_unlock_irqrestore(&busiest->lock, flags);
+		raw_spin_unlock_irqrestore(&busiest->lock, flags);
 		if (active_balance)
 			wake_up_process(busiest->migration_thread);
 
@@ -4385,10 +4388,10 @@ redo:
4385 /* 4388 /*
4386 * Should not call ttwu while holding a rq->lock 4389 * Should not call ttwu while holding a rq->lock
4387 */ 4390 */
4388 spin_unlock(&this_rq->lock); 4391 raw_spin_unlock(&this_rq->lock);
4389 if (active_balance) 4392 if (active_balance)
4390 wake_up_process(busiest->migration_thread); 4393 wake_up_process(busiest->migration_thread);
4391 spin_lock(&this_rq->lock); 4394 raw_spin_lock(&this_rq->lock);
4392 4395
4393 } else 4396 } else
4394 sd->nr_balance_failed = 0; 4397 sd->nr_balance_failed = 0;
@@ -5257,11 +5260,11 @@ void scheduler_tick(void)
5257 5260
5258 sched_clock_tick(); 5261 sched_clock_tick();
5259 5262
5260 spin_lock(&rq->lock); 5263 raw_spin_lock(&rq->lock);
5261 update_rq_clock(rq); 5264 update_rq_clock(rq);
5262 update_cpu_load(rq); 5265 update_cpu_load(rq);
5263 curr->sched_class->task_tick(rq, curr, 0); 5266 curr->sched_class->task_tick(rq, curr, 0);
5264 spin_unlock(&rq->lock); 5267 raw_spin_unlock(&rq->lock);
5265 5268
5266 perf_event_task_tick(curr, cpu); 5269 perf_event_task_tick(curr, cpu);
5267 5270
@@ -5455,7 +5458,7 @@ need_resched_nonpreemptible:
5455 if (sched_feat(HRTICK)) 5458 if (sched_feat(HRTICK))
5456 hrtick_clear(rq); 5459 hrtick_clear(rq);
5457 5460
5458 spin_lock_irq(&rq->lock); 5461 raw_spin_lock_irq(&rq->lock);
5459 update_rq_clock(rq); 5462 update_rq_clock(rq);
5460 clear_tsk_need_resched(prev); 5463 clear_tsk_need_resched(prev);
5461 5464
@@ -5491,7 +5494,7 @@ need_resched_nonpreemptible:
5491 cpu = smp_processor_id(); 5494 cpu = smp_processor_id();
5492 rq = cpu_rq(cpu); 5495 rq = cpu_rq(cpu);
5493 } else 5496 } else
5494 spin_unlock_irq(&rq->lock); 5497 raw_spin_unlock_irq(&rq->lock);
5495 5498
5496 post_schedule(rq); 5499 post_schedule(rq);
5497 5500
@@ -6320,7 +6323,7 @@ recheck:
6320 * make sure no PI-waiters arrive (or leave) while we are 6323 * make sure no PI-waiters arrive (or leave) while we are
6321 * changing the priority of the task: 6324 * changing the priority of the task:
6322 */ 6325 */
6323 spin_lock_irqsave(&p->pi_lock, flags); 6326 raw_spin_lock_irqsave(&p->pi_lock, flags);
6324 /* 6327 /*
6325 * To be able to change p->policy safely, the appropriate 6328 * To be able to change p->policy safely, the appropriate
6326 * runqueue lock must be held. 6329 * runqueue lock must be held.
@@ -6330,7 +6333,7 @@ recheck:
6330 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 6333 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
6331 policy = oldpolicy = -1; 6334 policy = oldpolicy = -1;
6332 __task_rq_unlock(rq); 6335 __task_rq_unlock(rq);
6333 spin_unlock_irqrestore(&p->pi_lock, flags); 6336 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6334 goto recheck; 6337 goto recheck;
6335 } 6338 }
6336 update_rq_clock(rq); 6339 update_rq_clock(rq);
@@ -6354,7 +6357,7 @@ recheck:
6354 check_class_changed(rq, p, prev_class, oldprio, running); 6357 check_class_changed(rq, p, prev_class, oldprio, running);
6355 } 6358 }
6356 __task_rq_unlock(rq); 6359 __task_rq_unlock(rq);
6357 spin_unlock_irqrestore(&p->pi_lock, flags); 6360 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6358 6361
6359 rt_mutex_adjust_pi(p); 6362 rt_mutex_adjust_pi(p);
6360 6363
@@ -6684,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield)
6684 */ 6687 */
6685 __release(rq->lock); 6688 __release(rq->lock);
6686 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 6689 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
6687 _raw_spin_unlock(&rq->lock); 6690 do_raw_spin_unlock(&rq->lock);
6688 preempt_enable_no_resched(); 6691 preempt_enable_no_resched();
6689 6692
6690 schedule(); 6693 schedule();
@@ -6980,7 +6983,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6980 struct rq *rq = cpu_rq(cpu); 6983 struct rq *rq = cpu_rq(cpu);
6981 unsigned long flags; 6984 unsigned long flags;
6982 6985
6983 spin_lock_irqsave(&rq->lock, flags); 6986 raw_spin_lock_irqsave(&rq->lock, flags);
6984 6987
6985 __sched_fork(idle); 6988 __sched_fork(idle);
6986 idle->se.exec_start = sched_clock(); 6989 idle->se.exec_start = sched_clock();
@@ -6992,7 +6995,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6992#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 6995#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
6993 idle->oncpu = 1; 6996 idle->oncpu = 1;
6994#endif 6997#endif
6995 spin_unlock_irqrestore(&rq->lock, flags); 6998 raw_spin_unlock_irqrestore(&rq->lock, flags);
6996 6999
6997 /* Set the preempt count _outside_ the spinlocks! */ 7000 /* Set the preempt count _outside_ the spinlocks! */
6998#if defined(CONFIG_PREEMPT) 7001#if defined(CONFIG_PREEMPT)
@@ -7209,10 +7212,10 @@ static int migration_thread(void *data)
7209 struct migration_req *req; 7212 struct migration_req *req;
7210 struct list_head *head; 7213 struct list_head *head;
7211 7214
7212 spin_lock_irq(&rq->lock); 7215 raw_spin_lock_irq(&rq->lock);
7213 7216
7214 if (cpu_is_offline(cpu)) { 7217 if (cpu_is_offline(cpu)) {
7215 spin_unlock_irq(&rq->lock); 7218 raw_spin_unlock_irq(&rq->lock);
7216 break; 7219 break;
7217 } 7220 }
7218 7221
@@ -7224,7 +7227,7 @@ static int migration_thread(void *data)
7224 head = &rq->migration_queue; 7227 head = &rq->migration_queue;
7225 7228
7226 if (list_empty(head)) { 7229 if (list_empty(head)) {
7227 spin_unlock_irq(&rq->lock); 7230 raw_spin_unlock_irq(&rq->lock);
7228 schedule(); 7231 schedule();
7229 set_current_state(TASK_INTERRUPTIBLE); 7232 set_current_state(TASK_INTERRUPTIBLE);
7230 continue; 7233 continue;
@@ -7233,14 +7236,14 @@ static int migration_thread(void *data)
7233 list_del_init(head->next); 7236 list_del_init(head->next);
7234 7237
7235 if (req->task != NULL) { 7238 if (req->task != NULL) {
7236 spin_unlock(&rq->lock); 7239 raw_spin_unlock(&rq->lock);
7237 __migrate_task(req->task, cpu, req->dest_cpu); 7240 __migrate_task(req->task, cpu, req->dest_cpu);
7238 } else if (likely(cpu == (badcpu = smp_processor_id()))) { 7241 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
7239 req->dest_cpu = RCU_MIGRATION_GOT_QS; 7242 req->dest_cpu = RCU_MIGRATION_GOT_QS;
7240 spin_unlock(&rq->lock); 7243 raw_spin_unlock(&rq->lock);
7241 } else { 7244 } else {
7242 req->dest_cpu = RCU_MIGRATION_MUST_SYNC; 7245 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
7243 spin_unlock(&rq->lock); 7246 raw_spin_unlock(&rq->lock);
7244 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); 7247 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
7245 } 7248 }
7246 local_irq_enable(); 7249 local_irq_enable();
@@ -7363,14 +7366,14 @@ void sched_idle_next(void)
7363 * Strictly not necessary since rest of the CPUs are stopped by now 7366 * Strictly not necessary since rest of the CPUs are stopped by now
7364 * and interrupts disabled on the current cpu. 7367 * and interrupts disabled on the current cpu.
7365 */ 7368 */
7366 spin_lock_irqsave(&rq->lock, flags); 7369 raw_spin_lock_irqsave(&rq->lock, flags);
7367 7370
7368 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); 7371 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
7369 7372
7370 update_rq_clock(rq); 7373 update_rq_clock(rq);
7371 activate_task(rq, p, 0); 7374 activate_task(rq, p, 0);
7372 7375
7373 spin_unlock_irqrestore(&rq->lock, flags); 7376 raw_spin_unlock_irqrestore(&rq->lock, flags);
7374} 7377}
7375 7378
7376/* 7379/*
@@ -7406,9 +7409,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
7406 * that's OK. No task can be added to this CPU, so iteration is 7409 * that's OK. No task can be added to this CPU, so iteration is
7407 * fine. 7410 * fine.
7408 */ 7411 */
7409 spin_unlock_irq(&rq->lock); 7412 raw_spin_unlock_irq(&rq->lock);
7410 move_task_off_dead_cpu(dead_cpu, p); 7413 move_task_off_dead_cpu(dead_cpu, p);
7411 spin_lock_irq(&rq->lock); 7414 raw_spin_lock_irq(&rq->lock);
7412 7415
7413 put_task_struct(p); 7416 put_task_struct(p);
7414} 7417}
@@ -7674,13 +7677,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7674 7677
7675 /* Update our root-domain */ 7678 /* Update our root-domain */
7676 rq = cpu_rq(cpu); 7679 rq = cpu_rq(cpu);
7677 spin_lock_irqsave(&rq->lock, flags); 7680 raw_spin_lock_irqsave(&rq->lock, flags);
7678 if (rq->rd) { 7681 if (rq->rd) {
7679 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7682 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7680 7683
7681 set_rq_online(rq); 7684 set_rq_online(rq);
7682 } 7685 }
7683 spin_unlock_irqrestore(&rq->lock, flags); 7686 raw_spin_unlock_irqrestore(&rq->lock, flags);
7684 break; 7687 break;
7685 7688
7686#ifdef CONFIG_HOTPLUG_CPU 7689#ifdef CONFIG_HOTPLUG_CPU
@@ -7705,13 +7708,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7705 put_task_struct(rq->migration_thread); 7708 put_task_struct(rq->migration_thread);
7706 rq->migration_thread = NULL; 7709 rq->migration_thread = NULL;
7707 /* Idle task back to normal (off runqueue, low prio) */ 7710 /* Idle task back to normal (off runqueue, low prio) */
7708 spin_lock_irq(&rq->lock); 7711 raw_spin_lock_irq(&rq->lock);
7709 update_rq_clock(rq); 7712 update_rq_clock(rq);
7710 deactivate_task(rq, rq->idle, 0); 7713 deactivate_task(rq, rq->idle, 0);
7711 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); 7714 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
7712 rq->idle->sched_class = &idle_sched_class; 7715 rq->idle->sched_class = &idle_sched_class;
7713 migrate_dead_tasks(cpu); 7716 migrate_dead_tasks(cpu);
7714 spin_unlock_irq(&rq->lock); 7717 raw_spin_unlock_irq(&rq->lock);
7715 cpuset_unlock(); 7718 cpuset_unlock();
7716 migrate_nr_uninterruptible(rq); 7719 migrate_nr_uninterruptible(rq);
7717 BUG_ON(rq->nr_running != 0); 7720 BUG_ON(rq->nr_running != 0);
@@ -7721,30 +7724,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7721 * they didn't take sched_hotcpu_mutex. Just wake up 7724 * they didn't take sched_hotcpu_mutex. Just wake up
7722 * the requestors. 7725 * the requestors.
7723 */ 7726 */
7724 spin_lock_irq(&rq->lock); 7727 raw_spin_lock_irq(&rq->lock);
7725 while (!list_empty(&rq->migration_queue)) { 7728 while (!list_empty(&rq->migration_queue)) {
7726 struct migration_req *req; 7729 struct migration_req *req;
7727 7730
7728 req = list_entry(rq->migration_queue.next, 7731 req = list_entry(rq->migration_queue.next,
7729 struct migration_req, list); 7732 struct migration_req, list);
7730 list_del_init(&req->list); 7733 list_del_init(&req->list);
7731 spin_unlock_irq(&rq->lock); 7734 raw_spin_unlock_irq(&rq->lock);
7732 complete(&req->done); 7735 complete(&req->done);
7733 spin_lock_irq(&rq->lock); 7736 raw_spin_lock_irq(&rq->lock);
7734 } 7737 }
7735 spin_unlock_irq(&rq->lock); 7738 raw_spin_unlock_irq(&rq->lock);
7736 break; 7739 break;
7737 7740
7738 case CPU_DYING: 7741 case CPU_DYING:
7739 case CPU_DYING_FROZEN: 7742 case CPU_DYING_FROZEN:
7740 /* Update our root-domain */ 7743 /* Update our root-domain */
7741 rq = cpu_rq(cpu); 7744 rq = cpu_rq(cpu);
7742 spin_lock_irqsave(&rq->lock, flags); 7745 raw_spin_lock_irqsave(&rq->lock, flags);
7743 if (rq->rd) { 7746 if (rq->rd) {
7744 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7747 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7745 set_rq_offline(rq); 7748 set_rq_offline(rq);
7746 } 7749 }
7747 spin_unlock_irqrestore(&rq->lock, flags); 7750 raw_spin_unlock_irqrestore(&rq->lock, flags);
7748 break; 7751 break;
7749#endif 7752#endif
7750 } 7753 }
@@ -7974,7 +7977,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7974 struct root_domain *old_rd = NULL; 7977 struct root_domain *old_rd = NULL;
7975 unsigned long flags; 7978 unsigned long flags;
7976 7979
7977 spin_lock_irqsave(&rq->lock, flags); 7980 raw_spin_lock_irqsave(&rq->lock, flags);
7978 7981
7979 if (rq->rd) { 7982 if (rq->rd) {
7980 old_rd = rq->rd; 7983 old_rd = rq->rd;
@@ -8000,7 +8003,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
8000 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 8003 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
8001 set_rq_online(rq); 8004 set_rq_online(rq);
8002 8005
8003 spin_unlock_irqrestore(&rq->lock, flags); 8006 raw_spin_unlock_irqrestore(&rq->lock, flags);
8004 8007
8005 if (old_rd) 8008 if (old_rd)
8006 free_rootdomain(old_rd); 8009 free_rootdomain(old_rd);
@@ -9357,13 +9360,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
9357#ifdef CONFIG_SMP 9360#ifdef CONFIG_SMP
9358 rt_rq->rt_nr_migratory = 0; 9361 rt_rq->rt_nr_migratory = 0;
9359 rt_rq->overloaded = 0; 9362 rt_rq->overloaded = 0;
9360 plist_head_init(&rt_rq->pushable_tasks, &rq->lock); 9363 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
9361#endif 9364#endif
9362 9365
9363 rt_rq->rt_time = 0; 9366 rt_rq->rt_time = 0;
9364 rt_rq->rt_throttled = 0; 9367 rt_rq->rt_throttled = 0;
9365 rt_rq->rt_runtime = 0; 9368 rt_rq->rt_runtime = 0;
9366 spin_lock_init(&rt_rq->rt_runtime_lock); 9369 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
9367 9370
9368#ifdef CONFIG_RT_GROUP_SCHED 9371#ifdef CONFIG_RT_GROUP_SCHED
9369 rt_rq->rt_nr_boosted = 0; 9372 rt_rq->rt_nr_boosted = 0;
@@ -9523,7 +9526,7 @@ void __init sched_init(void)
9523 struct rq *rq; 9526 struct rq *rq;
9524 9527
9525 rq = cpu_rq(i); 9528 rq = cpu_rq(i);
9526 spin_lock_init(&rq->lock); 9529 raw_spin_lock_init(&rq->lock);
9527 rq->nr_running = 0; 9530 rq->nr_running = 0;
9528 rq->calc_load_active = 0; 9531 rq->calc_load_active = 0;
9529 rq->calc_load_update = jiffies + LOAD_FREQ; 9532 rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9621,7 +9624,7 @@ void __init sched_init(void)
9621#endif 9624#endif
9622 9625
9623#ifdef CONFIG_RT_MUTEXES 9626#ifdef CONFIG_RT_MUTEXES
9624 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); 9627 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
9625#endif 9628#endif
9626 9629
9627 /* 9630 /*
@@ -9746,13 +9749,13 @@ void normalize_rt_tasks(void)
9746 continue; 9749 continue;
9747 } 9750 }
9748 9751
9749 spin_lock(&p->pi_lock); 9752 raw_spin_lock(&p->pi_lock);
9750 rq = __task_rq_lock(p); 9753 rq = __task_rq_lock(p);
9751 9754
9752 normalize_task(rq, p); 9755 normalize_task(rq, p);
9753 9756
9754 __task_rq_unlock(rq); 9757 __task_rq_unlock(rq);
9755 spin_unlock(&p->pi_lock); 9758 raw_spin_unlock(&p->pi_lock);
9756 } while_each_thread(g, p); 9759 } while_each_thread(g, p);
9757 9760
9758 read_unlock_irqrestore(&tasklist_lock, flags); 9761 read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10115,9 +10118,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
10115 struct rq *rq = cfs_rq->rq; 10118 struct rq *rq = cfs_rq->rq;
10116 unsigned long flags; 10119 unsigned long flags;
10117 10120
10118 spin_lock_irqsave(&rq->lock, flags); 10121 raw_spin_lock_irqsave(&rq->lock, flags);
10119 __set_se_shares(se, shares); 10122 __set_se_shares(se, shares);
10120 spin_unlock_irqrestore(&rq->lock, flags); 10123 raw_spin_unlock_irqrestore(&rq->lock, flags);
10121} 10124}
10122 10125
10123static DEFINE_MUTEX(shares_mutex); 10126static DEFINE_MUTEX(shares_mutex);
@@ -10302,18 +10305,18 @@ static int tg_set_bandwidth(struct task_group *tg,
10302 if (err) 10305 if (err)
10303 goto unlock; 10306 goto unlock;
10304 10307
10305 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10308 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10306 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 10309 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
10307 tg->rt_bandwidth.rt_runtime = rt_runtime; 10310 tg->rt_bandwidth.rt_runtime = rt_runtime;
10308 10311
10309 for_each_possible_cpu(i) { 10312 for_each_possible_cpu(i) {
10310 struct rt_rq *rt_rq = tg->rt_rq[i]; 10313 struct rt_rq *rt_rq = tg->rt_rq[i];
10311 10314
10312 spin_lock(&rt_rq->rt_runtime_lock); 10315 raw_spin_lock(&rt_rq->rt_runtime_lock);
10313 rt_rq->rt_runtime = rt_runtime; 10316 rt_rq->rt_runtime = rt_runtime;
10314 spin_unlock(&rt_rq->rt_runtime_lock); 10317 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10315 } 10318 }
10316 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10319 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10317 unlock: 10320 unlock:
10318 read_unlock(&tasklist_lock); 10321 read_unlock(&tasklist_lock);
10319 mutex_unlock(&rt_constraints_mutex); 10322 mutex_unlock(&rt_constraints_mutex);
@@ -10418,15 +10421,15 @@ static int sched_rt_global_constraints(void)
10418 if (sysctl_sched_rt_runtime == 0) 10421 if (sysctl_sched_rt_runtime == 0)
10419 return -EBUSY; 10422 return -EBUSY;
10420 10423
10421 spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 10424 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
10422 for_each_possible_cpu(i) { 10425 for_each_possible_cpu(i) {
10423 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 10426 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
10424 10427
10425 spin_lock(&rt_rq->rt_runtime_lock); 10428 raw_spin_lock(&rt_rq->rt_runtime_lock);
10426 rt_rq->rt_runtime = global_rt_runtime(); 10429 rt_rq->rt_runtime = global_rt_runtime();
10427 spin_unlock(&rt_rq->rt_runtime_lock); 10430 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10428 } 10431 }
10429 spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 10432 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
10430 10433
10431 return 0; 10434 return 0;
10432} 10435}
@@ -10717,9 +10720,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
10717 /* 10720 /*
10718 * Take rq->lock to make 64-bit read safe on 32-bit platforms. 10721 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
10719 */ 10722 */
10720 spin_lock_irq(&cpu_rq(cpu)->lock); 10723 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10721 data = *cpuusage; 10724 data = *cpuusage;
10722 spin_unlock_irq(&cpu_rq(cpu)->lock); 10725 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10723#else 10726#else
10724 data = *cpuusage; 10727 data = *cpuusage;
10725#endif 10728#endif
@@ -10735,9 +10738,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
10735 /* 10738 /*
10736 * Take rq->lock to make 64-bit write safe on 32-bit platforms. 10739 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
10737 */ 10740 */
10738 spin_lock_irq(&cpu_rq(cpu)->lock); 10741 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10739 *cpuusage = val; 10742 *cpuusage = val;
10740 spin_unlock_irq(&cpu_rq(cpu)->lock); 10743 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10741#else 10744#else
10742 *cpuusage = val; 10745 *cpuusage = val;
10743#endif 10746#endif
@@ -10971,9 +10974,9 @@ void synchronize_sched_expedited(void)
10971 init_completion(&req->done); 10974 init_completion(&req->done);
10972 req->task = NULL; 10975 req->task = NULL;
10973 req->dest_cpu = RCU_MIGRATION_NEED_QS; 10976 req->dest_cpu = RCU_MIGRATION_NEED_QS;
10974 spin_lock_irqsave(&rq->lock, flags); 10977 raw_spin_lock_irqsave(&rq->lock, flags);
10975 list_add(&req->list, &rq->migration_queue); 10978 list_add(&req->list, &rq->migration_queue);
10976 spin_unlock_irqrestore(&rq->lock, flags); 10979 raw_spin_unlock_irqrestore(&rq->lock, flags);
10977 wake_up_process(rq->migration_thread); 10980 wake_up_process(rq->migration_thread);
10978 } 10981 }
10979 for_each_online_cpu(cpu) { 10982 for_each_online_cpu(cpu) {
@@ -10981,11 +10984,11 @@ void synchronize_sched_expedited(void)
10981 req = &per_cpu(rcu_migration_req, cpu); 10984 req = &per_cpu(rcu_migration_req, cpu);
10982 rq = cpu_rq(cpu); 10985 rq = cpu_rq(cpu);
10983 wait_for_completion(&req->done); 10986 wait_for_completion(&req->done);
10984 spin_lock_irqsave(&rq->lock, flags); 10987 raw_spin_lock_irqsave(&rq->lock, flags);
10985 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) 10988 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
10986 need_full_sync = 1; 10989 need_full_sync = 1;
10987 req->dest_cpu = RCU_MIGRATION_IDLE; 10990 req->dest_cpu = RCU_MIGRATION_IDLE;
10988 spin_unlock_irqrestore(&rq->lock, flags); 10991 raw_spin_unlock_irqrestore(&rq->lock, flags);
10989 } 10992 }
10990 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; 10993 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10991 synchronize_sched_expedited_count++; 10994 synchronize_sched_expedited_count++;
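
The sched.c hunks above are a mechanical substitution: every rq->lock, pi_lock and rt_runtime_lock site moves from the spinlock_t API to the raw_spinlock_t API, since on PREEMPT_RT a spinlock_t may become a sleeping lock while the scheduler core needs locks that truly spin. A minimal sketch of the pattern, using a hypothetical toy_rq rather than the real struct rq:

#include <linux/spinlock.h>

/* Illustrative conversion pattern; toy_rq and its helpers are made up. */
struct toy_rq {
	raw_spinlock_t lock;		/* was: spinlock_t lock; */
	unsigned long nr_running;
};

static void toy_rq_init(struct toy_rq *rq)
{
	raw_spin_lock_init(&rq->lock);	/* was: spin_lock_init() */
	rq->nr_running = 0;
}

static void toy_inc_nr_running(struct toy_rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);	/* was: spin_lock_irqsave() */
	rq->nr_running++;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

Every variant maps one-to-one: spin_lock_irq() becomes raw_spin_lock_irq(), spin_unlock() becomes raw_spin_unlock(), and so on, with no behavioral change on a mainline (non-rt) kernel.
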
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 0f052fc674d5..597b33099dfa 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -135,26 +135,26 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
135 if (likely(newpri != CPUPRI_INVALID)) { 135 if (likely(newpri != CPUPRI_INVALID)) {
136 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; 136 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
137 137
138 spin_lock_irqsave(&vec->lock, flags); 138 raw_spin_lock_irqsave(&vec->lock, flags);
139 139
140 cpumask_set_cpu(cpu, vec->mask); 140 cpumask_set_cpu(cpu, vec->mask);
141 vec->count++; 141 vec->count++;
142 if (vec->count == 1) 142 if (vec->count == 1)
143 set_bit(newpri, cp->pri_active); 143 set_bit(newpri, cp->pri_active);
144 144
145 spin_unlock_irqrestore(&vec->lock, flags); 145 raw_spin_unlock_irqrestore(&vec->lock, flags);
146 } 146 }
147 if (likely(oldpri != CPUPRI_INVALID)) { 147 if (likely(oldpri != CPUPRI_INVALID)) {
148 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; 148 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
149 149
150 spin_lock_irqsave(&vec->lock, flags); 150 raw_spin_lock_irqsave(&vec->lock, flags);
151 151
152 vec->count--; 152 vec->count--;
153 if (!vec->count) 153 if (!vec->count)
154 clear_bit(oldpri, cp->pri_active); 154 clear_bit(oldpri, cp->pri_active);
155 cpumask_clear_cpu(cpu, vec->mask); 155 cpumask_clear_cpu(cpu, vec->mask);
156 156
157 spin_unlock_irqrestore(&vec->lock, flags); 157 raw_spin_unlock_irqrestore(&vec->lock, flags);
158 } 158 }
159 159
160 *currpri = newpri; 160 *currpri = newpri;
@@ -180,7 +180,7 @@ int cpupri_init(struct cpupri *cp, bool bootmem)
180 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { 180 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
181 struct cpupri_vec *vec = &cp->pri_to_cpu[i]; 181 struct cpupri_vec *vec = &cp->pri_to_cpu[i];
182 182
183 spin_lock_init(&vec->lock); 183 raw_spin_lock_init(&vec->lock);
184 vec->count = 0; 184 vec->count = 0;
185 if (!zalloc_cpumask_var(&vec->mask, gfp)) 185 if (!zalloc_cpumask_var(&vec->mask, gfp))
186 goto cleanup; 186 goto cleanup;
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 9a7e859b8fbf..7cb5bb6b95be 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -12,7 +12,7 @@
12/* values 2-101 are RT priorities 0-99 */ 12/* values 2-101 are RT priorities 0-99 */
13 13
14struct cpupri_vec { 14struct cpupri_vec {
15 spinlock_t lock; 15 raw_spinlock_t lock;
16 int count; 16 int count;
17 cpumask_var_t mask; 17 cpumask_var_t mask;
18}; 18};
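
The header change above is what makes the conversion verifiable: once cpupri_vec.lock is a raw_spinlock_t, any leftover spin_lock()/spin_unlock() call on it trips the compiler's pointer type checking, because after this series spin_lock() takes a spinlock_t * while raw_spin_lock() takes a raw_spinlock_t *. A hedged sketch (the declarations are illustrative, not from the patch):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(rlock);
static DEFINE_SPINLOCK(slock);

static void demo(void)
{
	raw_spin_lock(&rlock);		/* ok: raw API on a raw lock */
	raw_spin_unlock(&rlock);

	spin_lock(&slock);		/* ok: regular API on spinlock_t */
	spin_unlock(&slock);

	/* spin_lock(&rlock); -- would warn: incompatible pointer type */
}
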
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5ae24fc65d75..67f95aada4b9 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
184 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", 184 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
185 SPLIT_NS(cfs_rq->exec_clock)); 185 SPLIT_NS(cfs_rq->exec_clock));
186 186
187 spin_lock_irqsave(&rq->lock, flags); 187 raw_spin_lock_irqsave(&rq->lock, flags);
188 if (cfs_rq->rb_leftmost) 188 if (cfs_rq->rb_leftmost)
189 MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime; 189 MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
190 last = __pick_last_entity(cfs_rq); 190 last = __pick_last_entity(cfs_rq);
@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
192 max_vruntime = last->vruntime; 192 max_vruntime = last->vruntime;
193 min_vruntime = cfs_rq->min_vruntime; 193 min_vruntime = cfs_rq->min_vruntime;
194 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; 194 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
195 spin_unlock_irqrestore(&rq->lock, flags); 195 raw_spin_unlock_irqrestore(&rq->lock, flags);
196 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", 196 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
197 SPLIT_NS(MIN_vruntime)); 197 SPLIT_NS(MIN_vruntime));
198 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", 198 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
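
print_cfs_rq() above also illustrates the consumer side of the lock: take rq->lock with interrupts disabled, copy the fields of interest into locals, drop the lock, and only then format output. A sketch of that pattern under a hypothetical toy struct, since struct rq itself is private to kernel/sched.c:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct toy_cfs_stats {
	raw_spinlock_t lock;
	u64 min_vruntime;
};

static void print_snapshot(struct toy_cfs_stats *s)
{
	unsigned long flags;
	u64 min_vr;

	raw_spin_lock_irqsave(&s->lock, flags);
	min_vr = s->min_vruntime;	/* copy while the value is stable */
	raw_spin_unlock_irqrestore(&s->lock, flags);

	/* format only after the unlock, outside the critical section */
	printk(KERN_DEBUG "min_vruntime=%llu\n", (unsigned long long)min_vr);
}
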
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 804a411838f1..5bedf6e3ebf3 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1955,7 +1955,7 @@ static void task_fork_fair(struct task_struct *p)
1955 struct rq *rq = this_rq(); 1955 struct rq *rq = this_rq();
1956 unsigned long flags; 1956 unsigned long flags;
1957 1957
1958 spin_lock_irqsave(&rq->lock, flags); 1958 raw_spin_lock_irqsave(&rq->lock, flags);
1959 1959
1960 if (unlikely(task_cpu(p) != this_cpu)) 1960 if (unlikely(task_cpu(p) != this_cpu))
1961 __set_task_cpu(p, this_cpu); 1961 __set_task_cpu(p, this_cpu);
@@ -1975,7 +1975,7 @@ static void task_fork_fair(struct task_struct *p)
1975 resched_task(rq->curr); 1975 resched_task(rq->curr);
1976 } 1976 }
1977 1977
1978 spin_unlock_irqrestore(&rq->lock, flags); 1978 raw_spin_unlock_irqrestore(&rq->lock, flags);
1979} 1979}
1980 1980
1981/* 1981/*
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 33d5384a73a8..5f93b570d383 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
34static void 34static void
35dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep) 35dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
36{ 36{
37 spin_unlock_irq(&rq->lock); 37 raw_spin_unlock_irq(&rq->lock);
38 printk(KERN_ERR "bad: scheduling from the idle thread!\n"); 38 printk(KERN_ERR "bad: scheduling from the idle thread!\n");
39 dump_stack(); 39 dump_stack();
40 spin_lock_irq(&rq->lock); 40 raw_spin_lock_irq(&rq->lock);
41} 41}
42 42
43static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) 43static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
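
dequeue_task_idle() above shows the drop-and-reacquire idiom: release rq->lock around the slow printk()/dump_stack() diagnostic, then take it back before returning to the scheduler core. The caveat, also relevant to the migration_thread() and migrate_dead() hunks earlier, is that any state examined before the unlock must be revalidated afterwards. A sketch with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/spinlock.h>

static void report_and_continue(raw_spinlock_t *lock)
{
	raw_spin_unlock_irq(lock);	/* printk() can recurse into the
					 * scheduler via wakeups, so never
					 * hold a runqueue lock across it */
	printk(KERN_ERR "unexpected dequeue of the idle task\n");
	dump_stack();
	raw_spin_lock_irq(lock);	/* caller must revalidate rq state */
}
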
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index aecbd9c6b20c..d2ea2828164e 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
327 327
328 weight = cpumask_weight(rd->span); 328 weight = cpumask_weight(rd->span);
329 329
330 spin_lock(&rt_b->rt_runtime_lock); 330 raw_spin_lock(&rt_b->rt_runtime_lock);
331 rt_period = ktime_to_ns(rt_b->rt_period); 331 rt_period = ktime_to_ns(rt_b->rt_period);
332 for_each_cpu(i, rd->span) { 332 for_each_cpu(i, rd->span) {
333 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 333 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
336 if (iter == rt_rq) 336 if (iter == rt_rq)
337 continue; 337 continue;
338 338
339 spin_lock(&iter->rt_runtime_lock); 339 raw_spin_lock(&iter->rt_runtime_lock);
340 /* 340 /*
341 * Either all rqs have inf runtime and there's nothing to steal 341 * Either all rqs have inf runtime and there's nothing to steal
342 * or __disable_runtime() below sets a specific rq to inf to 342 * or __disable_runtime() below sets a specific rq to inf to
@@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
358 rt_rq->rt_runtime += diff; 358 rt_rq->rt_runtime += diff;
359 more = 1; 359 more = 1;
360 if (rt_rq->rt_runtime == rt_period) { 360 if (rt_rq->rt_runtime == rt_period) {
361 spin_unlock(&iter->rt_runtime_lock); 361 raw_spin_unlock(&iter->rt_runtime_lock);
362 break; 362 break;
363 } 363 }
364 } 364 }
365next: 365next:
366 spin_unlock(&iter->rt_runtime_lock); 366 raw_spin_unlock(&iter->rt_runtime_lock);
367 } 367 }
368 spin_unlock(&rt_b->rt_runtime_lock); 368 raw_spin_unlock(&rt_b->rt_runtime_lock);
369 369
370 return more; 370 return more;
371} 371}
@@ -386,8 +386,8 @@ static void __disable_runtime(struct rq *rq)
386 s64 want; 386 s64 want;
387 int i; 387 int i;
388 388
389 spin_lock(&rt_b->rt_runtime_lock); 389 raw_spin_lock(&rt_b->rt_runtime_lock);
390 spin_lock(&rt_rq->rt_runtime_lock); 390 raw_spin_lock(&rt_rq->rt_runtime_lock);
391 /* 391 /*
392 * Either we're all inf and nobody needs to borrow, or we're 392 * Either we're all inf and nobody needs to borrow, or we're
393 * already disabled and thus have nothing to do, or we have 393 * already disabled and thus have nothing to do, or we have
@@ -396,7 +396,7 @@ static void __disable_runtime(struct rq *rq)
396 if (rt_rq->rt_runtime == RUNTIME_INF || 396 if (rt_rq->rt_runtime == RUNTIME_INF ||
397 rt_rq->rt_runtime == rt_b->rt_runtime) 397 rt_rq->rt_runtime == rt_b->rt_runtime)
398 goto balanced; 398 goto balanced;
399 spin_unlock(&rt_rq->rt_runtime_lock); 399 raw_spin_unlock(&rt_rq->rt_runtime_lock);
400 400
401 /* 401 /*
402 * Calculate the difference between what we started out with 402 * Calculate the difference between what we started out with
@@ -418,7 +418,7 @@ static void __disable_runtime(struct rq *rq)
418 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) 418 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
419 continue; 419 continue;
420 420
421 spin_lock(&iter->rt_runtime_lock); 421 raw_spin_lock(&iter->rt_runtime_lock);
422 if (want > 0) { 422 if (want > 0) {
423 diff = min_t(s64, iter->rt_runtime, want); 423 diff = min_t(s64, iter->rt_runtime, want);
424 iter->rt_runtime -= diff; 424 iter->rt_runtime -= diff;
@@ -427,13 +427,13 @@ static void __disable_runtime(struct rq *rq)
427 iter->rt_runtime -= want; 427 iter->rt_runtime -= want;
428 want -= want; 428 want -= want;
429 } 429 }
430 spin_unlock(&iter->rt_runtime_lock); 430 raw_spin_unlock(&iter->rt_runtime_lock);
431 431
432 if (!want) 432 if (!want)
433 break; 433 break;
434 } 434 }
435 435
436 spin_lock(&rt_rq->rt_runtime_lock); 436 raw_spin_lock(&rt_rq->rt_runtime_lock);
437 /* 437 /*
438 * We cannot be left wanting - that would mean some runtime 438 * We cannot be left wanting - that would mean some runtime
439 * leaked out of the system. 439 * leaked out of the system.
@@ -445,8 +445,8 @@ balanced:
445 * runtime - in which case borrowing doesn't make sense. 445 * runtime - in which case borrowing doesn't make sense.
446 */ 446 */
447 rt_rq->rt_runtime = RUNTIME_INF; 447 rt_rq->rt_runtime = RUNTIME_INF;
448 spin_unlock(&rt_rq->rt_runtime_lock); 448 raw_spin_unlock(&rt_rq->rt_runtime_lock);
449 spin_unlock(&rt_b->rt_runtime_lock); 449 raw_spin_unlock(&rt_b->rt_runtime_lock);
450 } 450 }
451} 451}
452 452
@@ -454,9 +454,9 @@ static void disable_runtime(struct rq *rq)
454{ 454{
455 unsigned long flags; 455 unsigned long flags;
456 456
457 spin_lock_irqsave(&rq->lock, flags); 457 raw_spin_lock_irqsave(&rq->lock, flags);
458 __disable_runtime(rq); 458 __disable_runtime(rq);
459 spin_unlock_irqrestore(&rq->lock, flags); 459 raw_spin_unlock_irqrestore(&rq->lock, flags);
460} 460}
461 461
462static void __enable_runtime(struct rq *rq) 462static void __enable_runtime(struct rq *rq)
@@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *rq)
472 for_each_leaf_rt_rq(rt_rq, rq) { 472 for_each_leaf_rt_rq(rt_rq, rq) {
473 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 473 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
474 474
475 spin_lock(&rt_b->rt_runtime_lock); 475 raw_spin_lock(&rt_b->rt_runtime_lock);
476 spin_lock(&rt_rq->rt_runtime_lock); 476 raw_spin_lock(&rt_rq->rt_runtime_lock);
477 rt_rq->rt_runtime = rt_b->rt_runtime; 477 rt_rq->rt_runtime = rt_b->rt_runtime;
478 rt_rq->rt_time = 0; 478 rt_rq->rt_time = 0;
479 rt_rq->rt_throttled = 0; 479 rt_rq->rt_throttled = 0;
480 spin_unlock(&rt_rq->rt_runtime_lock); 480 raw_spin_unlock(&rt_rq->rt_runtime_lock);
481 spin_unlock(&rt_b->rt_runtime_lock); 481 raw_spin_unlock(&rt_b->rt_runtime_lock);
482 } 482 }
483} 483}
484 484
@@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq)
486{ 486{
487 unsigned long flags; 487 unsigned long flags;
488 488
489 spin_lock_irqsave(&rq->lock, flags); 489 raw_spin_lock_irqsave(&rq->lock, flags);
490 __enable_runtime(rq); 490 __enable_runtime(rq);
491 spin_unlock_irqrestore(&rq->lock, flags); 491 raw_spin_unlock_irqrestore(&rq->lock, flags);
492} 492}
493 493
494static int balance_runtime(struct rt_rq *rt_rq) 494static int balance_runtime(struct rt_rq *rt_rq)
@@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
496 int more = 0; 496 int more = 0;
497 497
498 if (rt_rq->rt_time > rt_rq->rt_runtime) { 498 if (rt_rq->rt_time > rt_rq->rt_runtime) {
499 spin_unlock(&rt_rq->rt_runtime_lock); 499 raw_spin_unlock(&rt_rq->rt_runtime_lock);
500 more = do_balance_runtime(rt_rq); 500 more = do_balance_runtime(rt_rq);
501 spin_lock(&rt_rq->rt_runtime_lock); 501 raw_spin_lock(&rt_rq->rt_runtime_lock);
502 } 502 }
503 503
504 return more; 504 return more;
@@ -524,11 +524,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
524 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 524 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
525 struct rq *rq = rq_of_rt_rq(rt_rq); 525 struct rq *rq = rq_of_rt_rq(rt_rq);
526 526
527 spin_lock(&rq->lock); 527 raw_spin_lock(&rq->lock);
528 if (rt_rq->rt_time) { 528 if (rt_rq->rt_time) {
529 u64 runtime; 529 u64 runtime;
530 530
531 spin_lock(&rt_rq->rt_runtime_lock); 531 raw_spin_lock(&rt_rq->rt_runtime_lock);
532 if (rt_rq->rt_throttled) 532 if (rt_rq->rt_throttled)
533 balance_runtime(rt_rq); 533 balance_runtime(rt_rq);
534 runtime = rt_rq->rt_runtime; 534 runtime = rt_rq->rt_runtime;
@@ -539,13 +539,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
539 } 539 }
540 if (rt_rq->rt_time || rt_rq->rt_nr_running) 540 if (rt_rq->rt_time || rt_rq->rt_nr_running)
541 idle = 0; 541 idle = 0;
542 spin_unlock(&rt_rq->rt_runtime_lock); 542 raw_spin_unlock(&rt_rq->rt_runtime_lock);
543 } else if (rt_rq->rt_nr_running) 543 } else if (rt_rq->rt_nr_running)
544 idle = 0; 544 idle = 0;
545 545
546 if (enqueue) 546 if (enqueue)
547 sched_rt_rq_enqueue(rt_rq); 547 sched_rt_rq_enqueue(rt_rq);
548 spin_unlock(&rq->lock); 548 raw_spin_unlock(&rq->lock);
549 } 549 }
550 550
551 return idle; 551 return idle;
@@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq)
624 rt_rq = rt_rq_of_se(rt_se); 624 rt_rq = rt_rq_of_se(rt_se);
625 625
626 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { 626 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
627 spin_lock(&rt_rq->rt_runtime_lock); 627 raw_spin_lock(&rt_rq->rt_runtime_lock);
628 rt_rq->rt_time += delta_exec; 628 rt_rq->rt_time += delta_exec;
629 if (sched_rt_runtime_exceeded(rt_rq)) 629 if (sched_rt_runtime_exceeded(rt_rq))
630 resched_task(curr); 630 resched_task(curr);
631 spin_unlock(&rt_rq->rt_runtime_lock); 631 raw_spin_unlock(&rt_rq->rt_runtime_lock);
632 } 632 }
633 } 633 }
634} 634}
@@ -1246,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1246 task_running(rq, task) || 1246 task_running(rq, task) ||
1247 !task->se.on_rq)) { 1247 !task->se.on_rq)) {
1248 1248
1249 spin_unlock(&lowest_rq->lock); 1249 raw_spin_unlock(&lowest_rq->lock);
1250 lowest_rq = NULL; 1250 lowest_rq = NULL;
1251 break; 1251 break;
1252 } 1252 }
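
The sched_rt.c hunks preserve a strict nesting order between the two runtime locks: the per-group bandwidth lock (rt_b->rt_runtime_lock) is always taken before any per-runqueue lock (rt_rq->rt_runtime_lock or iter->rt_runtime_lock), which is why balance_runtime() drops its own runtime lock before calling do_balance_runtime(). A sketch of the ordering; the helper and toy structs are illustrative, while the field names mirror the patch:

#include <linux/spinlock.h>
#include <linux/types.h>

struct toy_bw    { raw_spinlock_t rt_runtime_lock; };
struct toy_rt_rq { raw_spinlock_t rt_runtime_lock; u64 rt_runtime; };

static void move_runtime(struct toy_bw *rt_b,
			 struct toy_rt_rq *src, struct toy_rt_rq *dst,
			 u64 diff)
{
	/* Outer lock first: the group-wide bandwidth lock. */
	raw_spin_lock(&rt_b->rt_runtime_lock);

	/* Inner locks: per-runqueue runtime locks, one at a time. */
	raw_spin_lock(&src->rt_runtime_lock);
	src->rt_runtime -= diff;
	raw_spin_unlock(&src->rt_runtime_lock);

	raw_spin_lock(&dst->rt_runtime_lock);
	dst->rt_runtime += diff;
	raw_spin_unlock(&dst->rt_runtime_lock);

	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

Taking the inner lock before the outer one anywhere would set up an AB-BA deadlock against do_balance_runtime().
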
diff --git a/kernel/smp.c b/kernel/smp.c
index 00a1d0ede532..de735a6637d0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
16 16
17static struct { 17static struct {
18 struct list_head queue; 18 struct list_head queue;
19 spinlock_t lock; 19 raw_spinlock_t lock;
20} call_function __cacheline_aligned_in_smp = 20} call_function __cacheline_aligned_in_smp =
21 { 21 {
22 .queue = LIST_HEAD_INIT(call_function.queue), 22 .queue = LIST_HEAD_INIT(call_function.queue),
23 .lock = __SPIN_LOCK_UNLOCKED(call_function.lock), 23 .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
24 }; 24 };
25 25
26enum { 26enum {
@@ -35,7 +35,7 @@ struct call_function_data {
35 35
36struct call_single_queue { 36struct call_single_queue {
37 struct list_head list; 37 struct list_head list;
38 spinlock_t lock; 38 raw_spinlock_t lock;
39}; 39};
40 40
41static DEFINE_PER_CPU(struct call_function_data, cfd_data); 41static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
80 for_each_possible_cpu(i) { 80 for_each_possible_cpu(i) {
81 struct call_single_queue *q = &per_cpu(call_single_queue, i); 81 struct call_single_queue *q = &per_cpu(call_single_queue, i);
82 82
83 spin_lock_init(&q->lock); 83 raw_spin_lock_init(&q->lock);
84 INIT_LIST_HEAD(&q->list); 84 INIT_LIST_HEAD(&q->list);
85 } 85 }
86 86
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
141 unsigned long flags; 141 unsigned long flags;
142 int ipi; 142 int ipi;
143 143
144 spin_lock_irqsave(&dst->lock, flags); 144 raw_spin_lock_irqsave(&dst->lock, flags);
145 ipi = list_empty(&dst->list); 145 ipi = list_empty(&dst->list);
146 list_add_tail(&data->list, &dst->list); 146 list_add_tail(&data->list, &dst->list);
147 spin_unlock_irqrestore(&dst->lock, flags); 147 raw_spin_unlock_irqrestore(&dst->lock, flags);
148 148
149 /* 149 /*
150 * The list addition should be visible before sending the IPI 150 * The list addition should be visible before sending the IPI
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
201 refs = atomic_dec_return(&data->refs); 201 refs = atomic_dec_return(&data->refs);
202 WARN_ON(refs < 0); 202 WARN_ON(refs < 0);
203 if (!refs) { 203 if (!refs) {
204 spin_lock(&call_function.lock); 204 raw_spin_lock(&call_function.lock);
205 list_del_rcu(&data->csd.list); 205 list_del_rcu(&data->csd.list);
206 spin_unlock(&call_function.lock); 206 raw_spin_unlock(&call_function.lock);
207 } 207 }
208 208
209 if (refs) 209 if (refs)
@@ -229,9 +229,9 @@ void generic_smp_call_function_single_interrupt(void)
229 */ 229 */
230 WARN_ON_ONCE(!cpu_online(smp_processor_id())); 230 WARN_ON_ONCE(!cpu_online(smp_processor_id()));
231 231
232 spin_lock(&q->lock); 232 raw_spin_lock(&q->lock);
233 list_replace_init(&q->list, &list); 233 list_replace_init(&q->list, &list);
234 spin_unlock(&q->lock); 234 raw_spin_unlock(&q->lock);
235 235
236 while (!list_empty(&list)) { 236 while (!list_empty(&list)) {
237 struct call_single_data *data; 237 struct call_single_data *data;
@@ -448,14 +448,14 @@ void smp_call_function_many(const struct cpumask *mask,
448 cpumask_clear_cpu(this_cpu, data->cpumask); 448 cpumask_clear_cpu(this_cpu, data->cpumask);
449 atomic_set(&data->refs, cpumask_weight(data->cpumask)); 449 atomic_set(&data->refs, cpumask_weight(data->cpumask));
450 450
451 spin_lock_irqsave(&call_function.lock, flags); 451 raw_spin_lock_irqsave(&call_function.lock, flags);
452 /* 452 /*
453 * Place entry at the _HEAD_ of the list, so that any cpu still 453 * Place entry at the _HEAD_ of the list, so that any cpu still
454 * observing the entry in generic_smp_call_function_interrupt() 454 * observing the entry in generic_smp_call_function_interrupt()
455 * will not miss any other list entries: 455 * will not miss any other list entries:
456 */ 456 */
457 list_add_rcu(&data->csd.list, &call_function.queue); 457 list_add_rcu(&data->csd.list, &call_function.queue);
458 spin_unlock_irqrestore(&call_function.lock, flags); 458 raw_spin_unlock_irqrestore(&call_function.lock, flags);
459 459
460 /* 460 /*
461 * Make the list addition visible before sending the ipi. 461 * Make the list addition visible before sending the ipi.
@@ -500,20 +500,20 @@ EXPORT_SYMBOL(smp_call_function);
500 500
501void ipi_call_lock(void) 501void ipi_call_lock(void)
502{ 502{
503 spin_lock(&call_function.lock); 503 raw_spin_lock(&call_function.lock);
504} 504}
505 505
506void ipi_call_unlock(void) 506void ipi_call_unlock(void)
507{ 507{
508 spin_unlock(&call_function.lock); 508 raw_spin_unlock(&call_function.lock);
509} 509}
510 510
511void ipi_call_lock_irq(void) 511void ipi_call_lock_irq(void)
512{ 512{
513 spin_lock_irq(&call_function.lock); 513 raw_spin_lock_irq(&call_function.lock);
514} 514}
515 515
516void ipi_call_unlock_irq(void) 516void ipi_call_unlock_irq(void)
517{ 517{
518 spin_unlock_irq(&call_function.lock); 518 raw_spin_unlock_irq(&call_function.lock);
519} 519}
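
The smp.c hunks cover all three ways a raw spinlock gets initialized, and each spinlock_t initializer has a raw counterpart. A self-contained sketch (the example_* names are made up):

#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* file-scope definition */

static struct {
	struct list_head queue;
	raw_spinlock_t lock;
} example_q = {
	.queue = LIST_HEAD_INIT(example_q.queue),
	.lock  = __RAW_SPIN_LOCK_UNLOCKED(example_q.lock),  /* in-struct init */
};

static DEFINE_PER_CPU(raw_spinlock_t, example_pcpu_lock);

static int __init example_init(void)
{
	int cpu;

	/* per-cpu instances are initialized at runtime, as in
	 * init_call_single_data() above */
	for_each_possible_cpu(cpu)
		raw_spin_lock_init(&per_cpu(example_pcpu_lock, cpu));
	return 0;
}
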
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 41e042219ff6..be6517fb9c14 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -32,6 +32,8 @@
32 * include/linux/spinlock_api_smp.h 32 * include/linux/spinlock_api_smp.h
33 */ 33 */
34#else 34#else
35#define raw_read_can_lock(l) read_can_lock(l)
36#define raw_write_can_lock(l) write_can_lock(l)
35/* 37/*
36 * We build the __lock_function inlines here. They are too large for 38 * We build the __lock_function inlines here. They are too large for
37 * inlining all over the place, but here is only one user per function 39 * inlining all over the place, but here is only one user per function
@@ -42,49 +44,49 @@
42 * towards that other CPU that it should break the lock ASAP. 44 * towards that other CPU that it should break the lock ASAP.
43 */ 45 */
44#define BUILD_LOCK_OPS(op, locktype) \ 46#define BUILD_LOCK_OPS(op, locktype) \
45void __lockfunc __##op##_lock(locktype##_t *lock) \ 47void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
46{ \ 48{ \
47 for (;;) { \ 49 for (;;) { \
48 preempt_disable(); \ 50 preempt_disable(); \
49 if (likely(_raw_##op##_trylock(lock))) \ 51 if (likely(do_raw_##op##_trylock(lock))) \
50 break; \ 52 break; \
51 preempt_enable(); \ 53 preempt_enable(); \
52 \ 54 \
53 if (!(lock)->break_lock) \ 55 if (!(lock)->break_lock) \
54 (lock)->break_lock = 1; \ 56 (lock)->break_lock = 1; \
55 while (!op##_can_lock(lock) && (lock)->break_lock) \ 57 while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
56 _raw_##op##_relax(&lock->raw_lock); \ 58 arch_##op##_relax(&lock->raw_lock); \
57 } \ 59 } \
58 (lock)->break_lock = 0; \ 60 (lock)->break_lock = 0; \
59} \ 61} \
60 \ 62 \
61unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ 63unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
62{ \ 64{ \
63 unsigned long flags; \ 65 unsigned long flags; \
64 \ 66 \
65 for (;;) { \ 67 for (;;) { \
66 preempt_disable(); \ 68 preempt_disable(); \
67 local_irq_save(flags); \ 69 local_irq_save(flags); \
68 if (likely(_raw_##op##_trylock(lock))) \ 70 if (likely(do_raw_##op##_trylock(lock))) \
69 break; \ 71 break; \
70 local_irq_restore(flags); \ 72 local_irq_restore(flags); \
71 preempt_enable(); \ 73 preempt_enable(); \
72 \ 74 \
73 if (!(lock)->break_lock) \ 75 if (!(lock)->break_lock) \
74 (lock)->break_lock = 1; \ 76 (lock)->break_lock = 1; \
75 while (!op##_can_lock(lock) && (lock)->break_lock) \ 77 while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
76 _raw_##op##_relax(&lock->raw_lock); \ 78 arch_##op##_relax(&lock->raw_lock); \
77 } \ 79 } \
78 (lock)->break_lock = 0; \ 80 (lock)->break_lock = 0; \
79 return flags; \ 81 return flags; \
80} \ 82} \
81 \ 83 \
82void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ 84void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
83{ \ 85{ \
84 _##op##_lock_irqsave(lock); \ 86 _raw_##op##_lock_irqsave(lock); \
85} \ 87} \
86 \ 88 \
87void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ 89void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
88{ \ 90{ \
89 unsigned long flags; \ 91 unsigned long flags; \
90 \ 92 \
@@ -93,7 +95,7 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
93 /* irq-disabling. We use the generic preemption-aware */ \ 95 /* irq-disabling. We use the generic preemption-aware */ \
94 /* function: */ \ 96 /* function: */ \
95 /**/ \ 97 /**/ \
96 flags = _##op##_lock_irqsave(lock); \ 98 flags = _raw_##op##_lock_irqsave(lock); \
97 local_bh_disable(); \ 99 local_bh_disable(); \
98 local_irq_restore(flags); \ 100 local_irq_restore(flags); \
99} \ 101} \
@@ -107,269 +109,269 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
107 * __[spin|read|write]_lock_irqsave() 109 * __[spin|read|write]_lock_irqsave()
108 * __[spin|read|write]_lock_bh() 110 * __[spin|read|write]_lock_bh()
109 */ 111 */
110BUILD_LOCK_OPS(spin, spinlock); 112BUILD_LOCK_OPS(spin, raw_spinlock);
111BUILD_LOCK_OPS(read, rwlock); 113BUILD_LOCK_OPS(read, rwlock);
112BUILD_LOCK_OPS(write, rwlock); 114BUILD_LOCK_OPS(write, rwlock);
113 115
114#endif 116#endif
115 117
116#ifdef CONFIG_DEBUG_LOCK_ALLOC 118#ifndef CONFIG_INLINE_SPIN_TRYLOCK
117 119int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
118void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
119{ 120{
120 preempt_disable(); 121 return __raw_spin_trylock(lock);
121 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
122 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
123} 122}
124EXPORT_SYMBOL(_spin_lock_nested); 123EXPORT_SYMBOL(_raw_spin_trylock);
124#endif
125 125
126unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, 126#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
127 int subclass) 127int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
128{ 128{
129 unsigned long flags; 129 return __raw_spin_trylock_bh(lock);
130
131 local_irq_save(flags);
132 preempt_disable();
133 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
134 LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
135 _raw_spin_lock_flags, &flags);
136 return flags;
137} 130}
138EXPORT_SYMBOL(_spin_lock_irqsave_nested); 131EXPORT_SYMBOL(_raw_spin_trylock_bh);
132#endif
139 133
140void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, 134#ifndef CONFIG_INLINE_SPIN_LOCK
141 struct lockdep_map *nest_lock) 135void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
142{ 136{
143 preempt_disable(); 137 __raw_spin_lock(lock);
144 spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
145 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
146} 138}
147EXPORT_SYMBOL(_spin_lock_nest_lock); 139EXPORT_SYMBOL(_raw_spin_lock);
148
149#endif 140#endif
150 141
151#ifndef CONFIG_INLINE_SPIN_TRYLOCK 142#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
152int __lockfunc _spin_trylock(spinlock_t *lock) 143unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
153{ 144{
154 return __spin_trylock(lock); 145 return __raw_spin_lock_irqsave(lock);
155} 146}
156EXPORT_SYMBOL(_spin_trylock); 147EXPORT_SYMBOL(_raw_spin_lock_irqsave);
157#endif 148#endif
158 149
159#ifndef CONFIG_INLINE_READ_TRYLOCK 150#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
160int __lockfunc _read_trylock(rwlock_t *lock) 151void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
161{ 152{
162 return __read_trylock(lock); 153 __raw_spin_lock_irq(lock);
163} 154}
164EXPORT_SYMBOL(_read_trylock); 155EXPORT_SYMBOL(_raw_spin_lock_irq);
165#endif 156#endif
166 157
167#ifndef CONFIG_INLINE_WRITE_TRYLOCK 158#ifndef CONFIG_INLINE_SPIN_LOCK_BH
168int __lockfunc _write_trylock(rwlock_t *lock) 159void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
169{ 160{
170 return __write_trylock(lock); 161 __raw_spin_lock_bh(lock);
171} 162}
172EXPORT_SYMBOL(_write_trylock); 163EXPORT_SYMBOL(_raw_spin_lock_bh);
173#endif 164#endif
174 165
175#ifndef CONFIG_INLINE_READ_LOCK 166#ifndef CONFIG_INLINE_SPIN_UNLOCK
176void __lockfunc _read_lock(rwlock_t *lock) 167void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
177{ 168{
178 __read_lock(lock); 169 __raw_spin_unlock(lock);
179} 170}
180EXPORT_SYMBOL(_read_lock); 171EXPORT_SYMBOL(_raw_spin_unlock);
181#endif 172#endif
182 173
183#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE 174#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
184unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) 175void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
185{ 176{
186 return __spin_lock_irqsave(lock); 177 __raw_spin_unlock_irqrestore(lock, flags);
187} 178}
188EXPORT_SYMBOL(_spin_lock_irqsave); 179EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
189#endif 180#endif
190 181
191#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ 182#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
192void __lockfunc _spin_lock_irq(spinlock_t *lock) 183void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
193{ 184{
194 __spin_lock_irq(lock); 185 __raw_spin_unlock_irq(lock);
195} 186}
196EXPORT_SYMBOL(_spin_lock_irq); 187EXPORT_SYMBOL(_raw_spin_unlock_irq);
197#endif 188#endif
198 189
199#ifndef CONFIG_INLINE_SPIN_LOCK_BH 190#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
200void __lockfunc _spin_lock_bh(spinlock_t *lock) 191void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
201{ 192{
202 __spin_lock_bh(lock); 193 __raw_spin_unlock_bh(lock);
203} 194}
204EXPORT_SYMBOL(_spin_lock_bh); 195EXPORT_SYMBOL(_raw_spin_unlock_bh);
205#endif 196#endif
206 197
207#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE 198#ifndef CONFIG_INLINE_READ_TRYLOCK
208unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) 199int __lockfunc _raw_read_trylock(rwlock_t *lock)
209{ 200{
210 return __read_lock_irqsave(lock); 201 return __raw_read_trylock(lock);
211} 202}
212EXPORT_SYMBOL(_read_lock_irqsave); 203EXPORT_SYMBOL(_raw_read_trylock);
213#endif 204#endif
214 205
215#ifndef CONFIG_INLINE_READ_LOCK_IRQ 206#ifndef CONFIG_INLINE_READ_LOCK
216void __lockfunc _read_lock_irq(rwlock_t *lock) 207void __lockfunc _raw_read_lock(rwlock_t *lock)
217{ 208{
218 __read_lock_irq(lock); 209 __raw_read_lock(lock);
219} 210}
220EXPORT_SYMBOL(_read_lock_irq); 211EXPORT_SYMBOL(_raw_read_lock);
221#endif 212#endif
222 213
223#ifndef CONFIG_INLINE_READ_LOCK_BH 214#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
224void __lockfunc _read_lock_bh(rwlock_t *lock) 215unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
225{ 216{
226 __read_lock_bh(lock); 217 return __raw_read_lock_irqsave(lock);
227} 218}
228EXPORT_SYMBOL(_read_lock_bh); 219EXPORT_SYMBOL(_raw_read_lock_irqsave);
229#endif 220#endif
230 221
231#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE 222#ifndef CONFIG_INLINE_READ_LOCK_IRQ
232unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) 223void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
233{ 224{
234 return __write_lock_irqsave(lock); 225 __raw_read_lock_irq(lock);
235} 226}
236EXPORT_SYMBOL(_write_lock_irqsave); 227EXPORT_SYMBOL(_raw_read_lock_irq);
237#endif 228#endif
238 229
239#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ 230#ifndef CONFIG_INLINE_READ_LOCK_BH
240void __lockfunc _write_lock_irq(rwlock_t *lock) 231void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
241{ 232{
242 __write_lock_irq(lock); 233 __raw_read_lock_bh(lock);
243} 234}
244EXPORT_SYMBOL(_write_lock_irq); 235EXPORT_SYMBOL(_raw_read_lock_bh);
245#endif 236#endif
246 237
247#ifndef CONFIG_INLINE_WRITE_LOCK_BH 238#ifndef CONFIG_INLINE_READ_UNLOCK
248void __lockfunc _write_lock_bh(rwlock_t *lock) 239void __lockfunc _raw_read_unlock(rwlock_t *lock)
249{ 240{
250 __write_lock_bh(lock); 241 __raw_read_unlock(lock);
251} 242}
252EXPORT_SYMBOL(_write_lock_bh); 243EXPORT_SYMBOL(_raw_read_unlock);
253#endif 244#endif
254 245
255#ifndef CONFIG_INLINE_SPIN_LOCK 246#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
256void __lockfunc _spin_lock(spinlock_t *lock) 247void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
257{ 248{
258 __spin_lock(lock); 249 __raw_read_unlock_irqrestore(lock, flags);
259} 250}
260EXPORT_SYMBOL(_spin_lock); 251EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
261#endif 252#endif
262 253
263#ifndef CONFIG_INLINE_WRITE_LOCK 254#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
264void __lockfunc _write_lock(rwlock_t *lock) 255void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
265{ 256{
266 __write_lock(lock); 257 __raw_read_unlock_irq(lock);
267} 258}
268EXPORT_SYMBOL(_write_lock); 259EXPORT_SYMBOL(_raw_read_unlock_irq);
269#endif 260#endif
270 261
271#ifndef CONFIG_INLINE_SPIN_UNLOCK 262#ifndef CONFIG_INLINE_READ_UNLOCK_BH
272void __lockfunc _spin_unlock(spinlock_t *lock) 263void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
273{ 264{
274 __spin_unlock(lock); 265 __raw_read_unlock_bh(lock);
275} 266}
276EXPORT_SYMBOL(_spin_unlock); 267EXPORT_SYMBOL(_raw_read_unlock_bh);
277#endif 268#endif
278 269
279#ifndef CONFIG_INLINE_WRITE_UNLOCK 270#ifndef CONFIG_INLINE_WRITE_TRYLOCK
280void __lockfunc _write_unlock(rwlock_t *lock) 271int __lockfunc _raw_write_trylock(rwlock_t *lock)
281{ 272{
282 __write_unlock(lock); 273 return __raw_write_trylock(lock);
283} 274}
284EXPORT_SYMBOL(_write_unlock); 275EXPORT_SYMBOL(_raw_write_trylock);
285#endif 276#endif
286 277
287#ifndef CONFIG_INLINE_READ_UNLOCK 278#ifndef CONFIG_INLINE_WRITE_LOCK
288void __lockfunc _read_unlock(rwlock_t *lock) 279void __lockfunc _raw_write_lock(rwlock_t *lock)
289{ 280{
290 __read_unlock(lock); 281 __raw_write_lock(lock);
291} 282}
292EXPORT_SYMBOL(_read_unlock); 283EXPORT_SYMBOL(_raw_write_lock);
293#endif 284#endif
294 285
295#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE 286#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
296void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 287unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
297{ 288{
298 __spin_unlock_irqrestore(lock, flags); 289 return __raw_write_lock_irqsave(lock);
299} 290}
300EXPORT_SYMBOL(_spin_unlock_irqrestore); 291EXPORT_SYMBOL(_raw_write_lock_irqsave);
301#endif 292#endif
302 293
303#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ 294#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
304void __lockfunc _spin_unlock_irq(spinlock_t *lock) 295void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
305{ 296{
306 __spin_unlock_irq(lock); 297 __raw_write_lock_irq(lock);
307} 298}
308EXPORT_SYMBOL(_spin_unlock_irq); 299EXPORT_SYMBOL(_raw_write_lock_irq);
309#endif 300#endif
310 301
311#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH 302#ifndef CONFIG_INLINE_WRITE_LOCK_BH
312void __lockfunc _spin_unlock_bh(spinlock_t *lock) 303void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
313{ 304{
314 __spin_unlock_bh(lock); 305 __raw_write_lock_bh(lock);
315} 306}
316EXPORT_SYMBOL(_spin_unlock_bh); 307EXPORT_SYMBOL(_raw_write_lock_bh);
317#endif 308#endif
318 309
319#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE 310#ifndef CONFIG_INLINE_WRITE_UNLOCK
320void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 311void __lockfunc _raw_write_unlock(rwlock_t *lock)
321{ 312{
322 __read_unlock_irqrestore(lock, flags); 313 __raw_write_unlock(lock);
323} 314}
324EXPORT_SYMBOL(_read_unlock_irqrestore); 315EXPORT_SYMBOL(_raw_write_unlock);
325#endif 316#endif
326 317
327#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ 318#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
328void __lockfunc _read_unlock_irq(rwlock_t *lock) 319void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
329{ 320{
330 __read_unlock_irq(lock); 321 __raw_write_unlock_irqrestore(lock, flags);
331} 322}
332EXPORT_SYMBOL(_read_unlock_irq); 323EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
333#endif 324#endif
334 325
335#ifndef CONFIG_INLINE_READ_UNLOCK_BH 326#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
336void __lockfunc _read_unlock_bh(rwlock_t *lock) 327void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
337{ 328{
338 __read_unlock_bh(lock); 329 __raw_write_unlock_irq(lock);
339} 330}
340EXPORT_SYMBOL(_read_unlock_bh); 331EXPORT_SYMBOL(_raw_write_unlock_irq);
341#endif 332#endif
342 333
343#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE 334#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
344void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 335void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
345{ 336{
346 __write_unlock_irqrestore(lock, flags); 337 __raw_write_unlock_bh(lock);
347} 338}
348EXPORT_SYMBOL(_write_unlock_irqrestore); 339EXPORT_SYMBOL(_raw_write_unlock_bh);
349#endif 340#endif
350 341
351#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ 342#ifdef CONFIG_DEBUG_LOCK_ALLOC
352void __lockfunc _write_unlock_irq(rwlock_t *lock) 343
344void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
353{ 345{
354 __write_unlock_irq(lock); 346 preempt_disable();
347 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
348 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
355} 349}
356EXPORT_SYMBOL(_write_unlock_irq); 350EXPORT_SYMBOL(_raw_spin_lock_nested);
357#endif
358 351
359#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH 352unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
360void __lockfunc _write_unlock_bh(rwlock_t *lock) 353 int subclass)
361{ 354{
362 __write_unlock_bh(lock); 355 unsigned long flags;
356
357 local_irq_save(flags);
358 preempt_disable();
359 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
360 LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
361 do_raw_spin_lock_flags, &flags);
362 return flags;
363} 363}
364EXPORT_SYMBOL(_write_unlock_bh); 364EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
365#endif
366 365
367#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH 366void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
368int __lockfunc _spin_trylock_bh(spinlock_t *lock) 367 struct lockdep_map *nest_lock)
369{ 368{
370 return __spin_trylock_bh(lock); 369 preempt_disable();
370 spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
371 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
371} 372}
372EXPORT_SYMBOL(_spin_trylock_bh); 373EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
374
373#endif 375#endif
374 376
375notrace int in_lock_functions(unsigned long addr) 377notrace int in_lock_functions(unsigned long addr)
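
The spinlock.c rewrite above is the heart of the name-space cleanup: the old _spin_* API functions become _raw_spin_*, the old low-level _raw_* helpers become do_raw_*, and the architecture layer becomes arch_*. A hedged sketch of the resulting call chain, simplified from include/linux/spinlock*.h rather than quoted verbatim:

/*
 * spin_lock(&s)                    static inline, takes spinlock_t *
 *   raw_spin_lock(&s->rlock)       macro, takes raw_spinlock_t *
 *     _raw_spin_lock(lock)         API entry point built in this file
 *                                  (out of line unless CONFIG_INLINE_SPIN_LOCK)
 *       __raw_spin_lock(lock)      preempt_disable() + lockdep acquire
 *         do_raw_spin_lock(lock)   debug checks under CONFIG_DEBUG_SPINLOCK
 *           arch_spin_lock(&lock->raw_lock)   the per-arch spin loop
 */

Read this way, the sched_yield() hunk earlier in this section, which swapped _raw_spin_unlock() for do_raw_spin_unlock(), is not a behavioral change: it is the same hand-rolled unlock, renamed one level down the new hierarchy.
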
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 20a8920029ee..3d5fc0fd1cca 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -30,7 +30,7 @@ static LIST_HEAD(clockevents_released);
30static RAW_NOTIFIER_HEAD(clockevents_chain); 30static RAW_NOTIFIER_HEAD(clockevents_chain);
31 31
32/* Protection for the above */ 32/* Protection for the above */
33static DEFINE_SPINLOCK(clockevents_lock); 33static DEFINE_RAW_SPINLOCK(clockevents_lock);
34 34
35/** 35/**
36 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds 36 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
@@ -141,9 +141,9 @@ int clockevents_register_notifier(struct notifier_block *nb)
141 unsigned long flags; 141 unsigned long flags;
142 int ret; 142 int ret;
143 143
144 spin_lock_irqsave(&clockevents_lock, flags); 144 raw_spin_lock_irqsave(&clockevents_lock, flags);
145 ret = raw_notifier_chain_register(&clockevents_chain, nb); 145 ret = raw_notifier_chain_register(&clockevents_chain, nb);
146 spin_unlock_irqrestore(&clockevents_lock, flags); 146 raw_spin_unlock_irqrestore(&clockevents_lock, flags);
147 147
148 return ret; 148 return ret;
149} 149}
@@ -185,13 +185,13 @@ void clockevents_register_device(struct clock_event_device *dev)
185 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 185 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
186 BUG_ON(!dev->cpumask); 186 BUG_ON(!dev->cpumask);
187 187
188 spin_lock_irqsave(&clockevents_lock, flags); 188 raw_spin_lock_irqsave(&clockevents_lock, flags);
189 189
190 list_add(&dev->list, &clockevent_devices); 190 list_add(&dev->list, &clockevent_devices);
191 clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); 191 clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
192 clockevents_notify_released(); 192 clockevents_notify_released();
193 193
194 spin_unlock_irqrestore(&clockevents_lock, flags); 194 raw_spin_unlock_irqrestore(&clockevents_lock, flags);
195} 195}
196EXPORT_SYMBOL_GPL(clockevents_register_device); 196EXPORT_SYMBOL_GPL(clockevents_register_device);
197 197
@@ -241,7 +241,7 @@ void clockevents_notify(unsigned long reason, void *arg)
241 struct list_head *node, *tmp; 241 struct list_head *node, *tmp;
242 unsigned long flags; 242 unsigned long flags;
243 243
244 spin_lock_irqsave(&clockevents_lock, flags); 244 raw_spin_lock_irqsave(&clockevents_lock, flags);
245 clockevents_do_notify(reason, arg); 245 clockevents_do_notify(reason, arg);
246 246
247 switch (reason) { 247 switch (reason) {
@@ -256,7 +256,7 @@ void clockevents_notify(unsigned long reason, void *arg)
256 default: 256 default:
257 break; 257 break;
258 } 258 }
259 spin_unlock_irqrestore(&clockevents_lock, flags); 259 raw_spin_unlock_irqrestore(&clockevents_lock, flags);
260} 260}
261EXPORT_SYMBOL_GPL(clockevents_notify); 261EXPORT_SYMBOL_GPL(clockevents_notify);
262#endif 262#endif
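
The clockevents hunks show the whole conversion recipe for a lock that must remain a busy-waiting spinlock even where spinlock_t becomes a sleeping lock (as on preempt-rt): change the definition and every acquire/release site in lockstep. A hedged sketch with an invented lock name:

    /* hypothetical example; formerly: static DEFINE_SPINLOCK(hypo_lock); */
    static DEFINE_RAW_SPINLOCK(hypo_lock);

    static void hypo_touch(void)
    {
        unsigned long flags;

        /* every spin_lock_irqsave() caller becomes raw_spin_lock_irqsave() */
        raw_spin_lock_irqsave(&hypo_lock, flags);
        /* ... short, non-sleeping critical section ... */
        raw_spin_unlock_irqrestore(&hypo_lock, flags);
    }
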
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index c2ec25087a35..b3bafd5fc66d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -31,7 +31,7 @@ static struct tick_device tick_broadcast_device;
31/* FIXME: Use cpumask_var_t. */ 31/* FIXME: Use cpumask_var_t. */
32static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); 32static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
33static DECLARE_BITMAP(tmpmask, NR_CPUS); 33static DECLARE_BITMAP(tmpmask, NR_CPUS);
34static DEFINE_SPINLOCK(tick_broadcast_lock); 34static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
35static int tick_broadcast_force; 35static int tick_broadcast_force;
36 36
37#ifdef CONFIG_TICK_ONESHOT 37#ifdef CONFIG_TICK_ONESHOT
@@ -96,7 +96,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
96 unsigned long flags; 96 unsigned long flags;
97 int ret = 0; 97 int ret = 0;
98 98
99 spin_lock_irqsave(&tick_broadcast_lock, flags); 99 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
100 100
101 /* 101 /*
102 * Devices might be registered with both periodic and oneshot 102 * Devices might be registered with both periodic and oneshot
@@ -122,7 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
122 tick_broadcast_clear_oneshot(cpu); 122 tick_broadcast_clear_oneshot(cpu);
123 } 123 }
124 } 124 }
125 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 125 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
126 return ret; 126 return ret;
127} 127}
128 128
@@ -161,13 +161,13 @@ static void tick_do_broadcast(struct cpumask *mask)
161 */ 161 */
162static void tick_do_periodic_broadcast(void) 162static void tick_do_periodic_broadcast(void)
163{ 163{
164 spin_lock(&tick_broadcast_lock); 164 raw_spin_lock(&tick_broadcast_lock);
165 165
166 cpumask_and(to_cpumask(tmpmask), 166 cpumask_and(to_cpumask(tmpmask),
167 cpu_online_mask, tick_get_broadcast_mask()); 167 cpu_online_mask, tick_get_broadcast_mask());
168 tick_do_broadcast(to_cpumask(tmpmask)); 168 tick_do_broadcast(to_cpumask(tmpmask));
169 169
170 spin_unlock(&tick_broadcast_lock); 170 raw_spin_unlock(&tick_broadcast_lock);
171} 171}
172 172
173/* 173/*
@@ -212,7 +212,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
212 unsigned long flags; 212 unsigned long flags;
213 int cpu, bc_stopped; 213 int cpu, bc_stopped;
214 214
215 spin_lock_irqsave(&tick_broadcast_lock, flags); 215 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
216 216
217 cpu = smp_processor_id(); 217 cpu = smp_processor_id();
218 td = &per_cpu(tick_cpu_device, cpu); 218 td = &per_cpu(tick_cpu_device, cpu);
@@ -263,7 +263,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
263 tick_broadcast_setup_oneshot(bc); 263 tick_broadcast_setup_oneshot(bc);
264 } 264 }
265out: 265out:
266 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 266 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
267} 267}
268 268
269/* 269/*
@@ -299,7 +299,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
299 unsigned long flags; 299 unsigned long flags;
300 unsigned int cpu = *cpup; 300 unsigned int cpu = *cpup;
301 301
302 spin_lock_irqsave(&tick_broadcast_lock, flags); 302 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
303 303
304 bc = tick_broadcast_device.evtdev; 304 bc = tick_broadcast_device.evtdev;
305 cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); 305 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
@@ -309,7 +309,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
309 clockevents_shutdown(bc); 309 clockevents_shutdown(bc);
310 } 310 }
311 311
312 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 312 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
313} 313}
314 314
315void tick_suspend_broadcast(void) 315void tick_suspend_broadcast(void)
@@ -317,13 +317,13 @@ void tick_suspend_broadcast(void)
317 struct clock_event_device *bc; 317 struct clock_event_device *bc;
318 unsigned long flags; 318 unsigned long flags;
319 319
320 spin_lock_irqsave(&tick_broadcast_lock, flags); 320 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
321 321
322 bc = tick_broadcast_device.evtdev; 322 bc = tick_broadcast_device.evtdev;
323 if (bc) 323 if (bc)
324 clockevents_shutdown(bc); 324 clockevents_shutdown(bc);
325 325
326 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 326 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
327} 327}
328 328
329int tick_resume_broadcast(void) 329int tick_resume_broadcast(void)
@@ -332,7 +332,7 @@ int tick_resume_broadcast(void)
332 unsigned long flags; 332 unsigned long flags;
333 int broadcast = 0; 333 int broadcast = 0;
334 334
335 spin_lock_irqsave(&tick_broadcast_lock, flags); 335 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
336 336
337 bc = tick_broadcast_device.evtdev; 337 bc = tick_broadcast_device.evtdev;
338 338
@@ -351,7 +351,7 @@ int tick_resume_broadcast(void)
351 break; 351 break;
352 } 352 }
353 } 353 }
354 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 354 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
355 355
356 return broadcast; 356 return broadcast;
357} 357}
@@ -405,7 +405,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
405 ktime_t now, next_event; 405 ktime_t now, next_event;
406 int cpu; 406 int cpu;
407 407
408 spin_lock(&tick_broadcast_lock); 408 raw_spin_lock(&tick_broadcast_lock);
409again: 409again:
410 dev->next_event.tv64 = KTIME_MAX; 410 dev->next_event.tv64 = KTIME_MAX;
411 next_event.tv64 = KTIME_MAX; 411 next_event.tv64 = KTIME_MAX;
@@ -443,7 +443,7 @@ again:
443 if (tick_broadcast_set_event(next_event, 0)) 443 if (tick_broadcast_set_event(next_event, 0))
444 goto again; 444 goto again;
445 } 445 }
446 spin_unlock(&tick_broadcast_lock); 446 raw_spin_unlock(&tick_broadcast_lock);
447} 447}
448 448
449/* 449/*
@@ -457,7 +457,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
457 unsigned long flags; 457 unsigned long flags;
458 int cpu; 458 int cpu;
459 459
460 spin_lock_irqsave(&tick_broadcast_lock, flags); 460 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
461 461
462 /* 462 /*
463 * Periodic mode does not care about the enter/exit of power 463 * Periodic mode does not care about the enter/exit of power
@@ -492,7 +492,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
492 } 492 }
493 493
494out: 494out:
495 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 495 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
496} 496}
497 497
498/* 498/*
@@ -563,13 +563,13 @@ void tick_broadcast_switch_to_oneshot(void)
563 struct clock_event_device *bc; 563 struct clock_event_device *bc;
564 unsigned long flags; 564 unsigned long flags;
565 565
566 spin_lock_irqsave(&tick_broadcast_lock, flags); 566 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
567 567
568 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; 568 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
569 bc = tick_broadcast_device.evtdev; 569 bc = tick_broadcast_device.evtdev;
570 if (bc) 570 if (bc)
571 tick_broadcast_setup_oneshot(bc); 571 tick_broadcast_setup_oneshot(bc);
572 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 572 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
573} 573}
574 574
575 575
@@ -581,7 +581,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
581 unsigned long flags; 581 unsigned long flags;
582 unsigned int cpu = *cpup; 582 unsigned int cpu = *cpup;
583 583
584 spin_lock_irqsave(&tick_broadcast_lock, flags); 584 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
585 585
586 /* 586 /*
587 * Clear the broadcast mask flag for the dead cpu, but do not 587 * Clear the broadcast mask flag for the dead cpu, but do not
@@ -589,7 +589,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
589 */ 589 */
590 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); 590 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
591 591
592 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 592 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
593} 593}
594 594
595/* 595/*
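
Note that tick_do_periodic_broadcast() and tick_handle_oneshot_broadcast() take the plain raw_spin_lock() variant: they run from the timer interrupt, where interrupts are already disabled, so saving and restoring flags again would be wasted work. The same choice in sketch form, with invented names:

    /* hypothetical example, not from this patch */
    static DEFINE_RAW_SPINLOCK(hypo_lock);

    /* called from process context: must mask interrupts itself */
    static void hypo_from_task(void)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&hypo_lock, flags);
        /* ... */
        raw_spin_unlock_irqrestore(&hypo_lock, flags);
    }

    /* called only with interrupts already off (e.g. from the handler) */
    static void hypo_from_irq(void)
    {
        raw_spin_lock(&hypo_lock);
        /* ... */
        raw_spin_unlock(&hypo_lock);
    }
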
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 83c4417b6a3c..b6b898d2eeef 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
34ktime_t tick_next_period; 34ktime_t tick_next_period;
35ktime_t tick_period; 35ktime_t tick_period;
36int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; 36int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
37DEFINE_SPINLOCK(tick_device_lock); 37static DEFINE_RAW_SPINLOCK(tick_device_lock);
38 38
39/* 39/*
40 * Debugging: see timer_list.c 40 * Debugging: see timer_list.c
@@ -209,7 +209,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
209 int cpu, ret = NOTIFY_OK; 209 int cpu, ret = NOTIFY_OK;
210 unsigned long flags; 210 unsigned long flags;
211 211
212 spin_lock_irqsave(&tick_device_lock, flags); 212 raw_spin_lock_irqsave(&tick_device_lock, flags);
213 213
214 cpu = smp_processor_id(); 214 cpu = smp_processor_id();
215 if (!cpumask_test_cpu(cpu, newdev->cpumask)) 215 if (!cpumask_test_cpu(cpu, newdev->cpumask))
@@ -268,7 +268,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
268 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 268 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
269 tick_oneshot_notify(); 269 tick_oneshot_notify();
270 270
271 spin_unlock_irqrestore(&tick_device_lock, flags); 271 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
272 return NOTIFY_STOP; 272 return NOTIFY_STOP;
273 273
274out_bc: 274out_bc:
@@ -278,7 +278,7 @@ out_bc:
278 if (tick_check_broadcast_device(newdev)) 278 if (tick_check_broadcast_device(newdev))
279 ret = NOTIFY_STOP; 279 ret = NOTIFY_STOP;
280 280
281 spin_unlock_irqrestore(&tick_device_lock, flags); 281 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
282 282
283 return ret; 283 return ret;
284} 284}
@@ -311,7 +311,7 @@ static void tick_shutdown(unsigned int *cpup)
311 struct clock_event_device *dev = td->evtdev; 311 struct clock_event_device *dev = td->evtdev;
312 unsigned long flags; 312 unsigned long flags;
313 313
314 spin_lock_irqsave(&tick_device_lock, flags); 314 raw_spin_lock_irqsave(&tick_device_lock, flags);
315 td->mode = TICKDEV_MODE_PERIODIC; 315 td->mode = TICKDEV_MODE_PERIODIC;
316 if (dev) { 316 if (dev) {
317 /* 317 /*
@@ -322,7 +322,7 @@ static void tick_shutdown(unsigned int *cpup)
322 clockevents_exchange_device(dev, NULL); 322 clockevents_exchange_device(dev, NULL);
323 td->evtdev = NULL; 323 td->evtdev = NULL;
324 } 324 }
325 spin_unlock_irqrestore(&tick_device_lock, flags); 325 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
326} 326}
327 327
328static void tick_suspend(void) 328static void tick_suspend(void)
@@ -330,9 +330,9 @@ static void tick_suspend(void)
330 struct tick_device *td = &__get_cpu_var(tick_cpu_device); 330 struct tick_device *td = &__get_cpu_var(tick_cpu_device);
331 unsigned long flags; 331 unsigned long flags;
332 332
333 spin_lock_irqsave(&tick_device_lock, flags); 333 raw_spin_lock_irqsave(&tick_device_lock, flags);
334 clockevents_shutdown(td->evtdev); 334 clockevents_shutdown(td->evtdev);
335 spin_unlock_irqrestore(&tick_device_lock, flags); 335 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
336} 336}
337 337
338static void tick_resume(void) 338static void tick_resume(void)
@@ -341,7 +341,7 @@ static void tick_resume(void)
341 unsigned long flags; 341 unsigned long flags;
342 int broadcast = tick_resume_broadcast(); 342 int broadcast = tick_resume_broadcast();
343 343
344 spin_lock_irqsave(&tick_device_lock, flags); 344 raw_spin_lock_irqsave(&tick_device_lock, flags);
345 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); 345 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
346 346
347 if (!broadcast) { 347 if (!broadcast) {
@@ -350,7 +350,7 @@ static void tick_resume(void)
350 else 350 else
351 tick_resume_oneshot(); 351 tick_resume_oneshot();
352 } 352 }
353 spin_unlock_irqrestore(&tick_device_lock, flags); 353 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
354} 354}
355 355
356/* 356/*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index b1c05bf75ee0..290eefbc1f60 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -6,7 +6,6 @@
6#define TICK_DO_TIMER_BOOT -2 6#define TICK_DO_TIMER_BOOT -2
7 7
8DECLARE_PER_CPU(struct tick_device, tick_cpu_device); 8DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
9extern spinlock_t tick_device_lock;
10extern ktime_t tick_next_period; 9extern ktime_t tick_next_period;
11extern ktime_t tick_period; 10extern ktime_t tick_period;
12extern int tick_do_timer_cpu __read_mostly; 11extern int tick_do_timer_cpu __read_mostly;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 9d80db4747d4..28265636b6c2 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
84 84
85next_one: 85next_one:
86 i = 0; 86 i = 0;
87 spin_lock_irqsave(&base->cpu_base->lock, flags); 87 raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
88 88
89 curr = base->first; 89 curr = base->first;
90 /* 90 /*
@@ -100,13 +100,13 @@ next_one:
100 100
101 timer = rb_entry(curr, struct hrtimer, node); 101 timer = rb_entry(curr, struct hrtimer, node);
102 tmp = *timer; 102 tmp = *timer;
103 spin_unlock_irqrestore(&base->cpu_base->lock, flags); 103 raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
104 104
105 print_timer(m, timer, &tmp, i, now); 105 print_timer(m, timer, &tmp, i, now);
106 next++; 106 next++;
107 goto next_one; 107 goto next_one;
108 } 108 }
109 spin_unlock_irqrestore(&base->cpu_base->lock, flags); 109 raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
110} 110}
111 111
112static void 112static void
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 63b117e9eba1..2f3b585b8d7d 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
86/* 86/*
87 * Per-CPU lookup locks for fast hash lookup: 87 * Per-CPU lookup locks for fast hash lookup:
88 */ 88 */
89static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock); 89static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock);
90 90
91/* 91/*
92 * Mutex to serialize state changes with show-stats activities: 92 * Mutex to serialize state changes with show-stats activities:
@@ -238,7 +238,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
238 /* 238 /*
239 * It doesn't matter which lock we take: 239 * It doesn't matter which lock we take:
240 */ 240 */
241 spinlock_t *lock; 241 raw_spinlock_t *lock;
242 struct entry *entry, input; 242 struct entry *entry, input;
243 unsigned long flags; 243 unsigned long flags;
244 244
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
253 input.pid = pid; 253 input.pid = pid;
254 input.timer_flag = timer_flag; 254 input.timer_flag = timer_flag;
255 255
256 spin_lock_irqsave(lock, flags); 256 raw_spin_lock_irqsave(lock, flags);
257 if (!timer_stats_active) 257 if (!timer_stats_active)
258 goto out_unlock; 258 goto out_unlock;
259 259
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
264 atomic_inc(&overflow_count); 264 atomic_inc(&overflow_count);
265 265
266 out_unlock: 266 out_unlock:
267 spin_unlock_irqrestore(lock, flags); 267 raw_spin_unlock_irqrestore(lock, flags);
268} 268}
269 269
270static void print_name_offset(struct seq_file *m, unsigned long addr) 270static void print_name_offset(struct seq_file *m, unsigned long addr)
@@ -348,10 +348,11 @@ static void sync_access(void)
348 int cpu; 348 int cpu;
349 349
350 for_each_online_cpu(cpu) { 350 for_each_online_cpu(cpu) {
351 spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); 351 raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
352 spin_lock_irqsave(lock, flags); 352
353 raw_spin_lock_irqsave(lock, flags);
353 /* nothing */ 354 /* nothing */
354 spin_unlock_irqrestore(lock, flags); 355 raw_spin_unlock_irqrestore(lock, flags);
355 } 356 }
356} 357}
357 358
@@ -409,7 +410,7 @@ void __init init_timer_stats(void)
409 int cpu; 410 int cpu;
410 411
411 for_each_possible_cpu(cpu) 412 for_each_possible_cpu(cpu)
412 spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); 413 raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
413} 414}
414 415
415static int __init init_tstats_procfs(void) 416static int __init init_tstats_procfs(void)
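
timer_stats keeps one lookup lock per CPU; unlike a file-scope DEFINE_RAW_SPINLOCK, each per-CPU instance is initialized explicitly at boot. A sketch of that pattern, with invented names:

    /* hypothetical example, not from this patch */
    static DEFINE_PER_CPU(raw_spinlock_t, hypo_lock);

    static int __init hypo_init(void)
    {
        int cpu;

        /* runs once at boot, before any user can take a lock */
        for_each_possible_cpu(cpu)
            raw_spin_lock_init(&per_cpu(hypo_lock, cpu));
        return 0;
    }

    static void hypo_account(void)
    {
        raw_spinlock_t *lock = &per_cpu(hypo_lock, raw_smp_processor_id());
        unsigned long flags;

        /* any CPU's lock would do; the local one just minimizes contention */
        raw_spin_lock_irqsave(lock, flags);
        /* ... */
        raw_spin_unlock_irqrestore(lock, flags);
    }
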
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5e..f58c9ad15830 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
423 int cpu; 423 int cpu;
424 struct ring_buffer *buffer; 424 struct ring_buffer *buffer;
425 spinlock_t reader_lock; /* serialize readers */ 425 spinlock_t reader_lock; /* serialize readers */
426 raw_spinlock_t lock; 426 arch_spinlock_t lock;
427 struct lock_class_key lock_key; 427 struct lock_class_key lock_key;
428 struct list_head *pages; 428 struct list_head *pages;
429 struct buffer_page *head_page; /* read from head */ 429 struct buffer_page *head_page; /* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
998 cpu_buffer->buffer = buffer; 998 cpu_buffer->buffer = buffer;
999 spin_lock_init(&cpu_buffer->reader_lock); 999 spin_lock_init(&cpu_buffer->reader_lock);
1000 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1000 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1001 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 1001 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1002 1002
1003 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1003 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1004 GFP_KERNEL, cpu_to_node(cpu)); 1004 GFP_KERNEL, cpu_to_node(cpu));
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2834 int ret; 2834 int ret;
2835 2835
2836 local_irq_save(flags); 2836 local_irq_save(flags);
2837 __raw_spin_lock(&cpu_buffer->lock); 2837 arch_spin_lock(&cpu_buffer->lock);
2838 2838
2839 again: 2839 again:
2840 /* 2840 /*
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2923 goto again; 2923 goto again;
2924 2924
2925 out: 2925 out:
2926 __raw_spin_unlock(&cpu_buffer->lock); 2926 arch_spin_unlock(&cpu_buffer->lock);
2927 local_irq_restore(flags); 2927 local_irq_restore(flags);
2928 2928
2929 return reader; 2929 return reader;
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
3286 synchronize_sched(); 3286 synchronize_sched();
3287 3287
3288 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3288 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3289 __raw_spin_lock(&cpu_buffer->lock); 3289 arch_spin_lock(&cpu_buffer->lock);
3290 rb_iter_reset(iter); 3290 rb_iter_reset(iter);
3291 __raw_spin_unlock(&cpu_buffer->lock); 3291 arch_spin_unlock(&cpu_buffer->lock);
3292 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3292 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3293 3293
3294 return iter; 3294 return iter;
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3408 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 3408 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3409 goto out; 3409 goto out;
3410 3410
3411 __raw_spin_lock(&cpu_buffer->lock); 3411 arch_spin_lock(&cpu_buffer->lock);
3412 3412
3413 rb_reset_cpu(cpu_buffer); 3413 rb_reset_cpu(cpu_buffer);
3414 3414
3415 __raw_spin_unlock(&cpu_buffer->lock); 3415 arch_spin_unlock(&cpu_buffer->lock);
3416 3416
3417 out: 3417 out:
3418 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3418 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
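
arch_spinlock_t, the new name for the old low-level raw_spinlock_t, is the bottom layer of the stack: no lockdep, no preemption or irq handling, just the architecture's test-and-set. The caller must fence everything off by hand, as rb_get_reader_page() does above. A minimal sketch with an invented lock:

    /* hypothetical example, not from this patch */
    static arch_spinlock_t hypo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

    static void hypo_critical(void)
    {
        unsigned long flags;

        /* arch_spin_lock() disables nothing itself; do it by hand */
        local_irq_save(flags);
        arch_spin_lock(&hypo_lock);

        /* ... critical section: no sleeping, no recursion ... */

        arch_spin_unlock(&hypo_lock);
        local_irq_restore(flags);
    }
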
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c82dfd92fdfd..bb6b5e7fa2a2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
493 * protected by per_cpu spinlocks. But the action of the swap 493 * protected by per_cpu spinlocks. But the action of the swap
494 * needs its own lock. 494 * needs its own lock.
495 * 495 *
496 * This is defined as a raw_spinlock_t in order to help 496 * This is defined as an arch_spinlock_t in order to help
497 * with performance when lockdep debugging is enabled. 497 * with performance when lockdep debugging is enabled.
498 * 498 *
499 * It is also used in other places outside the update_max_tr 499 * It is also used in other places outside the update_max_tr
500 * so it needs to be defined outside of the 500 * so it needs to be defined outside of the
501 * CONFIG_TRACER_MAX_TRACE. 501 * CONFIG_TRACER_MAX_TRACE.
502 */ 502 */
503static raw_spinlock_t ftrace_max_lock = 503static arch_spinlock_t ftrace_max_lock =
504 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 504 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
505 505
506#ifdef CONFIG_TRACER_MAX_TRACE 506#ifdef CONFIG_TRACER_MAX_TRACE
507unsigned long __read_mostly tracing_max_latency; 507unsigned long __read_mostly tracing_max_latency;
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
555 return; 555 return;
556 556
557 WARN_ON_ONCE(!irqs_disabled()); 557 WARN_ON_ONCE(!irqs_disabled());
558 __raw_spin_lock(&ftrace_max_lock); 558 arch_spin_lock(&ftrace_max_lock);
559 559
560 tr->buffer = max_tr.buffer; 560 tr->buffer = max_tr.buffer;
561 max_tr.buffer = buf; 561 max_tr.buffer = buf;
562 562
563 __update_max_tr(tr, tsk, cpu); 563 __update_max_tr(tr, tsk, cpu);
564 __raw_spin_unlock(&ftrace_max_lock); 564 arch_spin_unlock(&ftrace_max_lock);
565} 565}
566 566
567/** 567/**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
581 return; 581 return;
582 582
583 WARN_ON_ONCE(!irqs_disabled()); 583 WARN_ON_ONCE(!irqs_disabled());
584 __raw_spin_lock(&ftrace_max_lock); 584 arch_spin_lock(&ftrace_max_lock);
585 585
586 ftrace_disable_cpu(); 586 ftrace_disable_cpu();
587 587
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
603 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 603 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
604 604
605 __update_max_tr(tr, tsk, cpu); 605 __update_max_tr(tr, tsk, cpu);
606 __raw_spin_unlock(&ftrace_max_lock); 606 arch_spin_unlock(&ftrace_max_lock);
607} 607}
608#endif /* CONFIG_TRACER_MAX_TRACE */ 608#endif /* CONFIG_TRACER_MAX_TRACE */
609 609
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
802static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; 802static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
803static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; 803static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
804static int cmdline_idx; 804static int cmdline_idx;
805static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; 805static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
806 806
807/* temporary disable recording */ 807/* temporary disable recording */
808static atomic_t trace_record_cmdline_disabled __read_mostly; 808static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
915 * nor do we want to disable interrupts, 915 * nor do we want to disable interrupts,
916 * so if we miss here, then better luck next time. 916 * so if we miss here, then better luck next time.
917 */ 917 */
918 if (!__raw_spin_trylock(&trace_cmdline_lock)) 918 if (!arch_spin_trylock(&trace_cmdline_lock))
919 return; 919 return;
920 920
921 idx = map_pid_to_cmdline[tsk->pid]; 921 idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
940 940
941 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); 941 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
942 942
943 __raw_spin_unlock(&trace_cmdline_lock); 943 arch_spin_unlock(&trace_cmdline_lock);
944} 944}
945 945
946void trace_find_cmdline(int pid, char comm[]) 946void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
958 } 958 }
959 959
960 preempt_disable(); 960 preempt_disable();
961 __raw_spin_lock(&trace_cmdline_lock); 961 arch_spin_lock(&trace_cmdline_lock);
962 map = map_pid_to_cmdline[pid]; 962 map = map_pid_to_cmdline[pid];
963 if (map != NO_CMDLINE_MAP) 963 if (map != NO_CMDLINE_MAP)
964 strcpy(comm, saved_cmdlines[map]); 964 strcpy(comm, saved_cmdlines[map]);
965 else 965 else
966 strcpy(comm, "<...>"); 966 strcpy(comm, "<...>");
967 967
968 __raw_spin_unlock(&trace_cmdline_lock); 968 arch_spin_unlock(&trace_cmdline_lock);
969 preempt_enable(); 969 preempt_enable();
970} 970}
971 971
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1251 */ 1251 */
1252int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 1252int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1253{ 1253{
1254 static raw_spinlock_t trace_buf_lock = 1254 static arch_spinlock_t trace_buf_lock =
1255 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 1255 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1256 static u32 trace_buf[TRACE_BUF_SIZE]; 1256 static u32 trace_buf[TRACE_BUF_SIZE];
1257 1257
1258 struct ftrace_event_call *call = &event_bprint; 1258 struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1283 1283
1284 /* Lockdep uses trace_printk for lock tracing */ 1284 /* Lockdep uses trace_printk for lock tracing */
1285 local_irq_save(flags); 1285 local_irq_save(flags);
1286 __raw_spin_lock(&trace_buf_lock); 1286 arch_spin_lock(&trace_buf_lock);
1287 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1287 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1288 1288
1289 if (len > TRACE_BUF_SIZE || len < 0) 1289 if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1304 ring_buffer_unlock_commit(buffer, event); 1304 ring_buffer_unlock_commit(buffer, event);
1305 1305
1306out_unlock: 1306out_unlock:
1307 __raw_spin_unlock(&trace_buf_lock); 1307 arch_spin_unlock(&trace_buf_lock);
1308 local_irq_restore(flags); 1308 local_irq_restore(flags);
1309 1309
1310out: 1310out:
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
1334int trace_array_vprintk(struct trace_array *tr, 1334int trace_array_vprintk(struct trace_array *tr,
1335 unsigned long ip, const char *fmt, va_list args) 1335 unsigned long ip, const char *fmt, va_list args)
1336{ 1336{
1337 static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; 1337 static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1338 static char trace_buf[TRACE_BUF_SIZE]; 1338 static char trace_buf[TRACE_BUF_SIZE];
1339 1339
1340 struct ftrace_event_call *call = &event_print; 1340 struct ftrace_event_call *call = &event_print;
@@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr,
1360 1360
1361 pause_graph_tracing(); 1361 pause_graph_tracing();
1362 raw_local_irq_save(irq_flags); 1362 raw_local_irq_save(irq_flags);
1363 __raw_spin_lock(&trace_buf_lock); 1363 arch_spin_lock(&trace_buf_lock);
1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1365 1365
1366 size = sizeof(*entry) + len + 1; 1366 size = sizeof(*entry) + len + 1;
@@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr,
1378 ring_buffer_unlock_commit(buffer, event); 1378 ring_buffer_unlock_commit(buffer, event);
1379 1379
1380 out_unlock: 1380 out_unlock:
1381 __raw_spin_unlock(&trace_buf_lock); 1381 arch_spin_unlock(&trace_buf_lock);
1382 raw_local_irq_restore(irq_flags); 1382 raw_local_irq_restore(irq_flags);
1383 unpause_graph_tracing(); 1383 unpause_graph_tracing();
1384 out: 1384 out:
@@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2279 mutex_lock(&tracing_cpumask_update_lock); 2279 mutex_lock(&tracing_cpumask_update_lock);
2280 2280
2281 local_irq_disable(); 2281 local_irq_disable();
2282 __raw_spin_lock(&ftrace_max_lock); 2282 arch_spin_lock(&ftrace_max_lock);
2283 for_each_tracing_cpu(cpu) { 2283 for_each_tracing_cpu(cpu) {
2284 /* 2284 /*
2285 * Increase/decrease the disabled counter if we are 2285 * Increase/decrease the disabled counter if we are
@@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2294 atomic_dec(&global_trace.data[cpu]->disabled); 2294 atomic_dec(&global_trace.data[cpu]->disabled);
2295 } 2295 }
2296 } 2296 }
2297 __raw_spin_unlock(&ftrace_max_lock); 2297 arch_spin_unlock(&ftrace_max_lock);
2298 local_irq_enable(); 2298 local_irq_enable();
2299 2299
2300 cpumask_copy(tracing_cpumask, tracing_cpumask_new); 2300 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
4307 4307
4308static void __ftrace_dump(bool disable_tracing) 4308static void __ftrace_dump(bool disable_tracing)
4309{ 4309{
4310 static raw_spinlock_t ftrace_dump_lock = 4310 static arch_spinlock_t ftrace_dump_lock =
4311 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 4311 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
4312 /* use static because iter can be a bit big for the stack */ 4312 /* use static because iter can be a bit big for the stack */
4313 static struct trace_iterator iter; 4313 static struct trace_iterator iter;
4314 unsigned int old_userobj; 4314 unsigned int old_userobj;
@@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)
4318 4318
4319 /* only one dump */ 4319 /* only one dump */
4320 local_irq_save(flags); 4320 local_irq_save(flags);
4321 __raw_spin_lock(&ftrace_dump_lock); 4321 arch_spin_lock(&ftrace_dump_lock);
4322 if (dump_ran) 4322 if (dump_ran)
4323 goto out; 4323 goto out;
4324 4324
@@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
4393 } 4393 }
4394 4394
4395 out: 4395 out:
4396 __raw_spin_unlock(&ftrace_dump_lock); 4396 arch_spin_unlock(&ftrace_dump_lock);
4397 local_irq_restore(flags); 4397 local_irq_restore(flags);
4398} 4398}
4399 4399
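
trace.c also shows the trylock form of the same API: trace_save_cmdline() would rather skip an update than spin, since it can be reached from contexts that cannot afford to wait. The shape, with invented names:

    /* hypothetical example, not from this patch */
    static arch_spinlock_t hypo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    static int hypo_cache;

    /* best effort: drop the update instead of spinning */
    static void hypo_try_update(int v)
    {
        if (!arch_spin_trylock(&hypo_lock))
            return;
        hypo_cache = v;
        arch_spin_unlock(&hypo_lock);
    }
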
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 878c03f386ba..84a3a7ba072a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
71/* keep prev_time and lock in the same cacheline. */ 71/* keep prev_time and lock in the same cacheline. */
72static struct { 72static struct {
73 u64 prev_time; 73 u64 prev_time;
74 raw_spinlock_t lock; 74 arch_spinlock_t lock;
75} trace_clock_struct ____cacheline_aligned_in_smp = 75} trace_clock_struct ____cacheline_aligned_in_smp =
76 { 76 {
77 .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, 77 .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
78 }; 78 };
79 79
80u64 notrace trace_clock_global(void) 80u64 notrace trace_clock_global(void)
@@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void)
94 if (unlikely(in_nmi())) 94 if (unlikely(in_nmi()))
95 goto out; 95 goto out;
96 96
97 __raw_spin_lock(&trace_clock_struct.lock); 97 arch_spin_lock(&trace_clock_struct.lock);
98 98
99 /* 99 /*
100 * TODO: if this happens often then maybe we should reset 100 * TODO: if this happens often then maybe we should reset
@@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void)
106 106
107 trace_clock_struct.prev_time = now; 107 trace_clock_struct.prev_time = now;
108 108
109 __raw_spin_unlock(&trace_clock_struct.lock); 109 arch_spin_unlock(&trace_clock_struct.lock);
110 110
111 out: 111 out:
112 raw_local_irq_restore(flags); 112 raw_local_irq_restore(flags);
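
trace_clock.c packs prev_time and its lock into one cacheline-aligned struct so a contended update touches a single cache line. The same packaging plus the monotonic clamp it protects, sketched with invented names:

    /* hypothetical example, not from this patch */
    static struct {
        u64 prev;
        arch_spinlock_t lock;
    } hypo_clock ____cacheline_aligned_in_smp = {
        .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
    };

    static u64 hypo_monotonic(u64 now)
    {
        unsigned long flags;

        raw_local_irq_save(flags);
        arch_spin_lock(&hypo_clock.lock);
        if (now < hypo_clock.prev)      /* never let the clock go backwards */
            now = hypo_clock.prev;
        hypo_clock.prev = now;
        arch_spin_unlock(&hypo_clock.lock);
        raw_local_irq_restore(flags);
        return now;
    }
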
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 26185d727676..0271742abb8d 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
28static unsigned wakeup_prio = -1; 28static unsigned wakeup_prio = -1;
29static int wakeup_rt; 29static int wakeup_rt;
30 30
31static raw_spinlock_t wakeup_lock = 31static arch_spinlock_t wakeup_lock =
32 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 32 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
33 33
34static void __wakeup_reset(struct trace_array *tr); 34static void __wakeup_reset(struct trace_array *tr);
35 35
@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
143 goto out; 143 goto out;
144 144
145 local_irq_save(flags); 145 local_irq_save(flags);
146 __raw_spin_lock(&wakeup_lock); 146 arch_spin_lock(&wakeup_lock);
147 147
148 /* We could race with grabbing wakeup_lock */ 148 /* We could race with grabbing wakeup_lock */
149 if (unlikely(!tracer_enabled || next != wakeup_task)) 149 if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
169 169
170out_unlock: 170out_unlock:
171 __wakeup_reset(wakeup_trace); 171 __wakeup_reset(wakeup_trace);
172 __raw_spin_unlock(&wakeup_lock); 172 arch_spin_unlock(&wakeup_lock);
173 local_irq_restore(flags); 173 local_irq_restore(flags);
174out: 174out:
175 atomic_dec(&wakeup_trace->data[cpu]->disabled); 175 atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr)
193 tracing_reset_online_cpus(tr); 193 tracing_reset_online_cpus(tr);
194 194
195 local_irq_save(flags); 195 local_irq_save(flags);
196 __raw_spin_lock(&wakeup_lock); 196 arch_spin_lock(&wakeup_lock);
197 __wakeup_reset(tr); 197 __wakeup_reset(tr);
198 __raw_spin_unlock(&wakeup_lock); 198 arch_spin_unlock(&wakeup_lock);
199 local_irq_restore(flags); 199 local_irq_restore(flags);
200} 200}
201 201
@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
225 goto out; 225 goto out;
226 226
227 /* interrupts should be off from try_to_wake_up */ 227 /* interrupts should be off from try_to_wake_up */
228 __raw_spin_lock(&wakeup_lock); 228 arch_spin_lock(&wakeup_lock);
229 229
230 /* check for races. */ 230 /* check for races. */
231 if (!tracer_enabled || p->prio >= wakeup_prio) 231 if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
255 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 255 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
256 256
257out_locked: 257out_locked:
258 __raw_spin_unlock(&wakeup_lock); 258 arch_spin_unlock(&wakeup_lock);
259out: 259out:
260 atomic_dec(&wakeup_trace->data[cpu]->disabled); 260 atomic_dec(&wakeup_trace->data[cpu]->disabled);
261} 261}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index dc98309e839a..280fea470d67 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
67 67
68 /* Don't allow flipping of max traces now */ 68 /* Don't allow flipping of max traces now */
69 local_irq_save(flags); 69 local_irq_save(flags);
70 __raw_spin_lock(&ftrace_max_lock); 70 arch_spin_lock(&ftrace_max_lock);
71 71
72 cnt = ring_buffer_entries(tr->buffer); 72 cnt = ring_buffer_entries(tr->buffer);
73 73
@@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
85 break; 85 break;
86 } 86 }
87 tracing_on(); 87 tracing_on();
88 __raw_spin_unlock(&ftrace_max_lock); 88 arch_spin_unlock(&ftrace_max_lock);
89 local_irq_restore(flags); 89 local_irq_restore(flags);
90 90
91 if (count) 91 if (count)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8504ac71e4e8..678a5120ee30 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
27}; 27};
28 28
29static unsigned long max_stack_size; 29static unsigned long max_stack_size;
30static raw_spinlock_t max_stack_lock = 30static arch_spinlock_t max_stack_lock =
31 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 31 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
32 32
33static int stack_trace_disabled __read_mostly; 33static int stack_trace_disabled __read_mostly;
34static DEFINE_PER_CPU(int, trace_active); 34static DEFINE_PER_CPU(int, trace_active);
@@ -54,7 +54,7 @@ static inline void check_stack(void)
54 return; 54 return;
55 55
56 local_irq_save(flags); 56 local_irq_save(flags);
57 __raw_spin_lock(&max_stack_lock); 57 arch_spin_lock(&max_stack_lock);
58 58
59 /* a race could have already updated it */ 59 /* a race could have already updated it */
60 if (this_size <= max_stack_size) 60 if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
103 } 103 }
104 104
105 out: 105 out:
106 __raw_spin_unlock(&max_stack_lock); 106 arch_spin_unlock(&max_stack_lock);
107 local_irq_restore(flags); 107 local_irq_restore(flags);
108} 108}
109 109
@@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
171 return ret; 171 return ret;
172 172
173 local_irq_save(flags); 173 local_irq_save(flags);
174 __raw_spin_lock(&max_stack_lock); 174 arch_spin_lock(&max_stack_lock);
175 *ptr = val; 175 *ptr = val;
176 __raw_spin_unlock(&max_stack_lock); 176 arch_spin_unlock(&max_stack_lock);
177 local_irq_restore(flags); 177 local_irq_restore(flags);
178 178
179 return count; 179 return count;
@@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
207static void *t_start(struct seq_file *m, loff_t *pos) 207static void *t_start(struct seq_file *m, loff_t *pos)
208{ 208{
209 local_irq_disable(); 209 local_irq_disable();
210 __raw_spin_lock(&max_stack_lock); 210 arch_spin_lock(&max_stack_lock);
211 211
212 if (*pos == 0) 212 if (*pos == 0)
213 return SEQ_START_TOKEN; 213 return SEQ_START_TOKEN;
@@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
217 217
218static void t_stop(struct seq_file *m, void *p) 218static void t_stop(struct seq_file *m, void *p)
219{ 219{
220 __raw_spin_unlock(&max_stack_lock); 220 arch_spin_unlock(&max_stack_lock);
221 local_irq_enable(); 221 local_irq_enable();
222} 222}
223 223
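
t_start()/t_stop() above illustrate a seq_file idiom: the lock is taken in ->start() and released only in ->stop(), so it stays held across every ->show() call of a single read. A sketch of that shape; the hypo_* names and the trivial iterator are invented:

    /* hypothetical example, not from this patch */
    static arch_spinlock_t hypo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

    static void *hypo_start(struct seq_file *m, loff_t *pos)
    {
        local_irq_disable();            /* arch_spin_lock() will not do this */
        arch_spin_lock(&hypo_lock);
        /* a real iterator would walk its data here */
        return *pos == 0 ? SEQ_START_TOKEN : NULL;
    }

    static void hypo_stop(struct seq_file *m, void *p)
    {
        arch_spin_unlock(&hypo_lock);   /* pairs with hypo_start() */
        local_irq_enable();
    }
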
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index eae56fddfa3b..a9a8996d286a 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -26,14 +26,14 @@
26 26
27struct debug_bucket { 27struct debug_bucket {
28 struct hlist_head list; 28 struct hlist_head list;
29 spinlock_t lock; 29 raw_spinlock_t lock;
30}; 30};
31 31
32static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; 32static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
33 33
34static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; 34static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
35 35
36static DEFINE_SPINLOCK(pool_lock); 36static DEFINE_RAW_SPINLOCK(pool_lock);
37 37
38static HLIST_HEAD(obj_pool); 38static HLIST_HEAD(obj_pool);
39 39
@@ -96,10 +96,10 @@ static int fill_pool(void)
96 if (!new) 96 if (!new)
97 return obj_pool_free; 97 return obj_pool_free;
98 98
99 spin_lock_irqsave(&pool_lock, flags); 99 raw_spin_lock_irqsave(&pool_lock, flags);
100 hlist_add_head(&new->node, &obj_pool); 100 hlist_add_head(&new->node, &obj_pool);
101 obj_pool_free++; 101 obj_pool_free++;
102 spin_unlock_irqrestore(&pool_lock, flags); 102 raw_spin_unlock_irqrestore(&pool_lock, flags);
103 } 103 }
104 return obj_pool_free; 104 return obj_pool_free;
105} 105}
@@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
133{ 133{
134 struct debug_obj *obj = NULL; 134 struct debug_obj *obj = NULL;
135 135
136 spin_lock(&pool_lock); 136 raw_spin_lock(&pool_lock);
137 if (obj_pool.first) { 137 if (obj_pool.first) {
138 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 138 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
139 139
@@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
152 if (obj_pool_free < obj_pool_min_free) 152 if (obj_pool_free < obj_pool_min_free)
153 obj_pool_min_free = obj_pool_free; 153 obj_pool_min_free = obj_pool_free;
154 } 154 }
155 spin_unlock(&pool_lock); 155 raw_spin_unlock(&pool_lock);
156 156
157 return obj; 157 return obj;
158} 158}
@@ -165,7 +165,7 @@ static void free_obj_work(struct work_struct *work)
165 struct debug_obj *obj; 165 struct debug_obj *obj;
166 unsigned long flags; 166 unsigned long flags;
167 167
168 spin_lock_irqsave(&pool_lock, flags); 168 raw_spin_lock_irqsave(&pool_lock, flags);
169 while (obj_pool_free > ODEBUG_POOL_SIZE) { 169 while (obj_pool_free > ODEBUG_POOL_SIZE) {
170 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 170 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
171 hlist_del(&obj->node); 171 hlist_del(&obj->node);
@@ -174,11 +174,11 @@ static void free_obj_work(struct work_struct *work)
174 * We release pool_lock across kmem_cache_free() to 174 * We release pool_lock across kmem_cache_free() to
175 * avoid contention on pool_lock. 175 * avoid contention on pool_lock.
176 */ 176 */
177 spin_unlock_irqrestore(&pool_lock, flags); 177 raw_spin_unlock_irqrestore(&pool_lock, flags);
178 kmem_cache_free(obj_cache, obj); 178 kmem_cache_free(obj_cache, obj);
179 spin_lock_irqsave(&pool_lock, flags); 179 raw_spin_lock_irqsave(&pool_lock, flags);
180 } 180 }
181 spin_unlock_irqrestore(&pool_lock, flags); 181 raw_spin_unlock_irqrestore(&pool_lock, flags);
182} 182}
183 183
184/* 184/*
@@ -190,7 +190,7 @@ static void free_object(struct debug_obj *obj)
190 unsigned long flags; 190 unsigned long flags;
191 int sched = 0; 191 int sched = 0;
192 192
193 spin_lock_irqsave(&pool_lock, flags); 193 raw_spin_lock_irqsave(&pool_lock, flags);
194 /* 194 /*
195 * schedule work when the pool is filled and the cache is 195 * schedule work when the pool is filled and the cache is
196 * initialized: 196 * initialized:
@@ -200,7 +200,7 @@ static void free_object(struct debug_obj *obj)
200 hlist_add_head(&obj->node, &obj_pool); 200 hlist_add_head(&obj->node, &obj_pool);
201 obj_pool_free++; 201 obj_pool_free++;
202 obj_pool_used--; 202 obj_pool_used--;
203 spin_unlock_irqrestore(&pool_lock, flags); 203 raw_spin_unlock_irqrestore(&pool_lock, flags);
204 if (sched) 204 if (sched)
205 schedule_work(&debug_obj_work); 205 schedule_work(&debug_obj_work);
206} 206}
@@ -221,9 +221,9 @@ static void debug_objects_oom(void)
221 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); 221 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
222 222
223 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 223 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
224 spin_lock_irqsave(&db->lock, flags); 224 raw_spin_lock_irqsave(&db->lock, flags);
225 hlist_move_list(&db->list, &freelist); 225 hlist_move_list(&db->list, &freelist);
226 spin_unlock_irqrestore(&db->lock, flags); 226 raw_spin_unlock_irqrestore(&db->lock, flags);
227 227
228 /* Now free them */ 228 /* Now free them */
229 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 229 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
303 303
304 db = get_bucket((unsigned long) addr); 304 db = get_bucket((unsigned long) addr);
305 305
306 spin_lock_irqsave(&db->lock, flags); 306 raw_spin_lock_irqsave(&db->lock, flags);
307 307
308 obj = lookup_object(addr, db); 308 obj = lookup_object(addr, db);
309 if (!obj) { 309 if (!obj) {
310 obj = alloc_object(addr, db, descr); 310 obj = alloc_object(addr, db, descr);
311 if (!obj) { 311 if (!obj) {
312 debug_objects_enabled = 0; 312 debug_objects_enabled = 0;
313 spin_unlock_irqrestore(&db->lock, flags); 313 raw_spin_unlock_irqrestore(&db->lock, flags);
314 debug_objects_oom(); 314 debug_objects_oom();
315 return; 315 return;
316 } 316 }
@@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
327 case ODEBUG_STATE_ACTIVE: 327 case ODEBUG_STATE_ACTIVE:
328 debug_print_object(obj, "init"); 328 debug_print_object(obj, "init");
329 state = obj->state; 329 state = obj->state;
330 spin_unlock_irqrestore(&db->lock, flags); 330 raw_spin_unlock_irqrestore(&db->lock, flags);
331 debug_object_fixup(descr->fixup_init, addr, state); 331 debug_object_fixup(descr->fixup_init, addr, state);
332 return; 332 return;
333 333
@@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
338 break; 338 break;
339 } 339 }
340 340
341 spin_unlock_irqrestore(&db->lock, flags); 341 raw_spin_unlock_irqrestore(&db->lock, flags);
342} 342}
343 343
344/** 344/**
@@ -385,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
385 385
386 db = get_bucket((unsigned long) addr); 386 db = get_bucket((unsigned long) addr);
387 387
388 spin_lock_irqsave(&db->lock, flags); 388 raw_spin_lock_irqsave(&db->lock, flags);
389 389
390 obj = lookup_object(addr, db); 390 obj = lookup_object(addr, db);
391 if (obj) { 391 if (obj) {
@@ -398,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
398 case ODEBUG_STATE_ACTIVE: 398 case ODEBUG_STATE_ACTIVE:
399 debug_print_object(obj, "activate"); 399 debug_print_object(obj, "activate");
400 state = obj->state; 400 state = obj->state;
401 spin_unlock_irqrestore(&db->lock, flags); 401 raw_spin_unlock_irqrestore(&db->lock, flags);
402 debug_object_fixup(descr->fixup_activate, addr, state); 402 debug_object_fixup(descr->fixup_activate, addr, state);
403 return; 403 return;
404 404
@@ -408,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
408 default: 408 default:
409 break; 409 break;
410 } 410 }
411 spin_unlock_irqrestore(&db->lock, flags); 411 raw_spin_unlock_irqrestore(&db->lock, flags);
412 return; 412 return;
413 } 413 }
414 414
415 spin_unlock_irqrestore(&db->lock, flags); 415 raw_spin_unlock_irqrestore(&db->lock, flags);
416 /* 416 /*
417 * This happens when a static object is activated. We 417 * This happens when a static object is activated. We
418 * let the type specific code decide whether this is 418 * let the type specific code decide whether this is
@@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
438 438
439 db = get_bucket((unsigned long) addr); 439 db = get_bucket((unsigned long) addr);
440 440
441 spin_lock_irqsave(&db->lock, flags); 441 raw_spin_lock_irqsave(&db->lock, flags);
442 442
443 obj = lookup_object(addr, db); 443 obj = lookup_object(addr, db);
444 if (obj) { 444 if (obj) {
@@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
463 debug_print_object(&o, "deactivate"); 463 debug_print_object(&o, "deactivate");
464 } 464 }
465 465
466 spin_unlock_irqrestore(&db->lock, flags); 466 raw_spin_unlock_irqrestore(&db->lock, flags);
467} 467}
468 468
469/** 469/**
@@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
483 483
484 db = get_bucket((unsigned long) addr); 484 db = get_bucket((unsigned long) addr);
485 485
486 spin_lock_irqsave(&db->lock, flags); 486 raw_spin_lock_irqsave(&db->lock, flags);
487 487
488 obj = lookup_object(addr, db); 488 obj = lookup_object(addr, db);
489 if (!obj) 489 if (!obj)
@@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
498 case ODEBUG_STATE_ACTIVE: 498 case ODEBUG_STATE_ACTIVE:
499 debug_print_object(obj, "destroy"); 499 debug_print_object(obj, "destroy");
500 state = obj->state; 500 state = obj->state;
501 spin_unlock_irqrestore(&db->lock, flags); 501 raw_spin_unlock_irqrestore(&db->lock, flags);
502 debug_object_fixup(descr->fixup_destroy, addr, state); 502 debug_object_fixup(descr->fixup_destroy, addr, state);
503 return; 503 return;
504 504
@@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
509 break; 509 break;
510 } 510 }
511out_unlock: 511out_unlock:
512 spin_unlock_irqrestore(&db->lock, flags); 512 raw_spin_unlock_irqrestore(&db->lock, flags);
513} 513}
514 514
515/** 515/**
@@ -529,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
529 529
530 db = get_bucket((unsigned long) addr); 530 db = get_bucket((unsigned long) addr);
531 531
532 spin_lock_irqsave(&db->lock, flags); 532 raw_spin_lock_irqsave(&db->lock, flags);
533 533
534 obj = lookup_object(addr, db); 534 obj = lookup_object(addr, db);
535 if (!obj) 535 if (!obj)
@@ -539,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
539 case ODEBUG_STATE_ACTIVE: 539 case ODEBUG_STATE_ACTIVE:
540 debug_print_object(obj, "free"); 540 debug_print_object(obj, "free");
541 state = obj->state; 541 state = obj->state;
542 spin_unlock_irqrestore(&db->lock, flags); 542 raw_spin_unlock_irqrestore(&db->lock, flags);
543 debug_object_fixup(descr->fixup_free, addr, state); 543 debug_object_fixup(descr->fixup_free, addr, state);
544 return; 544 return;
545 default: 545 default:
546 hlist_del(&obj->node); 546 hlist_del(&obj->node);
547 spin_unlock_irqrestore(&db->lock, flags); 547 raw_spin_unlock_irqrestore(&db->lock, flags);
548 free_object(obj); 548 free_object(obj);
549 return; 549 return;
550 } 550 }
551out_unlock: 551out_unlock:
552 spin_unlock_irqrestore(&db->lock, flags); 552 raw_spin_unlock_irqrestore(&db->lock, flags);
553} 553}
554 554
555#ifdef CONFIG_DEBUG_OBJECTS_FREE 555#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
575 575
576repeat: 576repeat:
577 cnt = 0; 577 cnt = 0;
578 spin_lock_irqsave(&db->lock, flags); 578 raw_spin_lock_irqsave(&db->lock, flags);
579 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { 579 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
580 cnt++; 580 cnt++;
581 oaddr = (unsigned long) obj->object; 581 oaddr = (unsigned long) obj->object;
@@ -587,7 +587,7 @@ repeat:
587 debug_print_object(obj, "free"); 587 debug_print_object(obj, "free");
588 descr = obj->descr; 588 descr = obj->descr;
589 state = obj->state; 589 state = obj->state;
590 spin_unlock_irqrestore(&db->lock, flags); 590 raw_spin_unlock_irqrestore(&db->lock, flags);
591 debug_object_fixup(descr->fixup_free, 591 debug_object_fixup(descr->fixup_free,
592 (void *) oaddr, state); 592 (void *) oaddr, state);
593 goto repeat; 593 goto repeat;
@@ -597,7 +597,7 @@ repeat:
597 break; 597 break;
598 } 598 }
599 } 599 }
600 spin_unlock_irqrestore(&db->lock, flags); 600 raw_spin_unlock_irqrestore(&db->lock, flags);
601 601
602 /* Now free them */ 602 /* Now free them */
603 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 603 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
783 783
784 db = get_bucket((unsigned long) addr); 784 db = get_bucket((unsigned long) addr);
785 785
786 spin_lock_irqsave(&db->lock, flags); 786 raw_spin_lock_irqsave(&db->lock, flags);
787 787
788 obj = lookup_object(addr, db); 788 obj = lookup_object(addr, db);
789 if (!obj && state != ODEBUG_STATE_NONE) { 789 if (!obj && state != ODEBUG_STATE_NONE) {
@@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
807 } 807 }
808 res = 0; 808 res = 0;
809out: 809out:
810 spin_unlock_irqrestore(&db->lock, flags); 810 raw_spin_unlock_irqrestore(&db->lock, flags);
811 if (res) 811 if (res)
812 debug_objects_enabled = 0; 812 debug_objects_enabled = 0;
813 return res; 813 return res;
@@ -907,7 +907,7 @@ void __init debug_objects_early_init(void)
907 int i; 907 int i;
908 908
909 for (i = 0; i < ODEBUG_HASH_SIZE; i++) 909 for (i = 0; i < ODEBUG_HASH_SIZE; i++)
910 spin_lock_init(&obj_hash[i].lock); 910 raw_spin_lock_init(&obj_hash[i].lock);
911 911
912 for (i = 0; i < ODEBUG_POOL_SIZE; i++) 912 for (i = 0; i < ODEBUG_POOL_SIZE; i++)
913 hlist_add_head(&obj_static_pool[i].node, &obj_pool); 913 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
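
debugobjects is the scaling counterpart to one global lock: a raw_spinlock_t per hash bucket, plus the separate pool_lock, so lookups of unrelated objects never contend. The bucket pattern in sketch form, with invented names and sizes:

    /* hypothetical example, not from this patch */
    #define HYPO_HASH_SIZE 16

    struct hypo_bucket {
        struct hlist_head list;
        raw_spinlock_t lock;
    };

    static struct hypo_bucket hypo_hash[HYPO_HASH_SIZE];

    static struct hypo_bucket *hypo_get_bucket(unsigned long addr)
    {
        /* ignore low bits that are identical for nearby objects */
        return &hypo_hash[(addr >> 4) % HYPO_HASH_SIZE];
    }

    static void __init hypo_early_init(void)
    {
        int i;

        for (i = 0; i < HYPO_HASH_SIZE; i++)
            raw_spin_lock_init(&hypo_hash[i].lock);
    }
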
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 5526b46aba94..b135d04aa48a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -23,7 +23,7 @@
  *
  * Don't use in new code.
  */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
 
 
 /*
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
  * If it successfully gets the lock, it should increment
  * the preemption count like any spinlock does.
  *
- * (This works on UP too - _raw_spin_trylock will never
+ * (This works on UP too - do_raw_spin_trylock will never
  * return false in that case)
  */
 int __lockfunc __reacquire_kernel_lock(void)
 {
-        while (!_raw_spin_trylock(&kernel_flag)) {
+        while (!do_raw_spin_trylock(&kernel_flag)) {
                 if (need_resched())
                         return -EAGAIN;
                 cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
 
 void __lockfunc __release_kernel_lock(void)
 {
-        _raw_spin_unlock(&kernel_flag);
+        do_raw_spin_unlock(&kernel_flag);
         preempt_enable_no_resched();
 }
 
 /*
  * These are the BKL spinlocks - we try to be polite about preemption.
  * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
+ * do_raw_spin_trylock() will always succeed.
  */
 #ifdef CONFIG_PREEMPT
 static inline void __lock_kernel(void)
 {
         preempt_disable();
-        if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+        if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
                 /*
                  * If preemption was disabled even before this
                  * was called, there's nothing we can be polite
                  * about - just spin.
                  */
                 if (preempt_count() > 1) {
-                        _raw_spin_lock(&kernel_flag);
+                        do_raw_spin_lock(&kernel_flag);
                         return;
                 }
 
@@ -82,10 +82,10 @@ static inline void __lock_kernel(void)
                  */
                 do {
                         preempt_enable();
-                        while (spin_is_locked(&kernel_flag))
+                        while (raw_spin_is_locked(&kernel_flag))
                                 cpu_relax();
                         preempt_disable();
-                } while (!_raw_spin_trylock(&kernel_flag));
+                } while (!do_raw_spin_trylock(&kernel_flag));
         }
 }
 
@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
  */
 static inline void __lock_kernel(void)
 {
-        _raw_spin_lock(&kernel_flag);
+        do_raw_spin_lock(&kernel_flag);
 }
 #endif
 
@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
          * the BKL is not covered by lockdep, so we open-code the
          * unlocking sequence (and thus avoid the dep-chain ops):
          */
-        _raw_spin_unlock(&kernel_flag);
+        do_raw_spin_unlock(&kernel_flag);
         preempt_enable();
 }
 
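Note on the API used above: with the BKL converted to a raw_spinlock_t, kernel_flag deliberately bypasses lockdep by calling the do_raw_*() helpers directly. Ordinary users go through the regular wrappers instead. A minimal usage sketch under the post-rename API (the demo_* names are hypothetical, not part of this patch):

/* Hypothetical demo code, not from this commit: typical use of the
 * new raw_spinlock_t type via its regular wrappers. Raw spinlocks
 * always spin (they never become sleeping locks on preempt-rt), so
 * they are reserved for low-level core code such as the BKL above.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static unsigned long demo_counter;

static void demo_increment(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_lock, flags);  /* disable IRQs, take lock */
        demo_counter++;
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}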
diff --git a/lib/plist.c b/lib/plist.c
index d6c64a824e1d..1471988d9190 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
 
 static void plist_check_head(struct plist_head *head)
 {
-        WARN_ON(!head->lock);
-        if (head->lock)
-                WARN_ON_SMP(!spin_is_locked(head->lock));
+        WARN_ON(!head->rawlock && !head->spinlock);
+        if (head->rawlock)
+                WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
+        if (head->spinlock)
+                WARN_ON_SMP(!spin_is_locked(head->spinlock));
         plist_check_list(&head->prio_list);
         plist_check_list(&head->node_list);
 }
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490b..4755b98b6dfb 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 
-void __spin_lock_init(spinlock_t *lock, const char *name,
-                      struct lock_class_key *key)
+void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+                          struct lock_class_key *key)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
         /*
@@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
         debug_check_no_locks_freed((void *)lock, sizeof(*lock));
         lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-        lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
         lock->magic = SPINLOCK_MAGIC;
         lock->owner = SPINLOCK_OWNER_INIT;
         lock->owner_cpu = -1;
 }
 
-EXPORT_SYMBOL(__spin_lock_init);
+EXPORT_SYMBOL(__raw_spin_lock_init);
 
 void __rwlock_init(rwlock_t *lock, const char *name,
                    struct lock_class_key *key)
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
         debug_check_no_locks_freed((void *)lock, sizeof(*lock));
         lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-        lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+        lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
         lock->magic = RWLOCK_MAGIC;
         lock->owner = SPINLOCK_OWNER_INIT;
         lock->owner_cpu = -1;
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 
 EXPORT_SYMBOL(__rwlock_init);
 
-static void spin_bug(spinlock_t *lock, const char *msg)
+static void spin_bug(raw_spinlock_t *lock, const char *msg)
 {
         struct task_struct *owner = NULL;
 
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
 #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
 
 static inline void
-debug_spin_lock_before(spinlock_t *lock)
+debug_spin_lock_before(raw_spinlock_t *lock)
 {
         SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
         SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
                     lock, "cpu recursion");
 }
 
-static inline void debug_spin_lock_after(spinlock_t *lock)
+static inline void debug_spin_lock_after(raw_spinlock_t *lock)
 {
         lock->owner_cpu = raw_smp_processor_id();
         lock->owner = current;
 }
 
-static inline void debug_spin_unlock(spinlock_t *lock)
+static inline void debug_spin_unlock(raw_spinlock_t *lock)
 {
         SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
-        SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+        SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
         SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
         SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                     lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
         lock->owner_cpu = -1;
 }
 
-static void __spin_lock_debug(spinlock_t *lock)
+static void __spin_lock_debug(raw_spinlock_t *lock)
 {
         u64 i;
         u64 loops = loops_per_jiffy * HZ;
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
 
         for (;;) {
                 for (i = 0; i < loops; i++) {
-                        if (__raw_spin_trylock(&lock->raw_lock))
+                        if (arch_spin_trylock(&lock->raw_lock))
                                 return;
                         __delay(1);
                 }
@@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock)
         }
 }
 
-void _raw_spin_lock(spinlock_t *lock)
+void do_raw_spin_lock(raw_spinlock_t *lock)
 {
         debug_spin_lock_before(lock);
-        if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+        if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
                 __spin_lock_debug(lock);
         debug_spin_lock_after(lock);
 }
 
-int _raw_spin_trylock(spinlock_t *lock)
+int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
-        int ret = __raw_spin_trylock(&lock->raw_lock);
+        int ret = arch_spin_trylock(&lock->raw_lock);
 
         if (ret)
                 debug_spin_lock_after(lock);
@@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock)
         return ret;
 }
 
-void _raw_spin_unlock(spinlock_t *lock)
+void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
         debug_spin_unlock(lock);
-        __raw_spin_unlock(&lock->raw_lock);
+        arch_spin_unlock(&lock->raw_lock);
 }
 
 static void rwlock_bug(rwlock_t *lock, const char *msg)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
 
         for (;;) {
                 for (i = 0; i < loops; i++) {
-                        if (__raw_read_trylock(&lock->raw_lock))
+                        if (arch_read_trylock(&lock->raw_lock))
                                 return;
                         __delay(1);
                 }
@@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock)
 }
 #endif
 
-void _raw_read_lock(rwlock_t *lock)
+void do_raw_read_lock(rwlock_t *lock)
 {
         RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-        __raw_read_lock(&lock->raw_lock);
+        arch_read_lock(&lock->raw_lock);
 }
 
-int _raw_read_trylock(rwlock_t *lock)
+int do_raw_read_trylock(rwlock_t *lock)
 {
-        int ret = __raw_read_trylock(&lock->raw_lock);
+        int ret = arch_read_trylock(&lock->raw_lock);
 
 #ifndef CONFIG_SMP
         /*
@@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock)
         return ret;
 }
 
-void _raw_read_unlock(rwlock_t *lock)
+void do_raw_read_unlock(rwlock_t *lock)
 {
         RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-        __raw_read_unlock(&lock->raw_lock);
+        arch_read_unlock(&lock->raw_lock);
 }
 
 static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
 
         for (;;) {
                 for (i = 0; i < loops; i++) {
-                        if (__raw_write_trylock(&lock->raw_lock))
+                        if (arch_write_trylock(&lock->raw_lock))
                                 return;
                         __delay(1);
                 }
@@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock)
 }
 #endif
 
-void _raw_write_lock(rwlock_t *lock)
+void do_raw_write_lock(rwlock_t *lock)
 {
         debug_write_lock_before(lock);
-        __raw_write_lock(&lock->raw_lock);
+        arch_write_lock(&lock->raw_lock);
         debug_write_lock_after(lock);
 }
 
-int _raw_write_trylock(rwlock_t *lock)
+int do_raw_write_trylock(rwlock_t *lock)
 {
-        int ret = __raw_write_trylock(&lock->raw_lock);
+        int ret = arch_write_trylock(&lock->raw_lock);
 
         if (ret)
                 debug_write_lock_after(lock);
@@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock)
         return ret;
 }
 
-void _raw_write_unlock(rwlock_t *lock)
+void do_raw_write_unlock(rwlock_t *lock)
 {
         debug_write_unlock(lock);
-        __raw_write_unlock(&lock->raw_lock);
+        arch_write_unlock(&lock->raw_lock);
 }