-rw-r--r--  arch/alpha/include/asm/spinlock.h            |  18
-rw-r--r--  arch/arm/include/asm/spinlock.h              |  20
-rw-r--r--  arch/blackfin/include/asm/spinlock.h         |  20
-rw-r--r--  arch/cris/include/arch-v32/arch/spinlock.h   |  46
-rw-r--r--  arch/ia64/include/asm/bitops.h               |   2
-rw-r--r--  arch/ia64/include/asm/spinlock.h             |  26
-rw-r--r--  arch/m32r/include/asm/spinlock.h             |  28
-rw-r--r--  arch/mips/include/asm/spinlock.h             |  36
-rw-r--r--  arch/parisc/include/asm/atomic.h             |   4
-rw-r--r--  arch/parisc/include/asm/spinlock.h           |  44
-rw-r--r--  arch/powerpc/include/asm/spinlock.h          |  32
-rw-r--r--  arch/powerpc/kernel/rtas.c                   |  12
-rw-r--r--  arch/powerpc/lib/locks.c                     |   4
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c        |   8
-rw-r--r--  arch/s390/include/asm/spinlock.h             |  34
-rw-r--r--  arch/s390/lib/spinlock.c                     |  22
-rw-r--r--  arch/sh/include/asm/spinlock.h               |  26
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h         |  20
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h         |  18
-rw-r--r--  arch/x86/include/asm/paravirt.h              |  14
-rw-r--r--  arch/x86/include/asm/spinlock.h              |  26
-rw-r--r--  arch/x86/kernel/dumpstack.c                  |   6
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c         |   2
-rw-r--r--  arch/x86/kernel/tsc_sync.c                   |   8
-rw-r--r--  include/asm-generic/bitops/atomic.h          |   4
-rw-r--r--  include/linux/spinlock.h                     |  22
-rw-r--r--  include/linux/spinlock_up.h                  |  26
-rw-r--r--  kernel/lockdep.c                             |  18
-rw-r--r--  kernel/mutex-debug.h                         |   4
-rw-r--r--  kernel/spinlock.c                            |   4
-rw-r--r--  kernel/trace/ring_buffer.c                   |  12
-rw-r--r--  kernel/trace/trace.c                         |  32
-rw-r--r--  kernel/trace/trace_clock.c                   |   4
-rw-r--r--  kernel/trace/trace_sched_wakeup.c            |  12
-rw-r--r--  kernel/trace/trace_selftest.c                |   4
-rw-r--r--  kernel/trace/trace_stack.c                   |  12
-rw-r--r--  lib/spinlock_debug.c                         |   8
37 files changed, 319 insertions(+), 319 deletions(-)
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index bdb26a1940b4..4dac79f504c3 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
  * We make no fairness assumptions. They have a cost.
  */
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(x) \
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(x) \
 	do { cpu_relax(); } while ((x)->lock)
 
-static inline void __raw_spin_unlock(arch_spinlock_t * lock)
+static inline void arch_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
 
-static inline void __raw_spin_lock(arch_spinlock_t * lock)
+static inline void arch_spin_lock(arch_spinlock_t * lock)
 {
 	long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t * lock)
 	: "m"(lock->lock) : "memory");
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return !test_and_set_bit(0, &lock->lock);
 }
@@ -169,8 +169,8 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4e7712ee9394..de62eb098f68 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
  * Locked value: 1
  */
 
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 	smp_mb();
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 	}
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	smp_mb();
 
@@ -220,8 +220,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index fc16b4c5309b..62d49540e02b 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -24,31 +24,31 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
 asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
 
-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __raw_spin_is_locked_asm(&lock->lock);
 }
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__raw_spin_lock_asm(&lock->lock);
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __raw_spin_trylock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
@@ -92,9 +92,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 	__raw_write_unlock_asm(&rw->lock);
 }
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif
 
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index e253457765f2..a2e8a394d555 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);
 
-static inline int __raw_spin_is_locked(arch_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	return *(volatile signed char *)(&(x)->slock) <= 0;
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ volatile ("move.d %1,%0" \
 			  : "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 			  : "memory");
 }
 
-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	cris_spin_lock((void *)&lock->slock);
 }
 
 static inline void
-__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 /*
@@ -68,64 +68,64 @@ static inline int __raw_write_can_lock(raw_rwlock_t *x)
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock == 0);
 	rw->lock--;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = 0;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	rw->lock++;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = RW_LOCK_BIAS;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
 static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock != 0) {
 		rw->lock--;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return ret;
 }
 
 static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock == RW_LOCK_BIAS) {
 		rw->lock = 0;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return 1;
 }
 
 #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* __ASM_ARCH_SPINLOCK_H */
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
  * @addr: Address to start counting from
  *
  * Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
  */
 static __inline__ void
 __clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 9fbdf7e61087..b06165f6352f 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-#define __raw_spin_lock_init(x) ((x)->lock = 0)
+#define arch_spin_lock_init(x) ((x)->lock = 0)
 
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
@@ -103,39 +103,39 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock_wait(lock);
 }
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index 0c0164225bc0..8acac950a43c 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
  * We make no fairness assumptions. They have a cost.
  */
 
-#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-	do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+	do { cpu_relax(); } while (arch_spin_is_locked(x))
 
 /**
- * __raw_spin_trylock - Try spin lock and return a result
+ * arch_spin_trylock - Try spin lock and return a result
  * @lock: Pointer to the lock variable
  *
- * __raw_spin_trylock() tries to get the lock and returns a result.
+ * arch_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	int oldval;
 	unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 	 * }
 	 */
 	__asm__ __volatile__ (
-		"# __raw_spin_trylock \n\t"
+		"# arch_spin_trylock \n\t"
 		"ldi %1, #0; \n\t"
 		"mvfc %2, psw; \n\t"
 		"clrpsw #0x40 -> nop; \n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 	return (oldval > 0);
 }
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp0, tmp1;
 
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 	 * }
 	 */
 	__asm__ __volatile__ (
-		"# __raw_spin_lock \n\t"
+		"# arch_spin_lock \n\t"
 		".fillinsn \n"
 		"1: \n\t"
 		"mvfc %1, psw; \n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 	);
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	mb();
 	lock->slock = 1;
@@ -319,8 +319,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* _ASM_M32R_SPINLOCK_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 0f16d0673b4a..95edebaaf22a 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
  * becomes equal to the the initial value of the tail.
  */
 
-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	unsigned int counters = ACCESS_ONCE(lock->lock);
 
 	return ((counters >> 14) ^ counters) & 0x1fff;
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-	while (__raw_spin_is_locked(x)) { cpu_relax(); }
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+	while (arch_spin_is_locked(x)) { cpu_relax(); }
 
-static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	unsigned int counters = ACCESS_ONCE(lock->lock);
 
 	return (((counters >> 14) - counters) & 0x1fff) > 1;
 }
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	int my_ticket;
 	int tmp;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__ (
-		"	.set push # __raw_spin_lock \n"
+		"	.set push # arch_spin_lock \n"
 		"	.set noreorder \n"
 		"	\n"
 		"1:	ll %[ticket], %[ticket_ptr] \n"
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 		[my_ticket] "=&r" (my_ticket));
 	} else {
 		__asm__ __volatile__ (
-		"	.set push # __raw_spin_lock \n"
+		"	.set push # arch_spin_lock \n"
 		"	.set noreorder \n"
 		"	\n"
 		"	ll %[ticket], %[ticket_ptr] \n"
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 	smp_llsc_mb();
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	int tmp;
 
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__ (
-		"	# __raw_spin_unlock \n"
+		"	# arch_spin_unlock \n"
 		"1:	ll %[ticket], %[ticket_ptr] \n"
 		"	addiu %[ticket], %[ticket], 1 \n"
 		"	ori %[ticket], %[ticket], 0x2000 \n"
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 		[ticket] "=&r" (tmp));
 	} else {
 		__asm__ __volatile__ (
-		"	.set push # __raw_spin_unlock \n"
+		"	.set push # arch_spin_unlock \n"
 		"	.set noreorder \n"
 		"	\n"
 		"	ll %[ticket], %[ticket_ptr] \n"
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 	}
 }
 
-static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, tmp2, tmp3;
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__ (
-		"	.set push # __raw_spin_trylock \n"
+		"	.set push # arch_spin_trylock \n"
 		"	.set noreorder \n"
 		"	\n"
 		"1:	ll %[ticket], %[ticket_ptr] \n"
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
 		[now_serving] "=&r" (tmp3));
 	} else {
 		__asm__ __volatile__ (
-		"	.set push # __raw_spin_trylock \n"
+		"	.set push # arch_spin_trylock \n"
 		"	.set noreorder \n"
 		"	\n"
 		"	ll %[ticket], %[ticket_ptr] \n"
@@ -483,8 +483,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* _ASM_SPINLOCK_H */
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 3a4ea778d4b6..716634d1f546 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -34,12 +34,12 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 #define _atomic_spin_lock_irqsave(l,f) do { \
 	arch_spinlock_t *s = ATOMIC_HASH(l); \
 	local_irq_save(f); \
-	__raw_spin_lock(s); \
+	arch_spin_lock(s); \
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do { \
 	arch_spinlock_t *s = ATOMIC_HASH(l); \
-	__raw_spin_unlock(s); \
+	arch_spin_unlock(s); \
 	local_irq_restore(f); \
 } while(0)
 
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 69e8dca26744..235e7e386e2a 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-static inline int __raw_spin_is_locked(arch_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	return *a == 0;
 }
 
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
-	do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+	do { cpu_relax(); } while (arch_spin_is_locked(x))
 
-static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 					 unsigned long flags)
 {
 	volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
 	mb();
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *x)
 	mb();
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	int ret;
@@ -73,9 +73,9 @@ static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	__raw_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock_flags(&rw->lock, flags);
 	rw->counter++;
-	__raw_spin_unlock(&rw->lock);
+	arch_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
 
@@ -85,9 +85,9 @@ static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	__raw_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock_flags(&rw->lock, flags);
 	rw->counter--;
-	__raw_spin_unlock(&rw->lock);
+	arch_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
 
@@ -98,9 +98,9 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 	unsigned long flags;
 retry:
 	local_irq_save(flags);
-	if (__raw_spin_trylock(&rw->lock)) {
+	if (arch_spin_trylock(&rw->lock)) {
 		rw->counter++;
-		__raw_spin_unlock(&rw->lock);
+		arch_spin_unlock(&rw->lock);
 		local_irq_restore(flags);
 		return 1;
 	}
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 		return 0;
 
 	/* Wait until we have a realistic chance at the lock */
-	while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
 		cpu_relax();
 
 	goto retry;
@@ -124,10 +124,10 @@ static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 	unsigned long flags;
 retry:
 	local_irq_save(flags);
-	__raw_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock_flags(&rw->lock, flags);
 
 	if (rw->counter != 0) {
-		__raw_spin_unlock(&rw->lock);
+		arch_spin_unlock(&rw->lock);
 		local_irq_restore(flags);
 
 		while (rw->counter != 0)
@@ -144,7 +144,7 @@ retry:
 static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	rw->counter = 0;
-	__raw_spin_unlock(&rw->lock);
+	arch_spin_unlock(&rw->lock);
 }
 
 /* Note that we have to ensure interrupts are disabled in case we're
@@ -155,13 +155,13 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 	int result = 0;
 
 	local_irq_save(flags);
-	if (__raw_spin_trylock(&rw->lock)) {
+	if (arch_spin_trylock(&rw->lock)) {
 		if (rw->counter == 0) {
 			rw->counter = -1;
 			result = 1;
 		} else {
 			/* Read-locked. Oh well. */
-			__raw_spin_unlock(&rw->lock);
+			arch_spin_unlock(&rw->lock);
 		}
 	}
 	local_irq_restore(flags);
@@ -190,8 +190,8 @@ static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c0d44c92ff0e..cdcaf6b97087 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
 
-#define __raw_spin_is_locked(x) ((x)->slock != 0)
+#define arch_spin_is_locked(x) ((x)->slock != 0)
 
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
 	return tmp;
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
-	return arch_spin_trylock(lock) == 0;
+	return __arch_spin_trylock(lock) == 0;
 }
 
 /*
@@ -104,11 +104,11 @@ extern void __rw_yield(raw_rwlock_t *lock);
 #define SHARED_PROCESSOR 0
 #endif
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(arch_spin_trylock(lock) == 0))
+		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
 		do {
 			HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 }
 
 static inline
-void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;
 
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(arch_spin_trylock(lock) == 0))
+		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
 		local_save_flags(flags_dis);
 		local_irq_restore(flags);
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 	}
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	SYNC_IO;
-	__asm__ __volatile__("# __raw_spin_unlock\n\t"
+	__asm__ __volatile__("# arch_spin_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
 	lock->slock = 0;
 }
 
 #ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
+extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
 #else
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 #endif
 
 /*
@@ -290,9 +290,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) __spin_yield(lock)
-#define _raw_read_relax(lock) __rw_yield(lock)
-#define _raw_write_relax(lock) __rw_yield(lock)
+#define arch_spin_relax(lock) __spin_yield(lock)
+#define arch_read_relax(lock) __rw_yield(lock)
+#define arch_write_relax(lock) __rw_yield(lock)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 57dfa414cfb8..fd0d29493fd6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void)
 
 	local_irq_save(flags);
 	preempt_disable();
-	__raw_spin_lock_flags(&rtas.lock, flags);
+	arch_spin_lock_flags(&rtas.lock, flags);
 	return flags;
 }
 
 static void unlock_rtas(unsigned long flags)
 {
-	__raw_spin_unlock(&rtas.lock);
+	arch_spin_unlock(&rtas.lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void)
 
 	local_irq_save(flags);
 	hard_irq_disable();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
 	timebase = get_tb();
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 
 	while (timebase)
 		barrier();
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void)
 {
 	while (!timebase)
 		barrier();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 }
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index b06294cde499..ee395e392115 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
 }
 #endif
 
-void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (lock->slock) {
 		HMT_low();
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 	HMT_medium();
 }
 
-EXPORT_SYMBOL(__raw_spin_unlock_wait);
+EXPORT_SYMBOL(arch_spin_unlock_wait);
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index be36fece41d7..242f8095c2df 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void)
 
 	local_irq_save(flags);
 	hard_irq_disable();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	mtspr(SPRN_TBCTL, TBCTL_FREEZE);
 	isync();
 	timebase = get_tb();
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 
 	while (timebase)
 		barrier();
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void)
 	while (!timebase)
 		smp_rmb();
 
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 }
 
 struct smp_ops_t pas_smp_ops = {
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 6121fa4b83d9..a94c146657a9 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) \
-		_raw_spin_relax(lock); } while (0)
+#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) \
+		arch_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(arch_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(arch_spinlock_t *);
-extern void _raw_spin_relax(arch_spinlock_t *lock);
+extern void arch_spin_lock_wait(arch_spinlock_t *);
+extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int arch_spin_trylock_retry(arch_spinlock_t *);
+extern void arch_spin_relax(arch_spinlock_t *lock);
 
-static inline void __raw_spin_lock(arch_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return;
-	_raw_spin_lock_wait(lp);
+	arch_spin_lock_wait(lp);
 }
 
-static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
 					 unsigned long flags)
 {
 	int old;
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return;
-	_raw_spin_lock_wait_flags(lp, flags);
+	arch_spin_lock_wait_flags(lp, flags);
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lp)
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return 1;
-	return _raw_spin_trylock_retry(lp);
+	return arch_spin_trylock_retry(lp);
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lp)
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
 	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
@@ -188,7 +188,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return _raw_write_trylock_retry(rw);
 }
 
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index d4cbf71a6077..f4596452f072 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
 		_raw_yield();
 }
 
-void _raw_spin_lock_wait(arch_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(arch_spinlock_t *lp)
 			_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
 
-void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,30 +80,30 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		local_irq_restore(flags);
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-int _raw_spin_trylock_retry(arch_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	unsigned int cpu = ~smp_processor_id();
 	int count;
 
 	for (count = spin_retry; count > 0; count--) {
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return 1;
 	}
 	return 0;
 }
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void _raw_spin_relax(arch_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
 	if (cpu != 0)
 		_raw_yield_cpu(~cpu);
 }
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
 
 void _raw_read_lock_wait(raw_rwlock_t *rw)
 {
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 5a05b3fcefbe..da1c6491ed4b 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
-#define __raw_spin_is_locked(x) ((x)->lock <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-	do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
  *
  * We make no fairness assumptions. They have a cost.
  */
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 	unsigned long oldval;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%2, %0 ! __raw_spin_lock \n\t"
+		"movli.l @%2, %0 ! arch_spin_lock \n\t"
 		"mov %0, %1 \n\t"
 		"mov #0, %0 \n\t"
 		"movco.l %0, @%2 \n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 	);
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-		"mov #1, %0 ! __raw_spin_unlock \n\t"
+		"mov #1, %0 ! arch_spin_unlock \n\t"
 		"mov.l %0, @%1 \n\t"
 		: "=&z" (tmp)
 		: "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 	);
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%2, %0 ! __raw_spin_trylock \n\t"
+		"movli.l @%2, %0 ! arch_spin_trylock \n\t"
 		"mov %0, %1 \n\t"
 		"mov #0, %0 \n\t"
 		"movco.l %0, @%2 \n\t"
@@ -219,8 +219,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* __ASM_SH_SPINLOCK_H */
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index b2d8a67f727e..9b0f2f53c81c 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
 
 #include <asm/psr.h>
 
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 	"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
 	: "g2", "memory", "cc");
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int result;
 	__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 	return (result == 0);
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
@@ -176,13 +176,13 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
 
 #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
 #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
 #define __raw_write_can_lock(rw) (!(rw)->lock)
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 38e16c40efc4..7cf58a2fcda4 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
21 * the spinner sections must be pre-V9 branches. 21 * the spinner sections must be pre-V9 branches.
22 */ 22 */
23 23
24#define __raw_spin_is_locked(lp) ((lp)->lock != 0) 24#define arch_spin_is_locked(lp) ((lp)->lock != 0)
25 25
26#define __raw_spin_unlock_wait(lp) \ 26#define arch_spin_unlock_wait(lp) \
27 do { rmb(); \ 27 do { rmb(); \
28 } while((lp)->lock) 28 } while((lp)->lock)
29 29
30static inline void __raw_spin_lock(arch_spinlock_t *lock) 30static inline void arch_spin_lock(arch_spinlock_t *lock)
31{ 31{
32 unsigned long tmp; 32 unsigned long tmp;
33 33
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
46 : "memory"); 46 : "memory");
47} 47}
48 48
49static inline int __raw_spin_trylock(arch_spinlock_t *lock) 49static inline int arch_spin_trylock(arch_spinlock_t *lock)
50{ 50{
51 unsigned long result; 51 unsigned long result;
52 52
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
59 return (result == 0UL); 59 return (result == 0UL);
60} 60}
61 61
62static inline void __raw_spin_unlock(arch_spinlock_t *lock) 62static inline void arch_spin_unlock(arch_spinlock_t *lock)
63{ 63{
64 __asm__ __volatile__( 64 __asm__ __volatile__(
65" stb %%g0, [%0]" 65" stb %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
68 : "memory"); 68 : "memory");
69} 69}
70 70
71static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 71static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
72{ 72{
73 unsigned long tmp1, tmp2; 73 unsigned long tmp1, tmp2;
74 74
@@ -222,9 +222,9 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
222#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 222#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
223#define __raw_write_can_lock(rw) (!(rw)->lock) 223#define __raw_write_can_lock(rw) (!(rw)->lock)
224 224
225#define _raw_spin_relax(lock) cpu_relax() 225#define arch_spin_relax(lock) cpu_relax()
226#define _raw_read_relax(lock) cpu_relax() 226#define arch_read_relax(lock) cpu_relax()
227#define _raw_write_relax(lock) cpu_relax() 227#define arch_write_relax(lock) cpu_relax()
228 228
229#endif /* !(__ASSEMBLY__) */ 229#endif /* !(__ASSEMBLY__) */
230 230
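The arch_spin_unlock_wait() macro in the sparc64 hunk above only watches the lock word until the holder's release becomes visible; it never acquires the lock. A stand-alone sketch of that read-only poll (toy type, C11 atomics):

#include <stdatomic.h>

typedef struct { _Atomic unsigned char lock; } toy_spinlock_t;

static inline void toy_spin_unlock_wait(toy_spinlock_t *lp)
{
	/* Poll until the lock word is observed clear; do not take the lock. */
	while (atomic_load_explicit(&lp->lock, memory_order_acquire))
		;	/* an rmb()/cpu_relax()-style pause belongs here */
}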
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5655f75f10b7..dd59a85a918f 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
731 731
732#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) 732#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
733 733
734static inline int __raw_spin_is_locked(struct arch_spinlock *lock) 734static inline int arch_spin_is_locked(struct arch_spinlock *lock)
735{ 735{
736 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); 736 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
737} 737}
738 738
739static inline int __raw_spin_is_contended(struct arch_spinlock *lock) 739static inline int arch_spin_is_contended(struct arch_spinlock *lock)
740{ 740{
741 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); 741 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
742} 742}
743#define __raw_spin_is_contended __raw_spin_is_contended 743#define arch_spin_is_contended arch_spin_is_contended
744 744
745static __always_inline void __raw_spin_lock(struct arch_spinlock *lock) 745static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
746{ 746{
747 PVOP_VCALL1(pv_lock_ops.spin_lock, lock); 747 PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
748} 748}
749 749
750static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock, 750static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
751 unsigned long flags) 751 unsigned long flags)
752{ 752{
753 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); 753 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
754} 754}
755 755
756static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock) 756static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
757{ 757{
758 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); 758 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
759} 759}
760 760
761static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock) 761static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
762{ 762{
763 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); 763 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
764} 764}
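The paravirt hunk above routes every arch_spin_*() entry point through the pv_lock_ops table, so a hypervisor-aware backend can replace the native spinning at boot. A rough stand-alone sketch of that indirection with a plain struct of function pointers (toy names, not the kernel's PVOP machinery):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { _Atomic unsigned char locked; } toy_spinlock_t;

struct toy_lock_ops {
	void (*lock)(toy_spinlock_t *lock);
	bool (*trylock)(toy_spinlock_t *lock);
	void (*unlock)(toy_spinlock_t *lock);
};

static void native_lock(toy_spinlock_t *l)
{
	while (atomic_exchange_explicit(&l->locked, 1, memory_order_acquire))
		;
}

static bool native_trylock(toy_spinlock_t *l)
{
	return !atomic_exchange_explicit(&l->locked, 1, memory_order_acquire);
}

static void native_unlock(toy_spinlock_t *l)
{
	atomic_store_explicit(&l->locked, 0, memory_order_release);
}

/* A paravirtualized guest would point these at hypercall-based versions. */
static struct toy_lock_ops toy_lock_ops = { native_lock, native_trylock, native_unlock };

static inline void toy_arch_spin_lock(toy_spinlock_t *l)   { toy_lock_ops.lock(l); }
static inline void toy_arch_spin_unlock(toy_spinlock_t *l) { toy_lock_ops.unlock(l); }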
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 204b524fcf57..ab9055fd57d9 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
174 174
175#ifndef CONFIG_PARAVIRT_SPINLOCKS 175#ifndef CONFIG_PARAVIRT_SPINLOCKS
176 176
177static inline int __raw_spin_is_locked(arch_spinlock_t *lock) 177static inline int arch_spin_is_locked(arch_spinlock_t *lock)
178{ 178{
179 return __ticket_spin_is_locked(lock); 179 return __ticket_spin_is_locked(lock);
180} 180}
181 181
182static inline int __raw_spin_is_contended(arch_spinlock_t *lock) 182static inline int arch_spin_is_contended(arch_spinlock_t *lock)
183{ 183{
184 return __ticket_spin_is_contended(lock); 184 return __ticket_spin_is_contended(lock);
185} 185}
186#define __raw_spin_is_contended __raw_spin_is_contended 186#define arch_spin_is_contended arch_spin_is_contended
187 187
188static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) 188static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
189{ 189{
190 __ticket_spin_lock(lock); 190 __ticket_spin_lock(lock);
191} 191}
192 192
193static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) 193static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
194{ 194{
195 return __ticket_spin_trylock(lock); 195 return __ticket_spin_trylock(lock);
196} 196}
197 197
198static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) 198static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
199{ 199{
200 __ticket_spin_unlock(lock); 200 __ticket_spin_unlock(lock);
201} 201}
202 202
203static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, 203static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
204 unsigned long flags) 204 unsigned long flags)
205{ 205{
206 __raw_spin_lock(lock); 206 arch_spin_lock(lock);
207} 207}
208 208
209#endif /* CONFIG_PARAVIRT_SPINLOCKS */ 209#endif /* CONFIG_PARAVIRT_SPINLOCKS */
210 210
211static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) 211static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
212{ 212{
213 while (__raw_spin_is_locked(lock)) 213 while (arch_spin_is_locked(lock))
214 cpu_relax(); 214 cpu_relax();
215} 215}
216 216
@@ -298,9 +298,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
298#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 298#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
299#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 299#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
300 300
301#define _raw_spin_relax(lock) cpu_relax() 301#define arch_spin_relax(lock) cpu_relax()
302#define _raw_read_relax(lock) cpu_relax() 302#define arch_read_relax(lock) cpu_relax()
303#define _raw_write_relax(lock) cpu_relax() 303#define arch_write_relax(lock) cpu_relax()
304 304
305/* The {read|write|spin}_lock() on x86 are full memory barriers. */ 305/* The {read|write|spin}_lock() on x86 are full memory barriers. */
306static inline void smp_mb__after_lock(void) { } 306static inline void smp_mb__after_lock(void) { }
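The x86 hunk above forwards arch_spin_lock() and friends to the __ticket_spin_*() helpers, which hand the lock to waiters in FIFO order. A small stand-alone sketch of the ticket-lock idea (toy encoding, not the kernel's packed ticket format):

#include <stdatomic.h>

typedef struct {
	_Atomic unsigned int next;	/* next ticket to hand out */
	_Atomic unsigned int owner;	/* ticket currently being served */
} toy_ticket_lock_t;

static void toy_ticket_lock(toy_ticket_lock_t *l)
{
	unsigned int me = atomic_fetch_add_explicit(&l->next, 1, memory_order_relaxed);

	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;	/* a cpu_relax()-style pause belongs here */
}

static void toy_ticket_unlock(toy_ticket_lock_t *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

static int toy_ticket_is_locked(toy_ticket_lock_t *l)
{
	return atomic_load_explicit(&l->next, memory_order_relaxed) !=
	       atomic_load_explicit(&l->owner, memory_order_relaxed);
}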
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 5b75afac8a38..0a0aa1cec8f1 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void)
207 /* racy, but better than risking deadlock. */ 207 /* racy, but better than risking deadlock. */
208 raw_local_irq_save(flags); 208 raw_local_irq_save(flags);
209 cpu = smp_processor_id(); 209 cpu = smp_processor_id();
210 if (!__raw_spin_trylock(&die_lock)) { 210 if (!arch_spin_trylock(&die_lock)) {
211 if (cpu == die_owner) 211 if (cpu == die_owner)
212 /* nested oops. should stop eventually */; 212 /* nested oops. should stop eventually */;
213 else 213 else
214 __raw_spin_lock(&die_lock); 214 arch_spin_lock(&die_lock);
215 } 215 }
216 die_nest_count++; 216 die_nest_count++;
217 die_owner = cpu; 217 die_owner = cpu;
@@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
231 die_nest_count--; 231 die_nest_count--;
232 if (!die_nest_count) 232 if (!die_nest_count)
233 /* Nest count reaches zero, release the lock. */ 233 /* Nest count reaches zero, release the lock. */
234 __raw_spin_unlock(&die_lock); 234 arch_spin_unlock(&die_lock);
235 raw_local_irq_restore(flags); 235 raw_local_irq_restore(flags);
236 oops_exit(); 236 oops_exit();
237 237
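The oops_begin() hunk above shows a recursion-safe acquire: try the lock first, and only block when a different CPU owns it, so an oops raised while already holding die_lock nests instead of deadlocking. A self-contained sketch of the same shape (toy names; the real code also disables interrupts and reads smp_processor_id()):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { _Atomic unsigned char locked; } toy_spinlock_t;

static bool toy_trylock(toy_spinlock_t *l)
{
	return !atomic_exchange_explicit(&l->locked, 1, memory_order_acquire);
}

static void toy_lock(toy_spinlock_t *l)
{
	while (!toy_trylock(l))
		;	/* spin */
}

static toy_spinlock_t toy_die_lock;
static int toy_die_owner = -1;
static int toy_die_nest;

static void toy_oops_begin(int cpu)
{
	if (!toy_trylock(&toy_die_lock)) {
		if (cpu != toy_die_owner)
			toy_lock(&toy_die_lock);	/* another CPU holds it: wait */
		/* else: nested oops on the owning CPU, just fall through */
	}
	toy_die_nest++;
	toy_die_owner = cpu;
}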
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index a0f39e090684..676b8c77a976 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -10,7 +10,7 @@
10static inline void 10static inline void
11default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 11default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
12{ 12{
13 __raw_spin_lock(lock); 13 arch_spin_lock(lock);
14} 14}
15 15
16struct pv_lock_ops pv_lock_ops = { 16struct pv_lock_ops pv_lock_ops = {
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index f1714697a09a..0aa5fed8b9e6 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void)
62 * previous TSC that was measured (possibly on 62 * previous TSC that was measured (possibly on
63 * another CPU) and update the previous TSC timestamp. 63 * another CPU) and update the previous TSC timestamp.
64 */ 64 */
65 __raw_spin_lock(&sync_lock); 65 arch_spin_lock(&sync_lock);
66 prev = last_tsc; 66 prev = last_tsc;
67 rdtsc_barrier(); 67 rdtsc_barrier();
68 now = get_cycles(); 68 now = get_cycles();
69 rdtsc_barrier(); 69 rdtsc_barrier();
70 last_tsc = now; 70 last_tsc = now;
71 __raw_spin_unlock(&sync_lock); 71 arch_spin_unlock(&sync_lock);
72 72
73 /* 73 /*
74 * Be nice every now and then (and also check whether 74 * Be nice every now and then (and also check whether
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void)
87 * we saw a time-warp of the TSC going backwards: 87 * we saw a time-warp of the TSC going backwards:
88 */ 88 */
89 if (unlikely(prev > now)) { 89 if (unlikely(prev > now)) {
90 __raw_spin_lock(&sync_lock); 90 arch_spin_lock(&sync_lock);
91 max_warp = max(max_warp, prev - now); 91 max_warp = max(max_warp, prev - now);
92 nr_warps++; 92 nr_warps++;
93 __raw_spin_unlock(&sync_lock); 93 arch_spin_unlock(&sync_lock);
94 } 94 }
95 } 95 }
96 WARN(!(now-start), 96 WARN(!(now-start),
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index dcf0afad4a7f..ecc44a8e2b44 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -22,12 +22,12 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
22#define _atomic_spin_lock_irqsave(l,f) do { \ 22#define _atomic_spin_lock_irqsave(l,f) do { \
23 arch_spinlock_t *s = ATOMIC_HASH(l); \ 23 arch_spinlock_t *s = ATOMIC_HASH(l); \
24 local_irq_save(f); \ 24 local_irq_save(f); \
25 __raw_spin_lock(s); \ 25 arch_spin_lock(s); \
26} while(0) 26} while(0)
27 27
28#define _atomic_spin_unlock_irqrestore(l,f) do { \ 28#define _atomic_spin_unlock_irqrestore(l,f) do { \
29 arch_spinlock_t *s = ATOMIC_HASH(l); \ 29 arch_spinlock_t *s = ATOMIC_HASH(l); \
30 __raw_spin_unlock(s); \ 30 arch_spin_unlock(s); \
31 local_irq_restore(f); \ 31 local_irq_restore(f); \
32} while(0) 32} while(0)
33 33
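The asm-generic hunk above emulates atomic operations on architectures without native ones: the target address is hashed onto a small pool of spinlocks, and the matching lock is taken around the read-modify-write with interrupts disabled. A user-space flavoured sketch of the hashing idea, leaving out the irq handling (toy names):

#include <stdatomic.h>
#include <stdint.h>

#define TOY_HASH_SIZE 16
static _Atomic unsigned char toy_hash_locks[TOY_HASH_SIZE];

/* Pick one of a small pool of locks based on the variable's address. */
static inline _Atomic unsigned char *toy_hash_lock(const void *addr)
{
	return &toy_hash_locks[((uintptr_t)addr >> 4) % TOY_HASH_SIZE];
}

static inline void toy_atomic_add(long *v, long n)
{
	_Atomic unsigned char *l = toy_hash_lock(v);

	while (atomic_exchange_explicit(l, 1, memory_order_acquire))
		;				/* acquire the hashed lock */
	*v += n;				/* protected read-modify-write */
	atomic_store_explicit(l, 0, memory_order_release);
}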
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 5ef7a4c060b5..de3a022489c6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -14,7 +14,7 @@
14 * linux/spinlock_types.h: 14 * linux/spinlock_types.h:
15 * defines the generic type and initializers 15 * defines the generic type and initializers
16 * 16 *
17 * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel 17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code 18 * implementations, mostly inline assembly code
19 * 19 *
20 * (also included on UP-debug builds:) 20 * (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
34 * defines the generic type and initializers 34 * defines the generic type and initializers
35 * 35 *
36 * linux/spinlock_up.h: 36 * linux/spinlock_up.h:
37 * contains the __raw_spin_*()/etc. version of UP 37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt 38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds) 39 * builds)
40 * 40 *
@@ -103,17 +103,17 @@ do { \
103 do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) 103 do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
104#endif 104#endif
105 105
106#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) 106#define spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
107 107
108#ifdef CONFIG_GENERIC_LOCKBREAK 108#ifdef CONFIG_GENERIC_LOCKBREAK
109#define spin_is_contended(lock) ((lock)->break_lock) 109#define spin_is_contended(lock) ((lock)->break_lock)
110#else 110#else
111 111
112#ifdef __raw_spin_is_contended 112#ifdef arch_spin_is_contended
113#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) 113#define spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
114#else 114#else
115#define spin_is_contended(lock) (((void)(lock), 0)) 115#define spin_is_contended(lock) (((void)(lock), 0))
116#endif /*__raw_spin_is_contended*/ 116#endif /*arch_spin_is_contended*/
117#endif 117#endif
118 118
119/* The lock does not imply full memory barrier. */ 119/* The lock does not imply full memory barrier. */
@@ -125,7 +125,7 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
125 * spin_unlock_wait - wait until the spinlock gets unlocked 125 * spin_unlock_wait - wait until the spinlock gets unlocked
126 * @lock: the spinlock in question. 126 * @lock: the spinlock in question.
127 */ 127 */
128#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) 128#define spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
129 129
130#ifdef CONFIG_DEBUG_SPINLOCK 130#ifdef CONFIG_DEBUG_SPINLOCK
131 extern void _raw_spin_lock(spinlock_t *lock); 131 extern void _raw_spin_lock(spinlock_t *lock);
@@ -133,11 +133,11 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
133 extern int _raw_spin_trylock(spinlock_t *lock); 133 extern int _raw_spin_trylock(spinlock_t *lock);
134 extern void _raw_spin_unlock(spinlock_t *lock); 134 extern void _raw_spin_unlock(spinlock_t *lock);
135#else 135#else
136# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) 136# define _raw_spin_lock(lock) arch_spin_lock(&(lock)->raw_lock)
137# define _raw_spin_lock_flags(lock, flags) \ 137# define _raw_spin_lock_flags(lock, flags) \
138 __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) 138 arch_spin_lock_flags(&(lock)->raw_lock, *(flags))
139# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) 139# define _raw_spin_trylock(lock) arch_spin_trylock(&(lock)->raw_lock)
140# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) 140# define _raw_spin_unlock(lock) arch_spin_unlock(&(lock)->raw_lock)
141#endif 141#endif
142 142
143/* 143/*
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 8ee2ac1bf636..1d3bcc3cf7c6 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
18 */ 18 */
19 19
20#ifdef CONFIG_DEBUG_SPINLOCK 20#ifdef CONFIG_DEBUG_SPINLOCK
21#define __raw_spin_is_locked(x) ((x)->slock == 0) 21#define arch_spin_is_locked(x) ((x)->slock == 0)
22 22
23static inline void __raw_spin_lock(arch_spinlock_t *lock) 23static inline void arch_spin_lock(arch_spinlock_t *lock)
24{ 24{
25 lock->slock = 0; 25 lock->slock = 0;
26} 26}
27 27
28static inline void 28static inline void
29__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 29arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
30{ 30{
31 local_irq_save(flags); 31 local_irq_save(flags);
32 lock->slock = 0; 32 lock->slock = 0;
33} 33}
34 34
35static inline int __raw_spin_trylock(arch_spinlock_t *lock) 35static inline int arch_spin_trylock(arch_spinlock_t *lock)
36{ 36{
37 char oldval = lock->slock; 37 char oldval = lock->slock;
38 38
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
41 return oldval > 0; 41 return oldval > 0;
42} 42}
43 43
44static inline void __raw_spin_unlock(arch_spinlock_t *lock) 44static inline void arch_spin_unlock(arch_spinlock_t *lock)
45{ 45{
46 lock->slock = 1; 46 lock->slock = 1;
47} 47}
@@ -57,20 +57,20 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
57#define __raw_write_unlock(lock) do { (void)(lock); } while (0) 57#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
58 58
59#else /* DEBUG_SPINLOCK */ 59#else /* DEBUG_SPINLOCK */
60#define __raw_spin_is_locked(lock) ((void)(lock), 0) 60#define arch_spin_is_locked(lock) ((void)(lock), 0)
61/* for sched.c and kernel_lock.c: */ 61/* for sched.c and kernel_lock.c: */
62# define __raw_spin_lock(lock) do { (void)(lock); } while (0) 62# define arch_spin_lock(lock) do { (void)(lock); } while (0)
63# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 63# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
64# define __raw_spin_unlock(lock) do { (void)(lock); } while (0) 64# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
65# define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) 65# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
66#endif /* DEBUG_SPINLOCK */ 66#endif /* DEBUG_SPINLOCK */
67 67
68#define __raw_spin_is_contended(lock) (((void)(lock), 0)) 68#define arch_spin_is_contended(lock) (((void)(lock), 0))
69 69
70#define __raw_read_can_lock(lock) (((void)(lock), 1)) 70#define __raw_read_can_lock(lock) (((void)(lock), 1))
71#define __raw_write_can_lock(lock) (((void)(lock), 1)) 71#define __raw_write_can_lock(lock) (((void)(lock), 1))
72 72
73#define __raw_spin_unlock_wait(lock) \ 73#define arch_spin_unlock_wait(lock) \
74 do { cpu_relax(); } while (__raw_spin_is_locked(lock)) 74 do { cpu_relax(); } while (arch_spin_is_locked(lock))
75 75
76#endif /* __LINUX_SPINLOCK_UP_H */ 76#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 2389e3f85cf6..5feaddcdbe49 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -77,7 +77,7 @@ static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED
77 77
78static int graph_lock(void) 78static int graph_lock(void)
79{ 79{
80 __raw_spin_lock(&lockdep_lock); 80 arch_spin_lock(&lockdep_lock);
81 /* 81 /*
82 * Make sure that if another CPU detected a bug while 82 * Make sure that if another CPU detected a bug while
83 * walking the graph we dont change it (while the other 83 * walking the graph we dont change it (while the other
@@ -85,7 +85,7 @@ static int graph_lock(void)
85 * dropped already) 85 * dropped already)
86 */ 86 */
87 if (!debug_locks) { 87 if (!debug_locks) {
88 __raw_spin_unlock(&lockdep_lock); 88 arch_spin_unlock(&lockdep_lock);
89 return 0; 89 return 0;
90 } 90 }
91 /* prevent any recursions within lockdep from causing deadlocks */ 91 /* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +95,11 @@ static int graph_lock(void)
95 95
96static inline int graph_unlock(void) 96static inline int graph_unlock(void)
97{ 97{
98 if (debug_locks && !__raw_spin_is_locked(&lockdep_lock)) 98 if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
99 return DEBUG_LOCKS_WARN_ON(1); 99 return DEBUG_LOCKS_WARN_ON(1);
100 100
101 current->lockdep_recursion--; 101 current->lockdep_recursion--;
102 __raw_spin_unlock(&lockdep_lock); 102 arch_spin_unlock(&lockdep_lock);
103 return 0; 103 return 0;
104} 104}
105 105
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
111{ 111{
112 int ret = debug_locks_off(); 112 int ret = debug_locks_off();
113 113
114 __raw_spin_unlock(&lockdep_lock); 114 arch_spin_unlock(&lockdep_lock);
115 115
116 return ret; 116 return ret;
117} 117}
@@ -1170,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
1170 this.class = class; 1170 this.class = class;
1171 1171
1172 local_irq_save(flags); 1172 local_irq_save(flags);
1173 __raw_spin_lock(&lockdep_lock); 1173 arch_spin_lock(&lockdep_lock);
1174 ret = __lockdep_count_forward_deps(&this); 1174 ret = __lockdep_count_forward_deps(&this);
1175 __raw_spin_unlock(&lockdep_lock); 1175 arch_spin_unlock(&lockdep_lock);
1176 local_irq_restore(flags); 1176 local_irq_restore(flags);
1177 1177
1178 return ret; 1178 return ret;
@@ -1197,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
1197 this.class = class; 1197 this.class = class;
1198 1198
1199 local_irq_save(flags); 1199 local_irq_save(flags);
1200 __raw_spin_lock(&lockdep_lock); 1200 arch_spin_lock(&lockdep_lock);
1201 ret = __lockdep_count_backward_deps(&this); 1201 ret = __lockdep_count_backward_deps(&this);
1202 __raw_spin_unlock(&lockdep_lock); 1202 arch_spin_unlock(&lockdep_lock);
1203 local_irq_restore(flags); 1203 local_irq_restore(flags);
1204 1204
1205 return ret; 1205 return ret;
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 6b2d735846a5..7bebbd15b342 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock)
43 \ 43 \
44 DEBUG_LOCKS_WARN_ON(in_interrupt()); \ 44 DEBUG_LOCKS_WARN_ON(in_interrupt()); \
45 local_irq_save(flags); \ 45 local_irq_save(flags); \
46 __raw_spin_lock(&(lock)->raw_lock); \ 46 arch_spin_lock(&(lock)->raw_lock); \
47 DEBUG_LOCKS_WARN_ON(l->magic != l); \ 47 DEBUG_LOCKS_WARN_ON(l->magic != l); \
48 } while (0) 48 } while (0)
49 49
50#define spin_unlock_mutex(lock, flags) \ 50#define spin_unlock_mutex(lock, flags) \
51 do { \ 51 do { \
52 __raw_spin_unlock(&(lock)->raw_lock); \ 52 arch_spin_unlock(&(lock)->raw_lock); \
53 local_irq_restore(flags); \ 53 local_irq_restore(flags); \
54 preempt_check_resched(); \ 54 preempt_check_resched(); \
55 } while (0) 55 } while (0)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index e6e136318437..fbb5f8b78357 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -53,7 +53,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \
53 if (!(lock)->break_lock) \ 53 if (!(lock)->break_lock) \
54 (lock)->break_lock = 1; \ 54 (lock)->break_lock = 1; \
55 while (!op##_can_lock(lock) && (lock)->break_lock) \ 55 while (!op##_can_lock(lock) && (lock)->break_lock) \
56 _raw_##op##_relax(&lock->raw_lock); \ 56 arch_##op##_relax(&lock->raw_lock); \
57 } \ 57 } \
58 (lock)->break_lock = 0; \ 58 (lock)->break_lock = 0; \
59} \ 59} \
@@ -73,7 +73,7 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
73 if (!(lock)->break_lock) \ 73 if (!(lock)->break_lock) \
74 (lock)->break_lock = 1; \ 74 (lock)->break_lock = 1; \
75 while (!op##_can_lock(lock) && (lock)->break_lock) \ 75 while (!op##_can_lock(lock) && (lock)->break_lock) \
76 _raw_##op##_relax(&lock->raw_lock); \ 76 arch_##op##_relax(&lock->raw_lock); \
77 } \ 77 } \
78 (lock)->break_lock = 0; \ 78 (lock)->break_lock = 0; \
79 return flags; \ 79 return flags; \
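The lock-building macros above spin on the cheap op##_can_lock() test and call the renamed arch_*_relax() hook on each iteration, only retrying the expensive trylock once the lock looks free. A stand-alone test-and-test-and-set sketch of that shape (toy names; the kernel version also toggles break_lock and preemption):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { _Atomic unsigned char locked; } toy_spinlock_t;

static inline bool toy_trylock(toy_spinlock_t *l)
{
	return !atomic_exchange_explicit(&l->locked, 1, memory_order_acquire);
}

static void toy_lock_politely(toy_spinlock_t *l)
{
	while (!toy_trylock(l)) {
		/* Read-only wait keeps the cache line shared until it looks free. */
		while (atomic_load_explicit(&l->locked, memory_order_relaxed))
			;	/* the arch_spin_relax()/cpu_relax() pause goes here */
	}
}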
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fb7a0fa508b9..f58c9ad15830 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2834 int ret; 2834 int ret;
2835 2835
2836 local_irq_save(flags); 2836 local_irq_save(flags);
2837 __raw_spin_lock(&cpu_buffer->lock); 2837 arch_spin_lock(&cpu_buffer->lock);
2838 2838
2839 again: 2839 again:
2840 /* 2840 /*
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2923 goto again; 2923 goto again;
2924 2924
2925 out: 2925 out:
2926 __raw_spin_unlock(&cpu_buffer->lock); 2926 arch_spin_unlock(&cpu_buffer->lock);
2927 local_irq_restore(flags); 2927 local_irq_restore(flags);
2928 2928
2929 return reader; 2929 return reader;
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
3286 synchronize_sched(); 3286 synchronize_sched();
3287 3287
3288 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3288 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3289 __raw_spin_lock(&cpu_buffer->lock); 3289 arch_spin_lock(&cpu_buffer->lock);
3290 rb_iter_reset(iter); 3290 rb_iter_reset(iter);
3291 __raw_spin_unlock(&cpu_buffer->lock); 3291 arch_spin_unlock(&cpu_buffer->lock);
3292 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3292 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3293 3293
3294 return iter; 3294 return iter;
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3408 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 3408 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3409 goto out; 3409 goto out;
3410 3410
3411 __raw_spin_lock(&cpu_buffer->lock); 3411 arch_spin_lock(&cpu_buffer->lock);
3412 3412
3413 rb_reset_cpu(cpu_buffer); 3413 rb_reset_cpu(cpu_buffer);
3414 3414
3415 __raw_spin_unlock(&cpu_buffer->lock); 3415 arch_spin_unlock(&cpu_buffer->lock);
3416 3416
3417 out: 3417 out:
3418 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3418 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 63bc1cc38219..bb6b5e7fa2a2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
555 return; 555 return;
556 556
557 WARN_ON_ONCE(!irqs_disabled()); 557 WARN_ON_ONCE(!irqs_disabled());
558 __raw_spin_lock(&ftrace_max_lock); 558 arch_spin_lock(&ftrace_max_lock);
559 559
560 tr->buffer = max_tr.buffer; 560 tr->buffer = max_tr.buffer;
561 max_tr.buffer = buf; 561 max_tr.buffer = buf;
562 562
563 __update_max_tr(tr, tsk, cpu); 563 __update_max_tr(tr, tsk, cpu);
564 __raw_spin_unlock(&ftrace_max_lock); 564 arch_spin_unlock(&ftrace_max_lock);
565} 565}
566 566
567/** 567/**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
581 return; 581 return;
582 582
583 WARN_ON_ONCE(!irqs_disabled()); 583 WARN_ON_ONCE(!irqs_disabled());
584 __raw_spin_lock(&ftrace_max_lock); 584 arch_spin_lock(&ftrace_max_lock);
585 585
586 ftrace_disable_cpu(); 586 ftrace_disable_cpu();
587 587
@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
603 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 603 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
604 604
605 __update_max_tr(tr, tsk, cpu); 605 __update_max_tr(tr, tsk, cpu);
606 __raw_spin_unlock(&ftrace_max_lock); 606 arch_spin_unlock(&ftrace_max_lock);
607} 607}
608#endif /* CONFIG_TRACER_MAX_TRACE */ 608#endif /* CONFIG_TRACER_MAX_TRACE */
609 609
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
915 * nor do we want to disable interrupts, 915 * nor do we want to disable interrupts,
916 * so if we miss here, then better luck next time. 916 * so if we miss here, then better luck next time.
917 */ 917 */
918 if (!__raw_spin_trylock(&trace_cmdline_lock)) 918 if (!arch_spin_trylock(&trace_cmdline_lock))
919 return; 919 return;
920 920
921 idx = map_pid_to_cmdline[tsk->pid]; 921 idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
940 940
941 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); 941 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
942 942
943 __raw_spin_unlock(&trace_cmdline_lock); 943 arch_spin_unlock(&trace_cmdline_lock);
944} 944}
945 945
946void trace_find_cmdline(int pid, char comm[]) 946void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
958 } 958 }
959 959
960 preempt_disable(); 960 preempt_disable();
961 __raw_spin_lock(&trace_cmdline_lock); 961 arch_spin_lock(&trace_cmdline_lock);
962 map = map_pid_to_cmdline[pid]; 962 map = map_pid_to_cmdline[pid];
963 if (map != NO_CMDLINE_MAP) 963 if (map != NO_CMDLINE_MAP)
964 strcpy(comm, saved_cmdlines[map]); 964 strcpy(comm, saved_cmdlines[map]);
965 else 965 else
966 strcpy(comm, "<...>"); 966 strcpy(comm, "<...>");
967 967
968 __raw_spin_unlock(&trace_cmdline_lock); 968 arch_spin_unlock(&trace_cmdline_lock);
969 preempt_enable(); 969 preempt_enable();
970} 970}
971 971
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1283 1283
1284 /* Lockdep uses trace_printk for lock tracing */ 1284 /* Lockdep uses trace_printk for lock tracing */
1285 local_irq_save(flags); 1285 local_irq_save(flags);
1286 __raw_spin_lock(&trace_buf_lock); 1286 arch_spin_lock(&trace_buf_lock);
1287 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1287 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1288 1288
1289 if (len > TRACE_BUF_SIZE || len < 0) 1289 if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1304 ring_buffer_unlock_commit(buffer, event); 1304 ring_buffer_unlock_commit(buffer, event);
1305 1305
1306out_unlock: 1306out_unlock:
1307 __raw_spin_unlock(&trace_buf_lock); 1307 arch_spin_unlock(&trace_buf_lock);
1308 local_irq_restore(flags); 1308 local_irq_restore(flags);
1309 1309
1310out: 1310out:
@@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr,
1360 1360
1361 pause_graph_tracing(); 1361 pause_graph_tracing();
1362 raw_local_irq_save(irq_flags); 1362 raw_local_irq_save(irq_flags);
1363 __raw_spin_lock(&trace_buf_lock); 1363 arch_spin_lock(&trace_buf_lock);
1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1365 1365
1366 size = sizeof(*entry) + len + 1; 1366 size = sizeof(*entry) + len + 1;
@@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr,
1378 ring_buffer_unlock_commit(buffer, event); 1378 ring_buffer_unlock_commit(buffer, event);
1379 1379
1380 out_unlock: 1380 out_unlock:
1381 __raw_spin_unlock(&trace_buf_lock); 1381 arch_spin_unlock(&trace_buf_lock);
1382 raw_local_irq_restore(irq_flags); 1382 raw_local_irq_restore(irq_flags);
1383 unpause_graph_tracing(); 1383 unpause_graph_tracing();
1384 out: 1384 out:
@@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2279 mutex_lock(&tracing_cpumask_update_lock); 2279 mutex_lock(&tracing_cpumask_update_lock);
2280 2280
2281 local_irq_disable(); 2281 local_irq_disable();
2282 __raw_spin_lock(&ftrace_max_lock); 2282 arch_spin_lock(&ftrace_max_lock);
2283 for_each_tracing_cpu(cpu) { 2283 for_each_tracing_cpu(cpu) {
2284 /* 2284 /*
2285 * Increase/decrease the disabled counter if we are 2285 * Increase/decrease the disabled counter if we are
@@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2294 atomic_dec(&global_trace.data[cpu]->disabled); 2294 atomic_dec(&global_trace.data[cpu]->disabled);
2295 } 2295 }
2296 } 2296 }
2297 __raw_spin_unlock(&ftrace_max_lock); 2297 arch_spin_unlock(&ftrace_max_lock);
2298 local_irq_enable(); 2298 local_irq_enable();
2299 2299
2300 cpumask_copy(tracing_cpumask, tracing_cpumask_new); 2300 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)
4318 4318
4319 /* only one dump */ 4319 /* only one dump */
4320 local_irq_save(flags); 4320 local_irq_save(flags);
4321 __raw_spin_lock(&ftrace_dump_lock); 4321 arch_spin_lock(&ftrace_dump_lock);
4322 if (dump_ran) 4322 if (dump_ran)
4323 goto out; 4323 goto out;
4324 4324
@@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
4393 } 4393 }
4394 4394
4395 out: 4395 out:
4396 __raw_spin_unlock(&ftrace_dump_lock); 4396 arch_spin_unlock(&ftrace_dump_lock);
4397 local_irq_restore(flags); 4397 local_irq_restore(flags);
4398} 4398}
4399 4399
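trace_save_cmdline() in the hunk above deliberately uses arch_spin_trylock() and simply gives up on contention, since this path runs from scheduling hooks and must not spin or disable interrupts. A tiny sketch of that best-effort pattern (toy names):

#include <stdatomic.h>

static _Atomic unsigned char toy_cmdline_lock;

static void toy_record_comm(const char *comm)
{
	/* Best effort: if another CPU holds the lock, drop this update. */
	if (atomic_exchange_explicit(&toy_cmdline_lock, 1, memory_order_acquire))
		return;

	/* ... copy comm into the shared table here ... */
	(void)comm;

	atomic_store_explicit(&toy_cmdline_lock, 0, memory_order_release);
}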
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 433e2eda2d01..84a3a7ba072a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void)
94 if (unlikely(in_nmi())) 94 if (unlikely(in_nmi()))
95 goto out; 95 goto out;
96 96
97 __raw_spin_lock(&trace_clock_struct.lock); 97 arch_spin_lock(&trace_clock_struct.lock);
98 98
99 /* 99 /*
100 * TODO: if this happens often then maybe we should reset 100 * TODO: if this happens often then maybe we should reset
@@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void)
106 106
107 trace_clock_struct.prev_time = now; 107 trace_clock_struct.prev_time = now;
108 108
109 __raw_spin_unlock(&trace_clock_struct.lock); 109 arch_spin_unlock(&trace_clock_struct.lock);
110 110
111 out: 111 out:
112 raw_local_irq_restore(flags); 112 raw_local_irq_restore(flags);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e347853564e9..0271742abb8d 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
143 goto out; 143 goto out;
144 144
145 local_irq_save(flags); 145 local_irq_save(flags);
146 __raw_spin_lock(&wakeup_lock); 146 arch_spin_lock(&wakeup_lock);
147 147
148 /* We could race with grabbing wakeup_lock */ 148 /* We could race with grabbing wakeup_lock */
149 if (unlikely(!tracer_enabled || next != wakeup_task)) 149 if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
169 169
170out_unlock: 170out_unlock:
171 __wakeup_reset(wakeup_trace); 171 __wakeup_reset(wakeup_trace);
172 __raw_spin_unlock(&wakeup_lock); 172 arch_spin_unlock(&wakeup_lock);
173 local_irq_restore(flags); 173 local_irq_restore(flags);
174out: 174out:
175 atomic_dec(&wakeup_trace->data[cpu]->disabled); 175 atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr)
193 tracing_reset_online_cpus(tr); 193 tracing_reset_online_cpus(tr);
194 194
195 local_irq_save(flags); 195 local_irq_save(flags);
196 __raw_spin_lock(&wakeup_lock); 196 arch_spin_lock(&wakeup_lock);
197 __wakeup_reset(tr); 197 __wakeup_reset(tr);
198 __raw_spin_unlock(&wakeup_lock); 198 arch_spin_unlock(&wakeup_lock);
199 local_irq_restore(flags); 199 local_irq_restore(flags);
200} 200}
201 201
@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
225 goto out; 225 goto out;
226 226
227 /* interrupts should be off from try_to_wake_up */ 227 /* interrupts should be off from try_to_wake_up */
228 __raw_spin_lock(&wakeup_lock); 228 arch_spin_lock(&wakeup_lock);
229 229
230 /* check for races. */ 230 /* check for races. */
231 if (!tracer_enabled || p->prio >= wakeup_prio) 231 if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
255 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 255 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
256 256
257out_locked: 257out_locked:
258 __raw_spin_unlock(&wakeup_lock); 258 arch_spin_unlock(&wakeup_lock);
259out: 259out:
260 atomic_dec(&wakeup_trace->data[cpu]->disabled); 260 atomic_dec(&wakeup_trace->data[cpu]->disabled);
261} 261}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index dc98309e839a..280fea470d67 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
67 67
68 /* Don't allow flipping of max traces now */ 68 /* Don't allow flipping of max traces now */
69 local_irq_save(flags); 69 local_irq_save(flags);
70 __raw_spin_lock(&ftrace_max_lock); 70 arch_spin_lock(&ftrace_max_lock);
71 71
72 cnt = ring_buffer_entries(tr->buffer); 72 cnt = ring_buffer_entries(tr->buffer);
73 73
@@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
85 break; 85 break;
86 } 86 }
87 tracing_on(); 87 tracing_on();
88 __raw_spin_unlock(&ftrace_max_lock); 88 arch_spin_unlock(&ftrace_max_lock);
89 local_irq_restore(flags); 89 local_irq_restore(flags);
90 90
91 if (count) 91 if (count)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 728c35221483..678a5120ee30 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -54,7 +54,7 @@ static inline void check_stack(void)
54 return; 54 return;
55 55
56 local_irq_save(flags); 56 local_irq_save(flags);
57 __raw_spin_lock(&max_stack_lock); 57 arch_spin_lock(&max_stack_lock);
58 58
59 /* a race could have already updated it */ 59 /* a race could have already updated it */
60 if (this_size <= max_stack_size) 60 if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
103 } 103 }
104 104
105 out: 105 out:
106 __raw_spin_unlock(&max_stack_lock); 106 arch_spin_unlock(&max_stack_lock);
107 local_irq_restore(flags); 107 local_irq_restore(flags);
108} 108}
109 109
@@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
171 return ret; 171 return ret;
172 172
173 local_irq_save(flags); 173 local_irq_save(flags);
174 __raw_spin_lock(&max_stack_lock); 174 arch_spin_lock(&max_stack_lock);
175 *ptr = val; 175 *ptr = val;
176 __raw_spin_unlock(&max_stack_lock); 176 arch_spin_unlock(&max_stack_lock);
177 local_irq_restore(flags); 177 local_irq_restore(flags);
178 178
179 return count; 179 return count;
@@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
207static void *t_start(struct seq_file *m, loff_t *pos) 207static void *t_start(struct seq_file *m, loff_t *pos)
208{ 208{
209 local_irq_disable(); 209 local_irq_disable();
210 __raw_spin_lock(&max_stack_lock); 210 arch_spin_lock(&max_stack_lock);
211 211
212 if (*pos == 0) 212 if (*pos == 0)
213 return SEQ_START_TOKEN; 213 return SEQ_START_TOKEN;
@@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
217 217
218static void t_stop(struct seq_file *m, void *p) 218static void t_stop(struct seq_file *m, void *p)
219{ 219{
220 __raw_spin_unlock(&max_stack_lock); 220 arch_spin_unlock(&max_stack_lock);
221 local_irq_enable(); 221 local_irq_enable();
222} 222}
223 223
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index f73004137141..1304fe094546 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
106 106
107 for (;;) { 107 for (;;) {
108 for (i = 0; i < loops; i++) { 108 for (i = 0; i < loops; i++) {
109 if (__raw_spin_trylock(&lock->raw_lock)) 109 if (arch_spin_trylock(&lock->raw_lock))
110 return; 110 return;
111 __delay(1); 111 __delay(1);
112 } 112 }
@@ -128,14 +128,14 @@ static void __spin_lock_debug(spinlock_t *lock)
128void _raw_spin_lock(spinlock_t *lock) 128void _raw_spin_lock(spinlock_t *lock)
129{ 129{
130 debug_spin_lock_before(lock); 130 debug_spin_lock_before(lock);
131 if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) 131 if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
132 __spin_lock_debug(lock); 132 __spin_lock_debug(lock);
133 debug_spin_lock_after(lock); 133 debug_spin_lock_after(lock);
134} 134}
135 135
136int _raw_spin_trylock(spinlock_t *lock) 136int _raw_spin_trylock(spinlock_t *lock)
137{ 137{
138 int ret = __raw_spin_trylock(&lock->raw_lock); 138 int ret = arch_spin_trylock(&lock->raw_lock);
139 139
140 if (ret) 140 if (ret)
141 debug_spin_lock_after(lock); 141 debug_spin_lock_after(lock);
@@ -151,7 +151,7 @@ int _raw_spin_trylock(spinlock_t *lock)
151void _raw_spin_unlock(spinlock_t *lock) 151void _raw_spin_unlock(spinlock_t *lock)
152{ 152{
153 debug_spin_unlock(lock); 153 debug_spin_unlock(lock);
154 __raw_spin_unlock(&lock->raw_lock); 154 arch_spin_unlock(&lock->raw_lock);
155} 155}
156 156
157static void rwlock_bug(rwlock_t *lock, const char *msg) 157static void rwlock_bug(rwlock_t *lock, const char *msg)