-rw-r--r--  arch/alpha/include/asm/spinlock.h | 6
-rw-r--r--  arch/alpha/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/arm/include/asm/spinlock.h | 6
-rw-r--r--  arch/arm/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/blackfin/include/asm/spinlock.h | 10
-rw-r--r--  arch/blackfin/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/cris/include/arch-v32/arch/spinlock.h | 12
-rw-r--r--  arch/ia64/include/asm/spinlock.h | 26
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/m32r/include/asm/spinlock.h | 6
-rw-r--r--  arch/m32r/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/mips/include/asm/spinlock.h | 10
-rw-r--r--  arch/mips/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/parisc/include/asm/atomic.h | 6
-rw-r--r--  arch/parisc/include/asm/spinlock.h | 8
-rw-r--r--  arch/parisc/include/asm/spinlock_types.h | 4
-rw-r--r--  arch/parisc/lib/bitops.c | 2
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 2
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 14
-rw-r--r--  arch/powerpc/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/powerpc/kernel/rtas.c | 2
-rw-r--r--  arch/powerpc/lib/locks.c | 4
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c | 2
-rw-r--r--  arch/s390/include/asm/spinlock.h | 16
-rw-r--r--  arch/s390/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/s390/lib/spinlock.c | 8
-rw-r--r--  arch/sh/include/asm/spinlock.h | 6
-rw-r--r--  arch/sh/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h | 6
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h | 8
-rw-r--r--  arch/sparc/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/x86/include/asm/paravirt.h | 12
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 14
-rw-r--r--  arch/x86/include/asm/spinlock.h | 30
-rw-r--r--  arch/x86/include/asm/spinlock_types.h | 4
-rw-r--r--  arch/x86/kernel/dumpstack.c | 2
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c | 2
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 2
-rw-r--r--  arch/x86/xen/spinlock.c | 16
-rw-r--r--  include/asm-generic/bitops/atomic.h | 6
-rw-r--r--  include/linux/spinlock.h | 4
-rw-r--r--  include/linux/spinlock_types.h | 2
-rw-r--r--  include/linux/spinlock_types_up.h | 4
-rw-r--r--  include/linux/spinlock_up.h | 8
-rw-r--r--  kernel/lockdep.c | 2
-rw-r--r--  kernel/trace/ring_buffer.c | 4
-rw-r--r--  kernel/trace/trace.c | 18
-rw-r--r--  kernel/trace/trace_clock.c | 4
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 4
-rw-r--r--  kernel/trace/trace_stack.c | 4
-rw-r--r--  lib/spinlock_debug.c | 2
51 files changed, 164 insertions(+), 164 deletions(-)
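
(The change is a mechanical rename: the architecture-level lock type and the helpers that take it move from the raw_* names to arch_*, with no change to layout or locking behaviour. A minimal sketch of the pattern, recomposed from the alpha hunks below and not itself part of the patch:

	/* before: asm/spinlock_types.h */
	typedef struct {
		volatile unsigned int lock;
	} raw_spinlock_t;

	/* after: same layout, new name */
	typedef struct {
		volatile unsigned int lock;
	} arch_spinlock_t;

	/* users only respell the type */
	static inline int __raw_spin_trylock(arch_spinlock_t *lock)
	{
		return !test_and_set_bit(0, &lock->lock);
	}
)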
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index e38fb95cb335..bdb26a1940b4 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -17,13 +17,13 @@
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while ((x)->lock)
 
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void __raw_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void __raw_spin_lock(arch_spinlock_t * lock)
 {
 	long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
 	: "m"(lock->lock) : "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return !test_and_set_bit(0, &lock->lock);
 }
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 8141eb5ebf0d..bb94a51e53d2 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c13681ac1ede..4e7712ee9394 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -23,7 +23,7 @@
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	smp_mb();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	}
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	smp_mb();
 
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 43e83f6d2ee5..5e9d3eadd167 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index b0c7f0ee4b03..fc16b4c5309b 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -24,29 +24,29 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
 asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __raw_spin_is_locked_asm(&lock->lock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	__raw_spin_lock_asm(&lock->lock);
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return __raw_spin_trylock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
 		cpu_relax();
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index be75762c0610..03b377abf5c0 100644
--- a/arch/blackfin/include/asm/spinlock_types.h
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -15,7 +15,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 367a53ea10c5..e253457765f2 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
 {
 	return *(volatile signed char *)(&(x)->slock) <= 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ volatile ("move.d %1,%0" \
 			  : "=m" (lock->slock) \
@@ -22,24 +22,24 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 			  : "memory");
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
 		cpu_relax();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	cris_spin_lock((void *)&lock->slock);
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516d..9fbdf7e61087 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -38,7 +38,7 @@
 #define TICKET_BITS	15
 #define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int	*p = (int *)&lock->lock, ticket, serve;
 
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	}
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->lock);
 
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return 0;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	int	*p = (int *)&lock->lock, ticket;
 
@@ -89,53 +89,53 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
 	}
 }
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
 #define __raw_spin_is_contended	__raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock_wait(lock);
 }
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4a..447ccc6ca7a8 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index dded923883b2..0c0164225bc0 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -36,7 +36,7 @@
  * __raw_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	int oldval;
 	unsigned long tmp1, tmp2;
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (oldval > 0);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp0, tmp1;
 
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	mb();
 	lock->slock = 1;
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 83f52105c0e4..17d15bd6322d 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
 
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5b60a09a0f08..0f16d0673b4a 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,7 +34,7 @@
  * becomes equal to the the initial value of the tail.
  */
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
 	unsigned int counters = ACCESS_ONCE(lock->lock);
 
@@ -45,7 +45,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 #define __raw_spin_unlock_wait(x) \
 	while (__raw_spin_is_locked(x)) { cpu_relax(); }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
 {
 	unsigned int counters = ACCESS_ONCE(lock->lock);
 
@@ -53,7 +53,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 }
 #define __raw_spin_is_contended	__raw_spin_is_contended
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	int my_ticket;
 	int tmp;
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	smp_llsc_mb();
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	int tmp;
 
@@ -174,7 +174,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	}
 }
 
-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, tmp2, tmp3;
 
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index adeedaa116c1..2e1060892d3b 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -12,7 +12,7 @@ typedef struct {
 	 * bits 15..28: ticket
 	 */
 	unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 8bc9e96699b2..3a4ea778d4b6 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -27,18 +27,18 @@
 #  define ATOMIC_HASH_SIZE 4
 #  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
 	__raw_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
 	__raw_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
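
(Aside: the two macros above are parisc's way of emulating atomics without a wide hardware atomic op — the target address is hashed into a small array of spinlocks, so unrelated addresses rarely contend. A minimal sketch of a user, with atomic_add_hashed() a hypothetical name for illustration:

	/* Sketch: an atomic add built on the hashed-lock helpers above. */
	static inline void atomic_add_hashed(int i, atomic_t *v)
	{
		unsigned long flags;

		_atomic_spin_lock_irqsave(v, flags);	/* lock v's bucket */
		v->counter += i;			/* plain RMW under the lock */
		_atomic_spin_unlock_irqrestore(v, flags);
	}

This is roughly how the real atomic_add() in the same header is structured.)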
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fae03e136fa8..69e8dca26744 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,7 +5,7 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	return *a == 0;
@@ -15,7 +15,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
 					 unsigned long flags)
 {
 	volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
 	mb();
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void __raw_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
 	mb();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int __raw_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	int ret;
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 3f72f47cf4b2..735caafb81f5 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -9,10 +9,10 @@ typedef struct {
 	volatile unsigned int lock[4];
 # define __RAW_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
 #endif
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 typedef struct {
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	volatile int counter;
 } raw_rwlock_t;
 
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index e3eb739fab19..fdd7f583de54 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -12,7 +12,7 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
 	[0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
 };
 #endif
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 168fce726201..20de73c36682 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 198266cf9e2d..c0d44c92ff0e 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -73,7 +73,7 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	return arch_spin_trylock(lock) == 0;
@@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_lppaca()->shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
+extern void __spin_yield(arch_spinlock_t *lock);
 extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)	barrier()
@@ -104,7 +104,7 @@ extern void __rw_yield(raw_rwlock_t *lock);
 #define SHARED_PROCESSOR	0
 #endif
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
@@ -120,7 +120,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 }
 
 static inline
-void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;
 
@@ -140,7 +140,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 	}
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	SYNC_IO;
 	__asm__ __volatile__("# __raw_spin_unlock\n\t"
@@ -149,7 +149,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 }
 
 #ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
 #else
 #define __raw_spin_unlock_wait(lock) \
 	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 74236c9f05b1..4312e5baaf88 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index bf90361bb70f..579069c12152 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
 	return 1;
 }
 
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static u64 timebase = 0;
 
 void __cpuinit rtas_give_timebase(void)
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 79d0fa3a470d..b06294cde499 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -25,7 +25,7 @@
 #include <asm/smp.h>
 #include <asm/firmware.h>
 
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
 
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
 }
 #endif
 
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (lock->slock) {
 		HMT_low();
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index a4619347aa7e..be36fece41d7 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
 }
 
 #ifdef CONFIG_SMP
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static unsigned long timebase;
 
 static void __devinit pas_give_timebase(void)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7ab..6121fa4b83d9 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -57,12 +57,12 @@ _raw_compare_and_swap(volatile unsigned int *lock,
 	do { while (__raw_spin_is_locked(lock)) \
 		 _raw_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void _raw_spin_lock_wait(arch_spinlock_t *);
+extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int _raw_spin_trylock_retry(arch_spinlock_t *);
+extern void _raw_spin_relax(arch_spinlock_t *lock);
 
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void __raw_spin_lock(arch_spinlock_t *lp)
 {
 	int old;
 
@@ -72,7 +72,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lp)
 	_raw_spin_lock_wait(lp);
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
 					 unsigned long flags)
 {
 	int old;
@@ -83,7 +83,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
 	_raw_spin_lock_wait_flags(lp, flags);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int __raw_spin_trylock(arch_spinlock_t *lp)
 {
 	int old;
 
@@ -93,7 +93,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 	return _raw_spin_trylock_retry(lp);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void __raw_spin_unlock(arch_spinlock_t *lp)
 {
 	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 654abc40de04..a93638eee3f7 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f7e0d30250b7..d4cbf71a6077 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
 		_raw_yield();
 }
 
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void _raw_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -59,7 +59,7 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -82,7 +82,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
 
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int _raw_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	unsigned int cpu = ~smp_processor_id();
 	int count;
@@ -97,7 +97,7 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp)
 }
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
-void _raw_spin_relax(raw_spinlock_t *lock)
+void _raw_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
 	if (cpu != 0)
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index a28c9f0053fd..5a05b3fcefbe 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -34,7 +34,7 @@
  *
  * We make no fairness assumptions. They have a cost.
  */
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 	unsigned long oldval;
@@ -54,7 +54,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -67,7 +67,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, oldval;
 
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index b4d244e7b60c..37712c32ba99 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
 
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff636..b2d8a67f727e 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -15,7 +15,7 @@
 #define __raw_spin_unlock_wait(lock) \
 	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 	"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "g2", "memory", "cc");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int result;
 	__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e514783582..38e16c40efc4 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -27,7 +27,7 @@
 	do {	rmb();			\
 	} while((lp)->lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long result;
 
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0UL);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stb	%%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long tmp1, tmp2;
 
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585b..41d9a8fec13d 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index efb38994859c..5655f75f10b7 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int __raw_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int __raw_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
 #define __raw_spin_is_contended	__raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 9357473c8da0..b1e70d51e40c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
 			   phys_addr_t phys, pgprot_t flags);
 };
 
-struct raw_spinlock;
+struct arch_spinlock;
 struct pv_lock_ops {
-	int (*spin_is_locked)(struct raw_spinlock *lock);
-	int (*spin_is_contended)(struct raw_spinlock *lock);
-	void (*spin_lock)(struct raw_spinlock *lock);
-	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct raw_spinlock *lock);
-	void (*spin_unlock)(struct raw_spinlock *lock);
+	int (*spin_is_locked)(struct arch_spinlock *lock);
+	int (*spin_is_contended)(struct arch_spinlock *lock);
+	void (*spin_lock)(struct arch_spinlock *lock);
+	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+	int (*spin_trylock)(struct arch_spinlock *lock);
+	void (*spin_unlock)(struct arch_spinlock *lock);
 };
 
 /* This contains all the paravirt structures: we get a convenient
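
(Aside: pv_lock_ops is the paravirt indirection point — each spinlock primitive is a function pointer a hypervisor can replace at boot. Natively the table simply dispatches to the ticket-lock code; a sketch along the lines of arch/x86/kernel/paravirt-spinlocks.c, abridged, field names taken from the struct above:

	struct pv_lock_ops pv_lock_ops = {
	#ifdef CONFIG_SMP
		.spin_is_locked    = __ticket_spin_is_locked,
		.spin_is_contended = __ticket_spin_is_contended,
		.spin_lock         = __ticket_spin_lock,
		.spin_lock_flags   = default_spin_lock_flags,
		.spin_trylock      = __ticket_spin_trylock,
		.spin_unlock       = __ticket_spin_unlock,
	#endif
	};

Xen's replacement functions appear further down in this patch.)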
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4e77853321db..204b524fcf57 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -58,7 +58,7 @@
 #if (NR_CPUS < 256)
 #define TICKET_SHIFT 8
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, new;
 
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 #else
 #define TICKET_SHIFT 16
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
@@ -174,33 +174,33 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
 #define __raw_spin_is_contended	__raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
 	__raw_spin_lock(lock);
@@ -208,7 +208,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
 		cpu_relax();
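
(Aside: the is_locked/is_contended tests above decode the ticket word — the low TICKET_SHIFT bits hold the ticket currently being served ("head"), the bits above hold the next ticket to hand out ("tail"), and the lock is free exactly when the two halves match. A self-contained illustration of the same arithmetic, plain C, not patch code:

	#include <assert.h>

	#define TICKET_SHIFT 8	/* the NR_CPUS < 256 case above */

	static int ticket_is_locked(unsigned int slock)
	{
		/* head != tail: a ticket is outstanding, lock is held */
		return !!(((slock >> TICKET_SHIFT) ^ slock) &
			  ((1 << TICKET_SHIFT) - 1));
	}

	int main(void)
	{
		assert(!ticket_is_locked(0x0000));  /* head 0, tail 0: free */
		assert( ticket_is_locked(0x0100));  /* tail 1, head 0: held */
		assert(!ticket_is_locked(0x0101));  /* served: free again   */
		return 0;
	}
)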
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 845f81c87091..2ae7637ed524 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,9 +5,9 @@
5# error "please don't include this file directly" 5# error "please don't include this file directly"
6#endif 6#endif
7 7
8typedef struct raw_spinlock { 8typedef struct arch_spinlock {
9 unsigned int slock; 9 unsigned int slock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
13 13
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b8ce165dde5d..0862d9d89c92 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -188,7 +188,7 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 3a7c5a44082e..a0f39e090684 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,7 +8,7 @@
 #include <asm/paravirt.h>
 
 static inline void
-default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index eed156851f5d..9f908b9d1abe 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 static __cpuinitdata cycles_t last_tsc;
 static __cpuinitdata cycles_t max_warp;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 36a5141108df..24ded31b5aec 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -120,14 +120,14 @@ struct xen_spinlock {
 	unsigned short spinners;	/* count of waiting cpus */
 };
 
-static int xen_spin_is_locked(struct raw_spinlock *lock)
+static int xen_spin_is_locked(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
 	return xl->lock != 0;
 }
 
-static int xen_spin_is_contended(struct raw_spinlock *lock)
+static int xen_spin_is_contended(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
 	return xl->spinners != 0;
 }
 
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static int xen_spin_trylock(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
 	__get_cpu_var(lock_spinners) = prev;
 }
 
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
 	return ret;
 }
 
-static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
 	spin_time_accum_total(start_spin);
 }
 
-static void xen_spin_lock(struct raw_spinlock *lock)
+static void xen_spin_lock(struct arch_spinlock *lock)
 {
 	__xen_spin_lock(lock, false);
 }
 
-static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
 {
 	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
 }
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
 	}
 }
 
-static void xen_spin_unlock(struct raw_spinlock *lock)
+static void xen_spin_unlock(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index c8946465e63a..dcf0afad4a7f 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,18 +15,18 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
 	__raw_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
 	__raw_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a9aaa709fb93..5ef7a4c060b5 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,7 +8,7 @@
  *
  * on SMP builds:
  *
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the
  *                        initializers
  *
  * linux/spinlock_types.h:
@@ -75,7 +75,7 @@
 #define __lockfunc __attribute__((section(".spinlock.text")))
 
 /*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and raw_rwlock_t definitions:
  */
 #include <linux/spinlock_types.h>
 
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index f979d5d8a160..d4af2d7a86ea 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -18,7 +18,7 @@
 #include <linux/lockdep.h>
 
 typedef struct {
-	raw_spinlock_t raw_lock;
+	arch_spinlock_t raw_lock;
 #ifdef CONFIG_GENERIC_LOCKBREAK
 	unsigned int break_lock;
 #endif
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198e..34d36691c4ec 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,13 +16,13 @@
 
 typedef struct {
 	volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
 
 #else
 
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { }
 
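A side note on the non-debug branch: an empty struct is a GNU C extension with size zero, so on UP builds without CONFIG_DEBUG_SPINLOCK the lock compiles away entirely. Illustrative only, not part of the patch:

typedef struct { } arch_spinlock_t;	/* gcc: sizeof == 0 */

struct demo {
	int counter;
	arch_spinlock_t lock;		/* adds no bytes */
};
/* on gcc, sizeof(struct demo) == sizeof(int) */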
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215b..8ee2ac1bf636 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -20,19 +20,19 @@
 #ifdef CONFIG_DEBUG_SPINLOCK
 #define __raw_spin_is_locked(x)	((x)->slock == 0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	char oldval = lock->slock;
 
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return oldval > 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	lock->slock = 1;
 }
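Note the inverted sense in this debug variant: slock == 0 means locked, so a usable lock must start life as __RAW_SPIN_LOCK_UNLOCKED, i.e. { 1 }. A hedged usage sketch (demo_lock and demo are invented for illustration):

static arch_spinlock_t demo_lock = __RAW_SPIN_LOCK_UNLOCKED;

static void demo(void)
{
	if (__raw_spin_trylock(&demo_lock)) {
		/* oldval was 1 (unlocked), slock is now 0 (held) */
		__raw_spin_unlock(&demo_lock);	/* back to 1 */
	}
}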
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 429540c70d3f..7cc50c62af59 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
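The definition above also shows the two initialization idioms this series relies on: bare __RAW_SPIN_LOCK_UNLOCKED works only as an initializer, while the (arch_spinlock_t) compound-literal cast also yields an assignable value. A minimal sketch (names invented):

/* static definition: the bare initializer is enough */
static arch_spinlock_t a_lock = __RAW_SPIN_LOCK_UNLOCKED;

/* runtime (re)initialization: needs the compound literal */
static void demo_reinit(arch_spinlock_t *l)
{
	*l = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
}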
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5e..5ac8ee0a9e35 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c82dfd92fdfd..7d56cecc2c6e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as an arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly	tracing_max_latency;
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
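All of the trace locks converted above follow the same discipline: an arch-level lock is invisible to lockdep and does not touch irqs or preemption by itself, so callers do that explicitly around it. A hedged sketch of the pattern (demo names are invented; the real code lives in trace_vbprintk() and friends):

static arch_spinlock_t demo_buf_lock =
	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void demo_write(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* the arch lock won't do this */
	__raw_spin_lock(&demo_buf_lock);
	/* ... fill the shared static buffer ... */
	__raw_spin_unlock(&demo_buf_lock);
	local_irq_restore(flags);
}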
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 878c03f386ba..206ec3d4b3c2 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
 /* keep prev_time and lock in the same cacheline. */
 static struct {
 	u64 prev_time;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
 	{
-		.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+		.lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
 	};
 
 u64 notrace trace_clock_global(void)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 26185d727676..4cf7e83ec235 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
 static unsigned wakeup_prio = -1;
 static int wakeup_rt;
 
-static raw_spinlock_t wakeup_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8504ac71e4e8..9a82d568fdec 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
 };
 
 static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490b..2acd501b3826 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;
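__spin_lock_init() above makes the layering explicit: the generic spinlock_t embeds the arch-level lock as ->raw_lock, and ordinary kernel code never touches arch_spinlock_t directly. A closing sketch of that everyday layer, using the usual API (demo names invented):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* spinlock_t wrapping an arch_spinlock_t */

static void demo(void)
{
	spin_lock(&demo_lock);		/* bottoms out in __raw_spin_lock() */
	/* critical section */
	spin_unlock(&demo_lock);
}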