Diffstat (limited to 'arch')
24 files changed, 238 insertions, 238 deletions
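The whole series below is a mechanical rename of the architecture-level lock primitives: __raw_spin_* becomes arch_spin_*, and the _raw_spin_relax/_raw_read_relax/_raw_write_relax hooks become arch_spin_relax/arch_read_relax/arch_write_relax. As a reading aid, here is a minimal sketch of what an arch backend looks like under the new names; it is not taken from any of the patched files, and it assumes a plain test-and-set lock built on GCC's __sync builtins rather than the per-arch assembly used below.

/* Illustrative only: a hypothetical test-and-set backend using the new arch_* names. */
typedef struct {
        volatile unsigned int lock;     /* 0 = free, 1 = held (layout is an assumption) */
} arch_spinlock_t;

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return lp->lock != 0;
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        while (__sync_lock_test_and_set(&lp->lock, 1))  /* returns the previous value */
                ;                                       /* spin; a real backend calls cpu_relax() here */
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        return __sync_lock_test_and_set(&lp->lock, 1) == 0;     /* 1 = got it, 0 = busy */
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        __sync_lock_release(&lp->lock);         /* store 0 with release semantics */
}

#define arch_spin_lock_flags(lp, flags)  arch_spin_lock(lp)     /* most arches ignore flags, as below */
#define arch_spin_relax(lp)              ((void)0)              /* stands in for cpu_relax() */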
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index bdb26a1940b4..4dac79f504c3 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
12 | * We make no fairness assumptions. They have a cost. | 12 | * We make no fairness assumptions. They have a cost. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 15 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
16 | #define __raw_spin_is_locked(x) ((x)->lock != 0) | 16 | #define arch_spin_is_locked(x) ((x)->lock != 0) |
17 | #define __raw_spin_unlock_wait(x) \ | 17 | #define arch_spin_unlock_wait(x) \ |
18 | do { cpu_relax(); } while ((x)->lock) | 18 | do { cpu_relax(); } while ((x)->lock) |
19 | 19 | ||
20 | static inline void __raw_spin_unlock(arch_spinlock_t * lock) | 20 | static inline void arch_spin_unlock(arch_spinlock_t * lock) |
21 | { | 21 | { |
22 | mb(); | 22 | mb(); |
23 | lock->lock = 0; | 23 | lock->lock = 0; |
24 | } | 24 | } |
25 | 25 | ||
26 | static inline void __raw_spin_lock(arch_spinlock_t * lock) | 26 | static inline void arch_spin_lock(arch_spinlock_t * lock) |
27 | { | 27 | { |
28 | long tmp; | 28 | long tmp; |
29 | 29 | ||
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t * lock)
43 | : "m"(lock->lock) : "memory"); | 43 | : "m"(lock->lock) : "memory"); |
44 | } | 44 | } |
45 | 45 | ||
46 | static inline int __raw_spin_trylock(arch_spinlock_t *lock) | 46 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
47 | { | 47 | { |
48 | return !test_and_set_bit(0, &lock->lock); | 48 | return !test_and_set_bit(0, &lock->lock); |
49 | } | 49 | } |
@@ -169,8 +169,8 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock)
169 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 169 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
170 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 170 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) |
171 | 171 | ||
172 | #define _raw_spin_relax(lock) cpu_relax() | 172 | #define arch_spin_relax(lock) cpu_relax() |
173 | #define _raw_read_relax(lock) cpu_relax() | 173 | #define arch_read_relax(lock) cpu_relax() |
174 | #define _raw_write_relax(lock) cpu_relax() | 174 | #define arch_write_relax(lock) cpu_relax() |
175 | 175 | ||
176 | #endif /* _ALPHA_SPINLOCK_H */ | 176 | #endif /* _ALPHA_SPINLOCK_H */ |
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4e7712ee9394..de62eb098f68 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
17 | * Locked value: 1 | 17 | * Locked value: 1 |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #define __raw_spin_is_locked(x) ((x)->lock != 0) | 20 | #define arch_spin_is_locked(x) ((x)->lock != 0) |
21 | #define __raw_spin_unlock_wait(lock) \ | 21 | #define arch_spin_unlock_wait(lock) \ |
22 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 22 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) |
23 | 23 | ||
24 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 24 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
25 | 25 | ||
26 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 26 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
27 | { | 27 | { |
28 | unsigned long tmp; | 28 | unsigned long tmp; |
29 | 29 | ||
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
43 | smp_mb(); | 43 | smp_mb(); |
44 | } | 44 | } |
45 | 45 | ||
46 | static inline int __raw_spin_trylock(arch_spinlock_t *lock) | 46 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
47 | { | 47 | { |
48 | unsigned long tmp; | 48 | unsigned long tmp; |
49 | 49 | ||
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
63 | } | 63 | } |
64 | } | 64 | } |
65 | 65 | ||
66 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 66 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
67 | { | 67 | { |
68 | smp_mb(); | 68 | smp_mb(); |
69 | 69 | ||
@@ -220,8 +220,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
220 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 220 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
221 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 221 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) |
222 | 222 | ||
223 | #define _raw_spin_relax(lock) cpu_relax() | 223 | #define arch_spin_relax(lock) cpu_relax() |
224 | #define _raw_read_relax(lock) cpu_relax() | 224 | #define arch_read_relax(lock) cpu_relax() |
225 | #define _raw_write_relax(lock) cpu_relax() | 225 | #define arch_write_relax(lock) cpu_relax() |
226 | 226 | ||
227 | #endif /* __ASM_SPINLOCK_H */ | 227 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index fc16b4c5309b..62d49540e02b 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -24,31 +24,31 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
24 | asmlinkage int __raw_write_trylock_asm(volatile int *ptr); | 24 | asmlinkage int __raw_write_trylock_asm(volatile int *ptr); |
25 | asmlinkage void __raw_write_unlock_asm(volatile int *ptr); | 25 | asmlinkage void __raw_write_unlock_asm(volatile int *ptr); |
26 | 26 | ||
27 | static inline int __raw_spin_is_locked(arch_spinlock_t *lock) | 27 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
28 | { | 28 | { |
29 | return __raw_spin_is_locked_asm(&lock->lock); | 29 | return __raw_spin_is_locked_asm(&lock->lock); |
30 | } | 30 | } |
31 | 31 | ||
32 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 32 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
33 | { | 33 | { |
34 | __raw_spin_lock_asm(&lock->lock); | 34 | __raw_spin_lock_asm(&lock->lock); |
35 | } | 35 | } |
36 | 36 | ||
37 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 37 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
38 | 38 | ||
39 | static inline int __raw_spin_trylock(arch_spinlock_t *lock) | 39 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
40 | { | 40 | { |
41 | return __raw_spin_trylock_asm(&lock->lock); | 41 | return __raw_spin_trylock_asm(&lock->lock); |
42 | } | 42 | } |
43 | 43 | ||
44 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 44 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
45 | { | 45 | { |
46 | __raw_spin_unlock_asm(&lock->lock); | 46 | __raw_spin_unlock_asm(&lock->lock); |
47 | } | 47 | } |
48 | 48 | ||
49 | static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) | 49 | static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) |
50 | { | 50 | { |
51 | while (__raw_spin_is_locked(lock)) | 51 | while (arch_spin_is_locked(lock)) |
52 | cpu_relax(); | 52 | cpu_relax(); |
53 | } | 53 | } |
54 | 54 | ||
@@ -92,9 +92,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
92 | __raw_write_unlock_asm(&rw->lock); | 92 | __raw_write_unlock_asm(&rw->lock); |
93 | } | 93 | } |
94 | 94 | ||
95 | #define _raw_spin_relax(lock) cpu_relax() | 95 | #define arch_spin_relax(lock) cpu_relax() |
96 | #define _raw_read_relax(lock) cpu_relax() | 96 | #define arch_read_relax(lock) cpu_relax() |
97 | #define _raw_write_relax(lock) cpu_relax() | 97 | #define arch_write_relax(lock) cpu_relax() |
98 | 98 | ||
99 | #endif | 99 | #endif |
100 | 100 | ||
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index e253457765f2..a2e8a394d555 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
9 | extern void cris_spin_lock(void *l); | 9 | extern void cris_spin_lock(void *l); |
10 | extern int cris_spin_trylock(void *l); | 10 | extern int cris_spin_trylock(void *l); |
11 | 11 | ||
12 | static inline int __raw_spin_is_locked(arch_spinlock_t *x) | 12 | static inline int arch_spin_is_locked(arch_spinlock_t *x) |
13 | { | 13 | { |
14 | return *(volatile signed char *)(&(x)->slock) <= 0; | 14 | return *(volatile signed char *)(&(x)->slock) <= 0; |
15 | } | 15 | } |
16 | 16 | ||
17 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 17 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
18 | { | 18 | { |
19 | __asm__ volatile ("move.d %1,%0" \ | 19 | __asm__ volatile ("move.d %1,%0" \ |
20 | : "=m" (lock->slock) \ | 20 | : "=m" (lock->slock) \ |
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
22 | : "memory"); | 22 | : "memory"); |
23 | } | 23 | } |
24 | 24 | ||
25 | static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) | 25 | static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) |
26 | { | 26 | { |
27 | while (__raw_spin_is_locked(lock)) | 27 | while (arch_spin_is_locked(lock)) |
28 | cpu_relax(); | 28 | cpu_relax(); |
29 | } | 29 | } |
30 | 30 | ||
31 | static inline int __raw_spin_trylock(arch_spinlock_t *lock) | 31 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
32 | { | 32 | { |
33 | return cris_spin_trylock((void *)&lock->slock); | 33 | return cris_spin_trylock((void *)&lock->slock); |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 36 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
37 | { | 37 | { |
38 | cris_spin_lock((void *)&lock->slock); | 38 | cris_spin_lock((void *)&lock->slock); |
39 | } | 39 | } |
40 | 40 | ||
41 | static inline void | 41 | static inline void |
42 | __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) | 42 | arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
43 | { | 43 | { |
44 | __raw_spin_lock(lock); | 44 | arch_spin_lock(lock); |
45 | } | 45 | } |
46 | 46 | ||
47 | /* | 47 | /* |
@@ -68,64 +68,64 @@ static inline int __raw_write_can_lock(raw_rwlock_t *x)
68 | 68 | ||
69 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 69 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
70 | { | 70 | { |
71 | __raw_spin_lock(&rw->slock); | 71 | arch_spin_lock(&rw->slock); |
72 | while (rw->lock == 0); | 72 | while (rw->lock == 0); |
73 | rw->lock--; | 73 | rw->lock--; |
74 | __raw_spin_unlock(&rw->slock); | 74 | arch_spin_unlock(&rw->slock); |
75 | } | 75 | } |
76 | 76 | ||
77 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 77 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
78 | { | 78 | { |
79 | __raw_spin_lock(&rw->slock); | 79 | arch_spin_lock(&rw->slock); |
80 | while (rw->lock != RW_LOCK_BIAS); | 80 | while (rw->lock != RW_LOCK_BIAS); |
81 | rw->lock = 0; | 81 | rw->lock = 0; |
82 | __raw_spin_unlock(&rw->slock); | 82 | arch_spin_unlock(&rw->slock); |
83 | } | 83 | } |
84 | 84 | ||
85 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 85 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
86 | { | 86 | { |
87 | __raw_spin_lock(&rw->slock); | 87 | arch_spin_lock(&rw->slock); |
88 | rw->lock++; | 88 | rw->lock++; |
89 | __raw_spin_unlock(&rw->slock); | 89 | arch_spin_unlock(&rw->slock); |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 92 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
93 | { | 93 | { |
94 | __raw_spin_lock(&rw->slock); | 94 | arch_spin_lock(&rw->slock); |
95 | while (rw->lock != RW_LOCK_BIAS); | 95 | while (rw->lock != RW_LOCK_BIAS); |
96 | rw->lock = RW_LOCK_BIAS; | 96 | rw->lock = RW_LOCK_BIAS; |
97 | __raw_spin_unlock(&rw->slock); | 97 | arch_spin_unlock(&rw->slock); |
98 | } | 98 | } |
99 | 99 | ||
100 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 100 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
101 | { | 101 | { |
102 | int ret = 0; | 102 | int ret = 0; |
103 | __raw_spin_lock(&rw->slock); | 103 | arch_spin_lock(&rw->slock); |
104 | if (rw->lock != 0) { | 104 | if (rw->lock != 0) { |
105 | rw->lock--; | 105 | rw->lock--; |
106 | ret = 1; | 106 | ret = 1; |
107 | } | 107 | } |
108 | __raw_spin_unlock(&rw->slock); | 108 | arch_spin_unlock(&rw->slock); |
109 | return ret; | 109 | return ret; |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 112 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
113 | { | 113 | { |
114 | int ret = 0; | 114 | int ret = 0; |
115 | __raw_spin_lock(&rw->slock); | 115 | arch_spin_lock(&rw->slock); |
116 | if (rw->lock == RW_LOCK_BIAS) { | 116 | if (rw->lock == RW_LOCK_BIAS) { |
117 | rw->lock = 0; | 117 | rw->lock = 0; |
118 | ret = 1; | 118 | ret = 1; |
119 | } | 119 | } |
120 | __raw_spin_unlock(&rw->slock); | 120 | arch_spin_unlock(&rw->slock); |
121 | return 1; | 121 | return 1; |
122 | } | 122 | } |
123 | 123 | ||
124 | #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) | 124 | #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) |
125 | #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) | 125 | #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) |
126 | 126 | ||
127 | #define _raw_spin_relax(lock) cpu_relax() | 127 | #define arch_spin_relax(lock) cpu_relax() |
128 | #define _raw_read_relax(lock) cpu_relax() | 128 | #define arch_read_relax(lock) cpu_relax() |
129 | #define _raw_write_relax(lock) cpu_relax() | 129 | #define arch_write_relax(lock) cpu_relax() |
130 | 130 | ||
131 | #endif /* __ASM_ARCH_SPINLOCK_H */ | 131 | #endif /* __ASM_ARCH_SPINLOCK_H */ |
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
127 | * @addr: Address to start counting from | 127 | * @addr: Address to start counting from |
128 | * | 128 | * |
129 | * Similarly to clear_bit_unlock, the implementation uses a store | 129 | * Similarly to clear_bit_unlock, the implementation uses a store |
130 | * with release semantics. See also __raw_spin_unlock(). | 130 | * with release semantics. See also arch_spin_unlock(). |
131 | */ | 131 | */ |
132 | static __inline__ void | 132 | static __inline__ void |
133 | __clear_bit_unlock(int nr, void *addr) | 133 | __clear_bit_unlock(int nr, void *addr) |
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 9fbdf7e61087..b06165f6352f 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
17 | #include <asm/intrinsics.h> | 17 | #include <asm/intrinsics.h> |
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | 19 | ||
20 | #define __raw_spin_lock_init(x) ((x)->lock = 0) | 20 | #define arch_spin_lock_init(x) ((x)->lock = 0) |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * Ticket locks are conceptually two parts, one indicating the current head of | 23 | * Ticket locks are conceptually two parts, one indicating the current head of |
@@ -103,39 +103,39 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
103 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; | 103 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; |
104 | } | 104 | } |
105 | 105 | ||
106 | static inline int __raw_spin_is_locked(arch_spinlock_t *lock) | 106 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
107 | { | 107 | { |
108 | return __ticket_spin_is_locked(lock); | 108 | return __ticket_spin_is_locked(lock); |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline int __raw_spin_is_contended(arch_spinlock_t *lock) | 111 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
112 | { | 112 | { |
113 | return __ticket_spin_is_contended(lock); | 113 | return __ticket_spin_is_contended(lock); |
114 | } | 114 | } |
115 | #define __raw_spin_is_contended __raw_spin_is_contended | 115 | #define arch_spin_is_contended arch_spin_is_contended |
116 | 116 | ||
117 | static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) | 117 | static __always_inline void arch_spin_lock(arch_spinlock_t *lock) |
118 | { | 118 | { |
119 | __ticket_spin_lock(lock); | 119 | __ticket_spin_lock(lock); |
120 | } | 120 | } |
121 | 121 | ||
122 | static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) | 122 | static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) |
123 | { | 123 | { |
124 | return __ticket_spin_trylock(lock); | 124 | return __ticket_spin_trylock(lock); |
125 | } | 125 | } |
126 | 126 | ||
127 | static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) | 127 | static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) |
128 | { | 128 | { |
129 | __ticket_spin_unlock(lock); | 129 | __ticket_spin_unlock(lock); |
130 | } | 130 | } |
131 | 131 | ||
132 | static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, | 132 | static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, |
133 | unsigned long flags) | 133 | unsigned long flags) |
134 | { | 134 | { |
135 | __raw_spin_lock(lock); | 135 | arch_spin_lock(lock); |
136 | } | 136 | } |
137 | 137 | ||
138 | static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) | 138 | static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) |
139 | { | 139 | { |
140 | __ticket_spin_unlock_wait(lock); | 140 | __ticket_spin_unlock_wait(lock); |
141 | } | 141 | } |
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
285 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; | 285 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; |
286 | } | 286 | } |
287 | 287 | ||
288 | #define _raw_spin_relax(lock) cpu_relax() | 288 | #define arch_spin_relax(lock) cpu_relax() |
289 | #define _raw_read_relax(lock) cpu_relax() | 289 | #define arch_read_relax(lock) cpu_relax() |
290 | #define _raw_write_relax(lock) cpu_relax() | 290 | #define arch_write_relax(lock) cpu_relax() |
291 | 291 | ||
292 | #endif /* _ASM_IA64_SPINLOCK_H */ | 292 | #endif /* _ASM_IA64_SPINLOCK_H */ |
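The ia64 hunks above rename only the arch_spin_* wrappers; the ticket-lock machinery they wrap (the head/tail scheme described in the comment near the top of that file) is unchanged. For readers unfamiliar with it, here is a hypothetical sketch of the idea using separate head and tail counters; the real ia64 code packs both into a single word via TICKET_SHIFT/TICKET_MASK.

/* Hypothetical ticket lock: illustrates the head/tail idea only, not the ia64 layout. */
typedef struct {
        volatile unsigned int head;     /* ticket currently being served */
        volatile unsigned int tail;     /* next ticket to hand out */
} ticket_lock_t;

static inline void ticket_lock(ticket_lock_t *lp)
{
        unsigned int mine = __sync_fetch_and_add(&lp->tail, 1);        /* take a ticket */

        while (lp->head != mine)
                ;                       /* wait for our turn; real code would cpu_relax() */
}

static inline void ticket_unlock(ticket_lock_t *lp)
{
        __sync_synchronize();           /* release barrier before handing the lock on */
        lp->head++;                     /* serve the next waiter */
}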
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index 0c0164225bc0..8acac950a43c 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
24 | * We make no fairness assumptions. They have a cost. | 24 | * We make no fairness assumptions. They have a cost. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) | 27 | #define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) |
28 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 28 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
29 | #define __raw_spin_unlock_wait(x) \ | 29 | #define arch_spin_unlock_wait(x) \ |
30 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | 30 | do { cpu_relax(); } while (arch_spin_is_locked(x)) |
31 | 31 | ||
32 | /** | 32 | /** |
33 | * __raw_spin_trylock - Try spin lock and return a result | 33 | * arch_spin_trylock - Try spin lock and return a result |
34 | * @lock: Pointer to the lock variable | 34 | * @lock: Pointer to the lock variable |
35 | * | 35 | * |
36 | * __raw_spin_trylock() tries to get the lock and returns a result. | 36 | * arch_spin_trylock() tries to get the lock and returns a result. |
37 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). | 37 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). |
38 | */ | 38 | */ |
39 | static inline int __raw_spin_trylock(arch_spinlock_t *lock) | 39 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
40 | { | 40 | { |
41 | int oldval; | 41 | int oldval; |
42 | unsigned long tmp1, tmp2; | 42 | unsigned long tmp1, tmp2; |
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
50 | * } | 50 | * } |
51 | */ | 51 | */ |
52 | __asm__ __volatile__ ( | 52 | __asm__ __volatile__ ( |
53 | "# __raw_spin_trylock \n\t" | 53 | "# arch_spin_trylock \n\t" |
54 | "ldi %1, #0; \n\t" | 54 | "ldi %1, #0; \n\t" |
55 | "mvfc %2, psw; \n\t" | 55 | "mvfc %2, psw; \n\t" |
56 | "clrpsw #0x40 -> nop; \n\t" | 56 | "clrpsw #0x40 -> nop; \n\t" |
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
69 | return (oldval > 0); | 69 | return (oldval > 0); |
70 | } | 70 | } |
71 | 71 | ||
72 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 72 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
73 | { | 73 | { |
74 | unsigned long tmp0, tmp1; | 74 | unsigned long tmp0, tmp1; |
75 | 75 | ||
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
84 | * } | 84 | * } |
85 | */ | 85 | */ |
86 | __asm__ __volatile__ ( | 86 | __asm__ __volatile__ ( |
87 | "# __raw_spin_lock \n\t" | 87 | "# arch_spin_lock \n\t" |
88 | ".fillinsn \n" | 88 | ".fillinsn \n" |
89 | "1: \n\t" | 89 | "1: \n\t" |
90 | "mvfc %1, psw; \n\t" | 90 | "mvfc %1, psw; \n\t" |
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
111 | ); | 111 | ); |
112 | } | 112 | } |
113 | 113 | ||
114 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 114 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
115 | { | 115 | { |
116 | mb(); | 116 | mb(); |
117 | lock->slock = 1; | 117 | lock->slock = 1; |
@@ -319,8 +319,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
319 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 319 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
320 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 320 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) |
321 | 321 | ||
322 | #define _raw_spin_relax(lock) cpu_relax() | 322 | #define arch_spin_relax(lock) cpu_relax() |
323 | #define _raw_read_relax(lock) cpu_relax() | 323 | #define arch_read_relax(lock) cpu_relax() |
324 | #define _raw_write_relax(lock) cpu_relax() | 324 | #define arch_write_relax(lock) cpu_relax() |
325 | 325 | ||
326 | #endif /* _ASM_M32R_SPINLOCK_H */ | 326 | #endif /* _ASM_M32R_SPINLOCK_H */ |
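As the m32r comment above spells out, arch_spin_trylock() returns 1 on success and 0 on failure, and that convention holds for every backend in this series. A hypothetical caller-side sketch of the usual pattern, reusing the names from the sketch at the top of the patch: try the lock, and if that fails spin read-only until it looks free before trying again.

/* Hypothetical retry loop around arch_spin_trylock(); illustration only. */
static void lock_with_backoff(arch_spinlock_t *lp)
{
        while (!arch_spin_trylock(lp)) {        /* 0 means the lock is held elsewhere */
                while (arch_spin_is_locked(lp))
                        ;                       /* read-only spin keeps the cache line shared */
        }
}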
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 0f16d0673b4a..95edebaaf22a 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
34 | * becomes equal to the the initial value of the tail. | 34 | * becomes equal to the the initial value of the tail. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | static inline int __raw_spin_is_locked(arch_spinlock_t *lock) | 37 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
38 | { | 38 | { |
39 | unsigned int counters = ACCESS_ONCE(lock->lock); | 39 | unsigned int counters = ACCESS_ONCE(lock->lock); |
40 | 40 | ||
41 | return ((counters >> 14) ^ counters) & 0x1fff; | 41 | return ((counters >> 14) ^ counters) & 0x1fff; |
42 | } | 42 | } |
43 | 43 | ||
44 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 44 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
45 | #define __raw_spin_unlock_wait(x) \ | 45 | #define arch_spin_unlock_wait(x) \ |
46 | while (__raw_spin_is_locked(x)) { cpu_relax(); } | 46 | while (arch_spin_is_locked(x)) { cpu_relax(); } |
47 | 47 | ||
48 | static inline int __raw_spin_is_contended(arch_spinlock_t *lock) | 48 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
49 | { | 49 | { |
50 | unsigned int counters = ACCESS_ONCE(lock->lock); | 50 | unsigned int counters = ACCESS_ONCE(lock->lock); |
51 | 51 | ||
52 | return (((counters >> 14) - counters) & 0x1fff) > 1; | 52 | return (((counters >> 14) - counters) & 0x1fff) > 1; |
53 | } | 53 | } |
54 | #define __raw_spin_is_contended __raw_spin_is_contended | 54 | #define arch_spin_is_contended arch_spin_is_contended |
55 | 55 | ||
56 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 56 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
57 | { | 57 | { |
58 | int my_ticket; | 58 | int my_ticket; |
59 | int tmp; | 59 | int tmp; |
60 | 60 | ||
61 | if (R10000_LLSC_WAR) { | 61 | if (R10000_LLSC_WAR) { |
62 | __asm__ __volatile__ ( | 62 | __asm__ __volatile__ ( |
63 | " .set push # __raw_spin_lock \n" | 63 | " .set push # arch_spin_lock \n" |
64 | " .set noreorder \n" | 64 | " .set noreorder \n" |
65 | " \n" | 65 | " \n" |
66 | "1: ll %[ticket], %[ticket_ptr] \n" | 66 | "1: ll %[ticket], %[ticket_ptr] \n" |
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
94 | [my_ticket] "=&r" (my_ticket)); | 94 | [my_ticket] "=&r" (my_ticket)); |
95 | } else { | 95 | } else { |
96 | __asm__ __volatile__ ( | 96 | __asm__ __volatile__ ( |
97 | " .set push # __raw_spin_lock \n" | 97 | " .set push # arch_spin_lock \n" |
98 | " .set noreorder \n" | 98 | " .set noreorder \n" |
99 | " \n" | 99 | " \n" |
100 | " ll %[ticket], %[ticket_ptr] \n" | 100 | " ll %[ticket], %[ticket_ptr] \n" |
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
134 | smp_llsc_mb(); | 134 | smp_llsc_mb(); |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 137 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
138 | { | 138 | { |
139 | int tmp; | 139 | int tmp; |
140 | 140 | ||
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
142 | 142 | ||
143 | if (R10000_LLSC_WAR) { | 143 | if (R10000_LLSC_WAR) { |
144 | __asm__ __volatile__ ( | 144 | __asm__ __volatile__ ( |
145 | " # __raw_spin_unlock \n" | 145 | " # arch_spin_unlock \n" |
146 | "1: ll %[ticket], %[ticket_ptr] \n" | 146 | "1: ll %[ticket], %[ticket_ptr] \n" |
147 | " addiu %[ticket], %[ticket], 1 \n" | 147 | " addiu %[ticket], %[ticket], 1 \n" |
148 | " ori %[ticket], %[ticket], 0x2000 \n" | 148 | " ori %[ticket], %[ticket], 0x2000 \n" |
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
153 | [ticket] "=&r" (tmp)); | 153 | [ticket] "=&r" (tmp)); |
154 | } else { | 154 | } else { |
155 | __asm__ __volatile__ ( | 155 | __asm__ __volatile__ ( |
156 | " .set push # __raw_spin_unlock \n" | 156 | " .set push # arch_spin_unlock \n" |
157 | " .set noreorder \n" | 157 | " .set noreorder \n" |
158 | " \n" | 158 | " \n" |
159 | " ll %[ticket], %[ticket_ptr] \n" | 159 | " ll %[ticket], %[ticket_ptr] \n" |
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
174 | } | 174 | } |
175 | } | 175 | } |
176 | 176 | ||
177 | static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) | 177 | static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) |
178 | { | 178 | { |
179 | int tmp, tmp2, tmp3; | 179 | int tmp, tmp2, tmp3; |
180 | 180 | ||
181 | if (R10000_LLSC_WAR) { | 181 | if (R10000_LLSC_WAR) { |
182 | __asm__ __volatile__ ( | 182 | __asm__ __volatile__ ( |
183 | " .set push # __raw_spin_trylock \n" | 183 | " .set push # arch_spin_trylock \n" |
184 | " .set noreorder \n" | 184 | " .set noreorder \n" |
185 | " \n" | 185 | " \n" |
186 | "1: ll %[ticket], %[ticket_ptr] \n" | 186 | "1: ll %[ticket], %[ticket_ptr] \n" |
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
204 | [now_serving] "=&r" (tmp3)); | 204 | [now_serving] "=&r" (tmp3)); |
205 | } else { | 205 | } else { |
206 | __asm__ __volatile__ ( | 206 | __asm__ __volatile__ ( |
207 | " .set push # __raw_spin_trylock \n" | 207 | " .set push # arch_spin_trylock \n" |
208 | " .set noreorder \n" | 208 | " .set noreorder \n" |
209 | " \n" | 209 | " \n" |
210 | " ll %[ticket], %[ticket_ptr] \n" | 210 | " ll %[ticket], %[ticket_ptr] \n" |
@@ -483,8 +483,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
483 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 483 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
484 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 484 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) |
485 | 485 | ||
486 | #define _raw_spin_relax(lock) cpu_relax() | 486 | #define arch_spin_relax(lock) cpu_relax() |
487 | #define _raw_read_relax(lock) cpu_relax() | 487 | #define arch_read_relax(lock) cpu_relax() |
488 | #define _raw_write_relax(lock) cpu_relax() | 488 | #define arch_write_relax(lock) cpu_relax() |
489 | 489 | ||
490 | #endif /* _ASM_SPINLOCK_H */ | 490 | #endif /* _ASM_SPINLOCK_H */ |
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 3a4ea778d4b6..716634d1f546 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -34,12 +34,12 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
34 | #define _atomic_spin_lock_irqsave(l,f) do { \ | 34 | #define _atomic_spin_lock_irqsave(l,f) do { \ |
35 | arch_spinlock_t *s = ATOMIC_HASH(l); \ | 35 | arch_spinlock_t *s = ATOMIC_HASH(l); \ |
36 | local_irq_save(f); \ | 36 | local_irq_save(f); \ |
37 | __raw_spin_lock(s); \ | 37 | arch_spin_lock(s); \ |
38 | } while(0) | 38 | } while(0) |
39 | 39 | ||
40 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | 40 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ |
41 | arch_spinlock_t *s = ATOMIC_HASH(l); \ | 41 | arch_spinlock_t *s = ATOMIC_HASH(l); \ |
42 | __raw_spin_unlock(s); \ | 42 | arch_spin_unlock(s); \ |
43 | local_irq_restore(f); \ | 43 | local_irq_restore(f); \ |
44 | } while(0) | 44 | } while(0) |
45 | 45 | ||
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 69e8dca26744..235e7e386e2a 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
6 | #include <asm/spinlock_types.h> | 6 | #include <asm/spinlock_types.h> |
7 | 7 | ||
8 | static inline int __raw_spin_is_locked(arch_spinlock_t *x) | 8 | static inline int arch_spin_is_locked(arch_spinlock_t *x) |
9 | { | 9 | { |
10 | volatile unsigned int *a = __ldcw_align(x); | 10 | volatile unsigned int *a = __ldcw_align(x); |
11 | return *a == 0; | 11 | return *a == 0; |
12 | } | 12 | } |
13 | 13 | ||
14 | #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) | 14 | #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) |
15 | #define __raw_spin_unlock_wait(x) \ | 15 | #define arch_spin_unlock_wait(x) \ |
16 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | 16 | do { cpu_relax(); } while (arch_spin_is_locked(x)) |
17 | 17 | ||
18 | static inline void __raw_spin_lock_flags(arch_spinlock_t *x, | 18 | static inline void arch_spin_lock_flags(arch_spinlock_t *x, |
19 | unsigned long flags) | 19 | unsigned long flags) |
20 | { | 20 | { |
21 | volatile unsigned int *a; | 21 | volatile unsigned int *a; |
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
33 | mb(); | 33 | mb(); |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline void __raw_spin_unlock(arch_spinlock_t *x) | 36 | static inline void arch_spin_unlock(arch_spinlock_t *x) |
37 | { | 37 | { |
38 | volatile unsigned int *a; | 38 | volatile unsigned int *a; |
39 | mb(); | 39 | mb(); |
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *x)
42 | mb(); | 42 | mb(); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline int __raw_spin_trylock(arch_spinlock_t *x) | 45 | static inline int arch_spin_trylock(arch_spinlock_t *x) |
46 | { | 46 | { |
47 | volatile unsigned int *a; | 47 | volatile unsigned int *a; |
48 | int ret; | 48 | int ret; |
@@ -73,9 +73,9 @@ static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
73 | { | 73 | { |
74 | unsigned long flags; | 74 | unsigned long flags; |
75 | local_irq_save(flags); | 75 | local_irq_save(flags); |
76 | __raw_spin_lock_flags(&rw->lock, flags); | 76 | arch_spin_lock_flags(&rw->lock, flags); |
77 | rw->counter++; | 77 | rw->counter++; |
78 | __raw_spin_unlock(&rw->lock); | 78 | arch_spin_unlock(&rw->lock); |
79 | local_irq_restore(flags); | 79 | local_irq_restore(flags); |
80 | } | 80 | } |
81 | 81 | ||
@@ -85,9 +85,9 @@ static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
85 | { | 85 | { |
86 | unsigned long flags; | 86 | unsigned long flags; |
87 | local_irq_save(flags); | 87 | local_irq_save(flags); |
88 | __raw_spin_lock_flags(&rw->lock, flags); | 88 | arch_spin_lock_flags(&rw->lock, flags); |
89 | rw->counter--; | 89 | rw->counter--; |
90 | __raw_spin_unlock(&rw->lock); | 90 | arch_spin_unlock(&rw->lock); |
91 | local_irq_restore(flags); | 91 | local_irq_restore(flags); |
92 | } | 92 | } |
93 | 93 | ||
@@ -98,9 +98,9 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
98 | unsigned long flags; | 98 | unsigned long flags; |
99 | retry: | 99 | retry: |
100 | local_irq_save(flags); | 100 | local_irq_save(flags); |
101 | if (__raw_spin_trylock(&rw->lock)) { | 101 | if (arch_spin_trylock(&rw->lock)) { |
102 | rw->counter++; | 102 | rw->counter++; |
103 | __raw_spin_unlock(&rw->lock); | 103 | arch_spin_unlock(&rw->lock); |
104 | local_irq_restore(flags); | 104 | local_irq_restore(flags); |
105 | return 1; | 105 | return 1; |
106 | } | 106 | } |
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
111 | return 0; | 111 | return 0; |
112 | 112 | ||
113 | /* Wait until we have a realistic chance at the lock */ | 113 | /* Wait until we have a realistic chance at the lock */ |
114 | while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) | 114 | while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) |
115 | cpu_relax(); | 115 | cpu_relax(); |
116 | 116 | ||
117 | goto retry; | 117 | goto retry; |
@@ -124,10 +124,10 @@ static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
124 | unsigned long flags; | 124 | unsigned long flags; |
125 | retry: | 125 | retry: |
126 | local_irq_save(flags); | 126 | local_irq_save(flags); |
127 | __raw_spin_lock_flags(&rw->lock, flags); | 127 | arch_spin_lock_flags(&rw->lock, flags); |
128 | 128 | ||
129 | if (rw->counter != 0) { | 129 | if (rw->counter != 0) { |
130 | __raw_spin_unlock(&rw->lock); | 130 | arch_spin_unlock(&rw->lock); |
131 | local_irq_restore(flags); | 131 | local_irq_restore(flags); |
132 | 132 | ||
133 | while (rw->counter != 0) | 133 | while (rw->counter != 0) |
@@ -144,7 +144,7 @@ retry:
144 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) | 144 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) |
145 | { | 145 | { |
146 | rw->counter = 0; | 146 | rw->counter = 0; |
147 | __raw_spin_unlock(&rw->lock); | 147 | arch_spin_unlock(&rw->lock); |
148 | } | 148 | } |
149 | 149 | ||
150 | /* Note that we have to ensure interrupts are disabled in case we're | 150 | /* Note that we have to ensure interrupts are disabled in case we're |
@@ -155,13 +155,13 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
155 | int result = 0; | 155 | int result = 0; |
156 | 156 | ||
157 | local_irq_save(flags); | 157 | local_irq_save(flags); |
158 | if (__raw_spin_trylock(&rw->lock)) { | 158 | if (arch_spin_trylock(&rw->lock)) { |
159 | if (rw->counter == 0) { | 159 | if (rw->counter == 0) { |
160 | rw->counter = -1; | 160 | rw->counter = -1; |
161 | result = 1; | 161 | result = 1; |
162 | } else { | 162 | } else { |
163 | /* Read-locked. Oh well. */ | 163 | /* Read-locked. Oh well. */ |
164 | __raw_spin_unlock(&rw->lock); | 164 | arch_spin_unlock(&rw->lock); |
165 | } | 165 | } |
166 | } | 166 | } |
167 | local_irq_restore(flags); | 167 | local_irq_restore(flags); |
@@ -190,8 +190,8 @@ static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
190 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 190 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
191 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 191 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) |
192 | 192 | ||
193 | #define _raw_spin_relax(lock) cpu_relax() | 193 | #define arch_spin_relax(lock) cpu_relax() |
194 | #define _raw_read_relax(lock) cpu_relax() | 194 | #define arch_read_relax(lock) cpu_relax() |
195 | #define _raw_write_relax(lock) cpu_relax() | 195 | #define arch_write_relax(lock) cpu_relax() |
196 | 196 | ||
197 | #endif /* __ASM_SPINLOCK_H */ | 197 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c0d44c92ff0e..cdcaf6b97087 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
28 | #include <asm/asm-compat.h> | 28 | #include <asm/asm-compat.h> |
29 | #include <asm/synch.h> | 29 | #include <asm/synch.h> |
30 | 30 | ||
31 | #define __raw_spin_is_locked(x) ((x)->slock != 0) | 31 | #define arch_spin_is_locked(x) ((x)->slock != 0) |
32 | 32 | ||
33 | #ifdef CONFIG_PPC64 | 33 | #ifdef CONFIG_PPC64 |
34 | /* use 0x800000yy when locked, where yy == CPU number */ | 34 | /* use 0x800000yy when locked, where yy == CPU number */ |
@@ -54,7 +54,7 @@
54 | * This returns the old value in the lock, so we succeeded | 54 | * This returns the old value in the lock, so we succeeded |
55 | * in getting the lock if the return value is 0. | 55 | * in getting the lock if the return value is 0. |
56 | */ | 56 | */ |
57 | static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock) | 57 | static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned long tmp, token; | 59 | unsigned long tmp, token; |
60 | 60 | ||
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
73 | return tmp; | 73 | return tmp; |
74 | } | 74 | } |
75 | 75 | ||
76 | static inline int __raw_spin_trylock(arch_spinlock_t *lock) | 76 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
77 | { | 77 | { |
78 | CLEAR_IO_SYNC; | 78 | CLEAR_IO_SYNC; |
79 | return arch_spin_trylock(lock) == 0; | 79 | return __arch_spin_trylock(lock) == 0; |
80 | } | 80 | } |
81 | 81 | ||
82 | /* | 82 | /* |
@@ -104,11 +104,11 @@ extern void __rw_yield(raw_rwlock_t *lock);
104 | #define SHARED_PROCESSOR 0 | 104 | #define SHARED_PROCESSOR 0 |
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 107 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
108 | { | 108 | { |
109 | CLEAR_IO_SYNC; | 109 | CLEAR_IO_SYNC; |
110 | while (1) { | 110 | while (1) { |
111 | if (likely(arch_spin_trylock(lock) == 0)) | 111 | if (likely(__arch_spin_trylock(lock) == 0)) |
112 | break; | 112 | break; |
113 | do { | 113 | do { |
114 | HMT_low(); | 114 | HMT_low(); |
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
120 | } | 120 | } |
121 | 121 | ||
122 | static inline | 122 | static inline |
123 | void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) | 123 | void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
124 | { | 124 | { |
125 | unsigned long flags_dis; | 125 | unsigned long flags_dis; |
126 | 126 | ||
127 | CLEAR_IO_SYNC; | 127 | CLEAR_IO_SYNC; |
128 | while (1) { | 128 | while (1) { |
129 | if (likely(arch_spin_trylock(lock) == 0)) | 129 | if (likely(__arch_spin_trylock(lock) == 0)) |
130 | break; | 130 | break; |
131 | local_save_flags(flags_dis); | 131 | local_save_flags(flags_dis); |
132 | local_irq_restore(flags); | 132 | local_irq_restore(flags); |
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
140 | } | 140 | } |
141 | } | 141 | } |
142 | 142 | ||
143 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 143 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
144 | { | 144 | { |
145 | SYNC_IO; | 145 | SYNC_IO; |
146 | __asm__ __volatile__("# __raw_spin_unlock\n\t" | 146 | __asm__ __volatile__("# arch_spin_unlock\n\t" |
147 | LWSYNC_ON_SMP: : :"memory"); | 147 | LWSYNC_ON_SMP: : :"memory"); |
148 | lock->slock = 0; | 148 | lock->slock = 0; |
149 | } | 149 | } |
150 | 150 | ||
151 | #ifdef CONFIG_PPC64 | 151 | #ifdef CONFIG_PPC64 |
152 | extern void __raw_spin_unlock_wait(arch_spinlock_t *lock); | 152 | extern void arch_spin_unlock_wait(arch_spinlock_t *lock); |
153 | #else | 153 | #else |
154 | #define __raw_spin_unlock_wait(lock) \ | 154 | #define arch_spin_unlock_wait(lock) \ |
155 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 155 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) |
156 | #endif | 156 | #endif |
157 | 157 | ||
158 | /* | 158 | /* |
@@ -290,9 +290,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
290 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 290 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
291 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 291 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) |
292 | 292 | ||
293 | #define _raw_spin_relax(lock) __spin_yield(lock) | 293 | #define arch_spin_relax(lock) __spin_yield(lock) |
294 | #define _raw_read_relax(lock) __rw_yield(lock) | 294 | #define arch_read_relax(lock) __rw_yield(lock) |
295 | #define _raw_write_relax(lock) __rw_yield(lock) | 295 | #define arch_write_relax(lock) __rw_yield(lock) |
296 | 296 | ||
297 | #endif /* __KERNEL__ */ | 297 | #endif /* __KERNEL__ */ |
298 | #endif /* __ASM_SPINLOCK_H */ | 298 | #endif /* __ASM_SPINLOCK_H */ |
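powerpc is the one architecture where the new public name collides with an existing internal helper: the old low-level arch_spin_trylock(), which returns the previous lock value, becomes __arch_spin_trylock(), and the public arch_spin_trylock() wraps it and reports success when that value is 0. A hypothetical sketch of the same two-level split, independent of the earlier examples and of the real ppc assembly:

/* Hypothetical two-level trylock mirroring the powerpc split; illustration only. */
static inline unsigned long __arch_spin_trylock_sketch(volatile unsigned int *lock)
{
        /* returns the old value: 0 means the caller now owns the lock */
        return __sync_val_compare_and_swap(lock, 0, 1);
}

static inline int arch_spin_trylock_sketch(volatile unsigned int *lock)
{
        return __arch_spin_trylock_sketch(lock) == 0;   /* kernel convention: 1 on success */
}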
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 57dfa414cfb8..fd0d29493fd6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void)
80 | 80 | ||
81 | local_irq_save(flags); | 81 | local_irq_save(flags); |
82 | preempt_disable(); | 82 | preempt_disable(); |
83 | __raw_spin_lock_flags(&rtas.lock, flags); | 83 | arch_spin_lock_flags(&rtas.lock, flags); |
84 | return flags; | 84 | return flags; |
85 | } | 85 | } |
86 | 86 | ||
87 | static void unlock_rtas(unsigned long flags) | 87 | static void unlock_rtas(unsigned long flags) |
88 | { | 88 | { |
89 | __raw_spin_unlock(&rtas.lock); | 89 | arch_spin_unlock(&rtas.lock); |
90 | local_irq_restore(flags); | 90 | local_irq_restore(flags); |
91 | preempt_enable(); | 91 | preempt_enable(); |
92 | } | 92 | } |
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void)
987 | 987 | ||
988 | local_irq_save(flags); | 988 | local_irq_save(flags); |
989 | hard_irq_disable(); | 989 | hard_irq_disable(); |
990 | __raw_spin_lock(&timebase_lock); | 990 | arch_spin_lock(&timebase_lock); |
991 | rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); | 991 | rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); |
992 | timebase = get_tb(); | 992 | timebase = get_tb(); |
993 | __raw_spin_unlock(&timebase_lock); | 993 | arch_spin_unlock(&timebase_lock); |
994 | 994 | ||
995 | while (timebase) | 995 | while (timebase) |
996 | barrier(); | 996 | barrier(); |
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void)
1002 | { | 1002 | { |
1003 | while (!timebase) | 1003 | while (!timebase) |
1004 | barrier(); | 1004 | barrier(); |
1005 | __raw_spin_lock(&timebase_lock); | 1005 | arch_spin_lock(&timebase_lock); |
1006 | set_tb(timebase >> 32, timebase & 0xffffffff); | 1006 | set_tb(timebase >> 32, timebase & 0xffffffff); |
1007 | timebase = 0; | 1007 | timebase = 0; |
1008 | __raw_spin_unlock(&timebase_lock); | 1008 | arch_spin_unlock(&timebase_lock); |
1009 | } | 1009 | } |
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index b06294cde499..ee395e392115 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
82 | } | 82 | } |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | void __raw_spin_unlock_wait(arch_spinlock_t *lock) | 85 | void arch_spin_unlock_wait(arch_spinlock_t *lock) |
86 | { | 86 | { |
87 | while (lock->slock) { | 87 | while (lock->slock) { |
88 | HMT_low(); | 88 | HMT_low(); |
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(arch_spinlock_t *lock)
92 | HMT_medium(); | 92 | HMT_medium(); |
93 | } | 93 | } |
94 | 94 | ||
95 | EXPORT_SYMBOL(__raw_spin_unlock_wait); | 95 | EXPORT_SYMBOL(arch_spin_unlock_wait); |
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index be36fece41d7..242f8095c2df 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void)
80 | 80 | ||
81 | local_irq_save(flags); | 81 | local_irq_save(flags); |
82 | hard_irq_disable(); | 82 | hard_irq_disable(); |
83 | __raw_spin_lock(&timebase_lock); | 83 | arch_spin_lock(&timebase_lock); |
84 | mtspr(SPRN_TBCTL, TBCTL_FREEZE); | 84 | mtspr(SPRN_TBCTL, TBCTL_FREEZE); |
85 | isync(); | 85 | isync(); |
86 | timebase = get_tb(); | 86 | timebase = get_tb(); |
87 | __raw_spin_unlock(&timebase_lock); | 87 | arch_spin_unlock(&timebase_lock); |
88 | 88 | ||
89 | while (timebase) | 89 | while (timebase) |
90 | barrier(); | 90 | barrier(); |
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void)
97 | while (!timebase) | 97 | while (!timebase) |
98 | smp_rmb(); | 98 | smp_rmb(); |
99 | 99 | ||
100 | __raw_spin_lock(&timebase_lock); | 100 | arch_spin_lock(&timebase_lock); |
101 | set_tb(timebase >> 32, timebase & 0xffffffff); | 101 | set_tb(timebase >> 32, timebase & 0xffffffff); |
102 | timebase = 0; | 102 | timebase = 0; |
103 | __raw_spin_unlock(&timebase_lock); | 103 | arch_spin_unlock(&timebase_lock); |
104 | } | 104 | } |
105 | 105 | ||
106 | struct smp_ops_t pas_smp_ops = { | 106 | struct smp_ops_t pas_smp_ops = { |
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 6121fa4b83d9..a94c146657a9 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock,
52 | * (the type definitions are in asm/spinlock_types.h) | 52 | * (the type definitions are in asm/spinlock_types.h) |
53 | */ | 53 | */ |
54 | 54 | ||
55 | #define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) | 55 | #define arch_spin_is_locked(x) ((x)->owner_cpu != 0) |
56 | #define __raw_spin_unlock_wait(lock) \ | 56 | #define arch_spin_unlock_wait(lock) \ |
57 | do { while (__raw_spin_is_locked(lock)) \ | 57 | do { while (arch_spin_is_locked(lock)) \ |
58 | _raw_spin_relax(lock); } while (0) | 58 | arch_spin_relax(lock); } while (0) |
59 | 59 | ||
60 | extern void _raw_spin_lock_wait(arch_spinlock_t *); | 60 | extern void arch_spin_lock_wait(arch_spinlock_t *); |
61 | extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); | 61 | extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); |
62 | extern int _raw_spin_trylock_retry(arch_spinlock_t *); | 62 | extern int arch_spin_trylock_retry(arch_spinlock_t *); |
63 | extern void _raw_spin_relax(arch_spinlock_t *lock); | 63 | extern void arch_spin_relax(arch_spinlock_t *lock); |
64 | 64 | ||
65 | static inline void __raw_spin_lock(arch_spinlock_t *lp) | 65 | static inline void arch_spin_lock(arch_spinlock_t *lp) |
66 | { | 66 | { |
67 | int old; | 67 | int old; |
68 | 68 | ||
69 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); | 69 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); |
70 | if (likely(old == 0)) | 70 | if (likely(old == 0)) |
71 | return; | 71 | return; |
72 | _raw_spin_lock_wait(lp); | 72 | arch_spin_lock_wait(lp); |
73 | } | 73 | } |
74 | 74 | ||
75 | static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, | 75 | static inline void arch_spin_lock_flags(arch_spinlock_t *lp, |
76 | unsigned long flags) | 76 | unsigned long flags) |
77 | { | 77 | { |
78 | int old; | 78 | int old; |
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
80 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); | 80 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); |
81 | if (likely(old == 0)) | 81 | if (likely(old == 0)) |
82 | return; | 82 | return; |
83 | _raw_spin_lock_wait_flags(lp, flags); | 83 | arch_spin_lock_wait_flags(lp, flags); |
84 | } | 84 | } |
85 | 85 | ||
86 | static inline int __raw_spin_trylock(arch_spinlock_t *lp) | 86 | static inline int arch_spin_trylock(arch_spinlock_t *lp) |
87 | { | 87 | { |
88 | int old; | 88 | int old; |
89 | 89 | ||
90 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); | 90 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); |
91 | if (likely(old == 0)) | 91 | if (likely(old == 0)) |
92 | return 1; | 92 | return 1; |
93 | return _raw_spin_trylock_retry(lp); | 93 | return arch_spin_trylock_retry(lp); |
94 | } | 94 | } |
95 | 95 | ||
96 | static inline void __raw_spin_unlock(arch_spinlock_t *lp) | 96 | static inline void arch_spin_unlock(arch_spinlock_t *lp) |
97 | { | 97 | { |
98 | _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); | 98 | _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); |
99 | } | 99 | } |
@@ -188,7 +188,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
188 | return _raw_write_trylock_retry(rw); | 188 | return _raw_write_trylock_retry(rw); |
189 | } | 189 | } |
190 | 190 | ||
191 | #define _raw_read_relax(lock) cpu_relax() | 191 | #define arch_read_relax(lock) cpu_relax() |
192 | #define _raw_write_relax(lock) cpu_relax() | 192 | #define arch_write_relax(lock) cpu_relax() |
193 | 193 | ||
194 | #endif /* __ASM_SPINLOCK_H */ | 194 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index d4cbf71a6077..f4596452f072 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
39 | _raw_yield(); | 39 | _raw_yield(); |
40 | } | 40 | } |
41 | 41 | ||
42 | void _raw_spin_lock_wait(arch_spinlock_t *lp) | 42 | void arch_spin_lock_wait(arch_spinlock_t *lp) |
43 | { | 43 | { |
44 | int count = spin_retry; | 44 | int count = spin_retry; |
45 | unsigned int cpu = ~smp_processor_id(); | 45 | unsigned int cpu = ~smp_processor_id(); |
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(arch_spinlock_t *lp)
51 | _raw_yield_cpu(~owner); | 51 | _raw_yield_cpu(~owner); |
52 | count = spin_retry; | 52 | count = spin_retry; |
53 | } | 53 | } |
54 | if (__raw_spin_is_locked(lp)) | 54 | if (arch_spin_is_locked(lp)) |
55 | continue; | 55 | continue; |
56 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 56 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
57 | return; | 57 | return; |
58 | } | 58 | } |
59 | } | 59 | } |
60 | EXPORT_SYMBOL(_raw_spin_lock_wait); | 60 | EXPORT_SYMBOL(arch_spin_lock_wait); |
61 | 61 | ||
62 | void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) | 62 | void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) |
63 | { | 63 | { |
64 | int count = spin_retry; | 64 | int count = spin_retry; |
65 | unsigned int cpu = ~smp_processor_id(); | 65 | unsigned int cpu = ~smp_processor_id(); |
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
72 | _raw_yield_cpu(~owner); | 72 | _raw_yield_cpu(~owner); |
73 | count = spin_retry; | 73 | count = spin_retry; |
74 | } | 74 | } |
75 | if (__raw_spin_is_locked(lp)) | 75 | if (arch_spin_is_locked(lp)) |
76 | continue; | 76 | continue; |
77 | local_irq_disable(); | 77 | local_irq_disable(); |
78 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 78 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
@@ -80,30 +80,30 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
80 | local_irq_restore(flags); | 80 | local_irq_restore(flags); |
81 | } | 81 | } |
82 | } | 82 | } |
83 | EXPORT_SYMBOL(_raw_spin_lock_wait_flags); | 83 | EXPORT_SYMBOL(arch_spin_lock_wait_flags); |
84 | 84 | ||
85 | int _raw_spin_trylock_retry(arch_spinlock_t *lp) | 85 | int arch_spin_trylock_retry(arch_spinlock_t *lp) |
86 | { | 86 | { |
87 | unsigned int cpu = ~smp_processor_id(); | 87 | unsigned int cpu = ~smp_processor_id(); |
88 | int count; | 88 | int count; |
89 | 89 | ||
90 | for (count = spin_retry; count > 0; count--) { | 90 | for (count = spin_retry; count > 0; count--) { |
91 | if (__raw_spin_is_locked(lp)) | 91 | if (arch_spin_is_locked(lp)) |
92 | continue; | 92 | continue; |
93 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 93 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
94 | return 1; | 94 | return 1; |
95 | } | 95 | } |
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
98 | EXPORT_SYMBOL(_raw_spin_trylock_retry); | 98 | EXPORT_SYMBOL(arch_spin_trylock_retry); |
99 | 99 | ||
100 | void _raw_spin_relax(arch_spinlock_t *lock) | 100 | void arch_spin_relax(arch_spinlock_t *lock) |
101 | { | 101 | { |
102 | unsigned int cpu = lock->owner_cpu; | 102 | unsigned int cpu = lock->owner_cpu; |
103 | if (cpu != 0) | 103 | if (cpu != 0) |
104 | _raw_yield_cpu(~cpu); | 104 | _raw_yield_cpu(~cpu); |
105 | } | 105 | } |
106 | EXPORT_SYMBOL(_raw_spin_relax); | 106 | EXPORT_SYMBOL(arch_spin_relax); |
107 | 107 | ||
108 | void _raw_read_lock_wait(raw_rwlock_t *rw) | 108 | void _raw_read_lock_wait(raw_rwlock_t *rw) |
109 | { | 109 | { |
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 5a05b3fcefbe..da1c6491ed4b 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
23 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 23 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #define __raw_spin_is_locked(x) ((x)->lock <= 0) | 26 | #define arch_spin_is_locked(x) ((x)->lock <= 0) |
27 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 27 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
28 | #define __raw_spin_unlock_wait(x) \ | 28 | #define arch_spin_unlock_wait(x) \ |
29 | do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) | 29 | do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Simple spin lock operations. There are two variants, one clears IRQ's | 32 | * Simple spin lock operations. There are two variants, one clears IRQ's |
@@ -34,14 +34,14 @@
34 | * | 34 | * |
35 | * We make no fairness assumptions. They have a cost. | 35 | * We make no fairness assumptions. They have a cost. |
36 | */ | 36 | */ |
37 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 37 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
38 | { | 38 | { |
39 | unsigned long tmp; | 39 | unsigned long tmp; |
40 | unsigned long oldval; | 40 | unsigned long oldval; |
41 | 41 | ||
42 | __asm__ __volatile__ ( | 42 | __asm__ __volatile__ ( |
43 | "1: \n\t" | 43 | "1: \n\t" |
44 | "movli.l @%2, %0 ! __raw_spin_lock \n\t" | 44 | "movli.l @%2, %0 ! arch_spin_lock \n\t" |
45 | "mov %0, %1 \n\t" | 45 | "mov %0, %1 \n\t" |
46 | "mov #0, %0 \n\t" | 46 | "mov #0, %0 \n\t" |
47 | "movco.l %0, @%2 \n\t" | 47 | "movco.l %0, @%2 \n\t" |
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
54 | ); | 54 | ); |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 57 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned long tmp; | 59 | unsigned long tmp; |
60 | 60 | ||
61 | __asm__ __volatile__ ( | 61 | __asm__ __volatile__ ( |
62 | "mov #1, %0 ! __raw_spin_unlock \n\t" | 62 | "mov #1, %0 ! arch_spin_unlock \n\t" |
63 | "mov.l %0, @%1 \n\t" | 63 | "mov.l %0, @%1 \n\t" |
64 | : "=&z" (tmp) | 64 | : "=&z" (tmp) |
65 | : "r" (&lock->lock) | 65 | : "r" (&lock->lock) |
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
67 | ); | 67 | ); |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline int __raw_spin_trylock(arch_spinlock_t *lock) | 70 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
71 | { | 71 | { |
72 | unsigned long tmp, oldval; | 72 | unsigned long tmp, oldval; |
73 | 73 | ||
74 | __asm__ __volatile__ ( | 74 | __asm__ __volatile__ ( |
75 | "1: \n\t" | 75 | "1: \n\t" |
76 | "movli.l @%2, %0 ! __raw_spin_trylock \n\t" | 76 | "movli.l @%2, %0 ! arch_spin_trylock \n\t" |
77 | "mov %0, %1 \n\t" | 77 | "mov %0, %1 \n\t" |
78 | "mov #0, %0 \n\t" | 78 | "mov #0, %0 \n\t" |
79 | "movco.l %0, @%2 \n\t" | 79 | "movco.l %0, @%2 \n\t" |
@@ -219,8 +219,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
219 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 219 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
220 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 220 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) |
221 | 221 | ||
222 | #define _raw_spin_relax(lock) cpu_relax() | 222 | #define arch_spin_relax(lock) cpu_relax() |
223 | #define _raw_read_relax(lock) cpu_relax() | 223 | #define arch_read_relax(lock) cpu_relax() |
224 | #define _raw_write_relax(lock) cpu_relax() | 224 | #define arch_write_relax(lock) cpu_relax() |
225 | 225 | ||
226 | #endif /* __ASM_SH_SPINLOCK_H */ | 226 | #endif /* __ASM_SH_SPINLOCK_H */ |
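The SH lock above is built from a movli.l/movco.l load-locked/store-conditional pair: the lock word holds 1 when free and 0 (or less) when taken, which is why arch_spin_is_locked() tests lock <= 0. Below is a hedged, portable C sketch of the same acquire/release protocol with a compare-and-swap loop standing in for LL/SC; the shdemo_* names are invented for illustration and are not the kernel's.

#include <stdatomic.h>

/* hypothetical: mirrors the SH convention above -- 1 = free, <= 0 = taken */
typedef struct { atomic_int lock; } shdemo_lock_t;

static void shdemo_lock(shdemo_lock_t *l)
{
	int expected;

	do {
		expected = 1;		/* only proceed if currently free */
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected, 0,
							memory_order_acquire,
							memory_order_relaxed));
}

static int shdemo_trylock(shdemo_lock_t *l)
{
	int expected = 1;

	return atomic_compare_exchange_strong_explicit(&l->lock, &expected, 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void shdemo_unlock(shdemo_lock_t *l)
{
	/* a plain store of 1 with release ordering, like the mov.l above */
	atomic_store_explicit(&l->lock, 1, memory_order_release);
}

On SH the retry happens in the assembly itself (the bf 1b after movco.l re-runs the sequence when the store-conditional fails), so the do/while loop here plays the same role.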
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index b2d8a67f727e..9b0f2f53c81c 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
@@ -10,12 +10,12 @@ | |||
10 | 10 | ||
11 | #include <asm/psr.h> | 11 | #include <asm/psr.h> |
12 | 12 | ||
13 | #define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) | 13 | #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) |
14 | 14 | ||
15 | #define __raw_spin_unlock_wait(lock) \ | 15 | #define arch_spin_unlock_wait(lock) \ |
16 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 16 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) |
17 | 17 | ||
18 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 18 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
19 | { | 19 | { |
20 | __asm__ __volatile__( | 20 | __asm__ __volatile__( |
21 | "\n1:\n\t" | 21 | "\n1:\n\t" |
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) | |||
35 | : "g2", "memory", "cc"); | 35 | : "g2", "memory", "cc"); |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline int __raw_spin_trylock(arch_spinlock_t *lock) | 38 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
39 | { | 39 | { |
40 | unsigned int result; | 40 | unsigned int result; |
41 | __asm__ __volatile__("ldstub [%1], %0" | 41 | __asm__ __volatile__("ldstub [%1], %0" |
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) | |||
45 | return (result == 0); | 45 | return (result == 0); |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 48 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
49 | { | 49 | { |
50 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); | 50 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); |
51 | } | 51 | } |
@@ -176,13 +176,13 @@ static inline int arch_read_trylock(raw_rwlock_t *rw) | |||
176 | 176 | ||
177 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) | 177 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) |
178 | 178 | ||
179 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 179 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
180 | #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) | 180 | #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) |
181 | #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) | 181 | #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) |
182 | 182 | ||
183 | #define _raw_spin_relax(lock) cpu_relax() | 183 | #define arch_spin_relax(lock) cpu_relax() |
184 | #define _raw_read_relax(lock) cpu_relax() | 184 | #define arch_read_relax(lock) cpu_relax() |
185 | #define _raw_write_relax(lock) cpu_relax() | 185 | #define arch_write_relax(lock) cpu_relax() |
186 | 186 | ||
187 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) | 187 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) |
188 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 188 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 38e16c40efc4..7cf58a2fcda4 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
@@ -21,13 +21,13 @@ | |||
21 | * the spinner sections must be pre-V9 branches. | 21 | * the spinner sections must be pre-V9 branches. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define __raw_spin_is_locked(lp) ((lp)->lock != 0) | 24 | #define arch_spin_is_locked(lp) ((lp)->lock != 0) |
25 | 25 | ||
26 | #define __raw_spin_unlock_wait(lp) \ | 26 | #define arch_spin_unlock_wait(lp) \ |
27 | do { rmb(); \ | 27 | do { rmb(); \ |
28 | } while((lp)->lock) | 28 | } while((lp)->lock) |
29 | 29 | ||
30 | static inline void __raw_spin_lock(arch_spinlock_t *lock) | 30 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
31 | { | 31 | { |
32 | unsigned long tmp; | 32 | unsigned long tmp; |
33 | 33 | ||
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) | |||
46 | : "memory"); | 46 | : "memory"); |
47 | } | 47 | } |
48 | 48 | ||
49 | static inline int __raw_spin_trylock(arch_spinlock_t *lock) | 49 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
50 | { | 50 | { |
51 | unsigned long result; | 51 | unsigned long result; |
52 | 52 | ||
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) | |||
59 | return (result == 0UL); | 59 | return (result == 0UL); |
60 | } | 60 | } |
61 | 61 | ||
62 | static inline void __raw_spin_unlock(arch_spinlock_t *lock) | 62 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
63 | { | 63 | { |
64 | __asm__ __volatile__( | 64 | __asm__ __volatile__( |
65 | " stb %%g0, [%0]" | 65 | " stb %%g0, [%0]" |
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) | |||
68 | : "memory"); | 68 | : "memory"); |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) | 71 | static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
72 | { | 72 | { |
73 | unsigned long tmp1, tmp2; | 73 | unsigned long tmp1, tmp2; |
74 | 74 | ||
@@ -222,9 +222,9 @@ static int inline arch_write_trylock(raw_rwlock_t *lock) | |||
222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | 222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) |
223 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 223 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
224 | 224 | ||
225 | #define _raw_spin_relax(lock) cpu_relax() | 225 | #define arch_spin_relax(lock) cpu_relax() |
226 | #define _raw_read_relax(lock) cpu_relax() | 226 | #define arch_read_relax(lock) cpu_relax() |
227 | #define _raw_write_relax(lock) cpu_relax() | 227 | #define arch_write_relax(lock) cpu_relax() |
228 | 228 | ||
229 | #endif /* !(__ASSEMBLY__) */ | 229 | #endif /* !(__ASSEMBLY__) */ |
230 | 230 | ||
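The sparc64 header is one of the few in this series that implements arch_spin_lock_flags() as a real function rather than aliasing it to arch_spin_lock(): the caller's saved flags let the architecture re-enable interrupts while it waits on a contended lock. A hypothetical C sketch of that idea follows; the sparcdemo_* names and the irq stubs are invented stand-ins, not the sparc code.

#include <stdatomic.h>

/* hypothetical: 0 = free, 1 = held */
typedef struct { atomic_int taken; } sparcdemo_lock_t;

/* invented stand-ins for the architecture's local-irq primitives */
static void demo_irq_restore(unsigned long flags) { (void)flags; }
static void demo_irq_disable(void) { }

static int sparcdemo_trylock(sparcdemo_lock_t *l)
{
	int expected = 0;

	return atomic_compare_exchange_strong_explicit(&l->taken, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void sparcdemo_lock_flags(sparcdemo_lock_t *l, unsigned long flags)
{
	for (;;) {
		if (sparcdemo_trylock(l))
			return;
		/* contended: let interrupts back in, per the caller's saved
		 * flags, while we wait; mask them again before retrying */
		demo_irq_restore(flags);
		while (atomic_load_explicit(&l->taken, memory_order_relaxed))
			;	/* a real implementation would cpu_relax() here */
		demo_irq_disable();
	}
}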
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5655f75f10b7..dd59a85a918f 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, | |||
731 | 731 | ||
732 | #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) | 732 | #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) |
733 | 733 | ||
734 | static inline int __raw_spin_is_locked(struct arch_spinlock *lock) | 734 | static inline int arch_spin_is_locked(struct arch_spinlock *lock) |
735 | { | 735 | { |
736 | return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); | 736 | return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); |
737 | } | 737 | } |
738 | 738 | ||
739 | static inline int __raw_spin_is_contended(struct arch_spinlock *lock) | 739 | static inline int arch_spin_is_contended(struct arch_spinlock *lock) |
740 | { | 740 | { |
741 | return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); | 741 | return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); |
742 | } | 742 | } |
743 | #define __raw_spin_is_contended __raw_spin_is_contended | 743 | #define arch_spin_is_contended arch_spin_is_contended |
744 | 744 | ||
745 | static __always_inline void __raw_spin_lock(struct arch_spinlock *lock) | 745 | static __always_inline void arch_spin_lock(struct arch_spinlock *lock) |
746 | { | 746 | { |
747 | PVOP_VCALL1(pv_lock_ops.spin_lock, lock); | 747 | PVOP_VCALL1(pv_lock_ops.spin_lock, lock); |
748 | } | 748 | } |
749 | 749 | ||
750 | static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock, | 750 | static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock, |
751 | unsigned long flags) | 751 | unsigned long flags) |
752 | { | 752 | { |
753 | PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); | 753 | PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); |
754 | } | 754 | } |
755 | 755 | ||
756 | static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock) | 756 | static __always_inline int arch_spin_trylock(struct arch_spinlock *lock) |
757 | { | 757 | { |
758 | return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); | 758 | return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); |
759 | } | 759 | } |
760 | 760 | ||
761 | static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock) | 761 | static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) |
762 | { | 762 | { |
763 | PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); | 763 | PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); |
764 | } | 764 | } |
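With CONFIG_PARAVIRT_SPINLOCKS, the arch_spin_* entry points above never touch the lock word directly; they dispatch through pv_lock_ops, a table of function pointers that a hypervisor backend can replace at boot. A minimal, hypothetical C sketch of that indirection pattern follows; all pvdemo_* names are invented and this is not the kernel's pv_ops machinery.

#include <stdatomic.h>

typedef struct { atomic_flag taken; } pvdemo_lock_t;	/* hypothetical */

/* hypothetical ops table; a paravirt backend could repoint these at boot */
struct pvdemo_lock_ops {
	void (*lock)(pvdemo_lock_t *lock);
	void (*unlock)(pvdemo_lock_t *lock);
	int  (*trylock)(pvdemo_lock_t *lock);
};

static void pvdemo_native_lock(pvdemo_lock_t *l)
{
	while (atomic_flag_test_and_set_explicit(&l->taken, memory_order_acquire))
		;	/* spin */
}

static void pvdemo_native_unlock(pvdemo_lock_t *l)
{
	atomic_flag_clear_explicit(&l->taken, memory_order_release);
}

static int pvdemo_native_trylock(pvdemo_lock_t *l)
{
	return !atomic_flag_test_and_set_explicit(&l->taken, memory_order_acquire);
}

static struct pvdemo_lock_ops pvdemo_ops = {
	.lock		= pvdemo_native_lock,
	.unlock		= pvdemo_native_unlock,
	.trylock	= pvdemo_native_trylock,
};

/* callers always dispatch through the table, mirroring the PVOP_* wrappers */
static inline void pvdemo_spin_lock(pvdemo_lock_t *l)   { pvdemo_ops.lock(l); }
static inline void pvdemo_spin_unlock(pvdemo_lock_t *l) { pvdemo_ops.unlock(l); }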
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 204b524fcf57..ab9055fd57d9 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) | |||
174 | 174 | ||
175 | #ifndef CONFIG_PARAVIRT_SPINLOCKS | 175 | #ifndef CONFIG_PARAVIRT_SPINLOCKS |
176 | 176 | ||
177 | static inline int __raw_spin_is_locked(arch_spinlock_t *lock) | 177 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
178 | { | 178 | { |
179 | return __ticket_spin_is_locked(lock); | 179 | return __ticket_spin_is_locked(lock); |
180 | } | 180 | } |
181 | 181 | ||
182 | static inline int __raw_spin_is_contended(arch_spinlock_t *lock) | 182 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
183 | { | 183 | { |
184 | return __ticket_spin_is_contended(lock); | 184 | return __ticket_spin_is_contended(lock); |
185 | } | 185 | } |
186 | #define __raw_spin_is_contended __raw_spin_is_contended | 186 | #define arch_spin_is_contended arch_spin_is_contended |
187 | 187 | ||
188 | static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) | 188 | static __always_inline void arch_spin_lock(arch_spinlock_t *lock) |
189 | { | 189 | { |
190 | __ticket_spin_lock(lock); | 190 | __ticket_spin_lock(lock); |
191 | } | 191 | } |
192 | 192 | ||
193 | static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) | 193 | static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) |
194 | { | 194 | { |
195 | return __ticket_spin_trylock(lock); | 195 | return __ticket_spin_trylock(lock); |
196 | } | 196 | } |
197 | 197 | ||
198 | static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) | 198 | static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) |
199 | { | 199 | { |
200 | __ticket_spin_unlock(lock); | 200 | __ticket_spin_unlock(lock); |
201 | } | 201 | } |
202 | 202 | ||
203 | static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, | 203 | static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, |
204 | unsigned long flags) | 204 | unsigned long flags) |
205 | { | 205 | { |
206 | __raw_spin_lock(lock); | 206 | arch_spin_lock(lock); |
207 | } | 207 | } |
208 | 208 | ||
209 | #endif /* CONFIG_PARAVIRT_SPINLOCKS */ | 209 | #endif /* CONFIG_PARAVIRT_SPINLOCKS */ |
210 | 210 | ||
211 | static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) | 211 | static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) |
212 | { | 212 | { |
213 | while (__raw_spin_is_locked(lock)) | 213 | while (arch_spin_is_locked(lock)) |
214 | cpu_relax(); | 214 | cpu_relax(); |
215 | } | 215 | } |
216 | 216 | ||
@@ -298,9 +298,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
298 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 298 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) |
299 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 299 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) |
300 | 300 | ||
301 | #define _raw_spin_relax(lock) cpu_relax() | 301 | #define arch_spin_relax(lock) cpu_relax() |
302 | #define _raw_read_relax(lock) cpu_relax() | 302 | #define arch_read_relax(lock) cpu_relax() |
303 | #define _raw_write_relax(lock) cpu_relax() | 303 | #define arch_write_relax(lock) cpu_relax() |
304 | 304 | ||
305 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ | 305 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ |
306 | static inline void smp_mb__after_lock(void) { } | 306 | static inline void smp_mb__after_lock(void) { } |
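Without paravirt, the x86 wrappers above fall through to a ticket lock (__ticket_spin_*). For readers who have not seen the scheme, here is a rough, hypothetical C sketch of the idea: each arrival takes a ticket from a "next" counter and spins until an "owner" counter reaches that ticket, which gives FIFO fairness. This is only an illustration, not the kernel's implementation.

#include <stdatomic.h>

/* hypothetical ticket lock: FIFO order gives the fairness property */
typedef struct {
	atomic_uint next;	/* ticket handed to the next arrival */
	atomic_uint owner;	/* ticket currently being served */
} ticketdemo_lock_t;

static void ticketdemo_lock(ticketdemo_lock_t *l)
{
	unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);

	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;	/* spin; a real implementation would cpu_relax() here */
}

static void ticketdemo_unlock(ticketdemo_lock_t *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

static int ticketdemo_is_contended(ticketdemo_lock_t *l)
{
	/* more than one ticket outstanding beyond the owner means waiters */
	return (atomic_load_explicit(&l->next, memory_order_relaxed) -
		atomic_load_explicit(&l->owner, memory_order_relaxed)) > 1;
}

The real __ticket_spin_* code packs both counters into a single lock word so the acquire can be done with one locked instruction; the two-field layout above is purely for readability.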
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 5b75afac8a38..0a0aa1cec8f1 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void) | |||
207 | /* racy, but better than risking deadlock. */ | 207 | /* racy, but better than risking deadlock. */ |
208 | raw_local_irq_save(flags); | 208 | raw_local_irq_save(flags); |
209 | cpu = smp_processor_id(); | 209 | cpu = smp_processor_id(); |
210 | if (!__raw_spin_trylock(&die_lock)) { | 210 | if (!arch_spin_trylock(&die_lock)) { |
211 | if (cpu == die_owner) | 211 | if (cpu == die_owner) |
212 | /* nested oops. should stop eventually */; | 212 | /* nested oops. should stop eventually */; |
213 | else | 213 | else |
214 | __raw_spin_lock(&die_lock); | 214 | arch_spin_lock(&die_lock); |
215 | } | 215 | } |
216 | die_nest_count++; | 216 | die_nest_count++; |
217 | die_owner = cpu; | 217 | die_owner = cpu; |
@@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) | |||
231 | die_nest_count--; | 231 | die_nest_count--; |
232 | if (!die_nest_count) | 232 | if (!die_nest_count) |
233 | /* Nest count reaches zero, release the lock. */ | 233 | /* Nest count reaches zero, release the lock. */ |
234 | __raw_spin_unlock(&die_lock); | 234 | arch_spin_unlock(&die_lock); |
235 | raw_local_irq_restore(flags); | 235 | raw_local_irq_restore(flags); |
236 | oops_exit(); | 236 | oops_exit(); |
237 | 237 | ||
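The dumpstack.c hunks above show why the raw arch_spin_* primitives are used directly: an oops can re-enter on the CPU that already holds die_lock, so the code trylocks, tolerates recursion by the owner, and only releases when the nest count drops to zero ("racy, but better than risking deadlock", as the comment says). A hedged, hypothetical C sketch of that pattern, with every demo_* name invented for illustration:

#include <stdatomic.h>

/* hypothetical sketch of the oops_begin()/oops_end() locking pattern above */
static atomic_flag demo_die_lock = ATOMIC_FLAG_INIT;
static int demo_die_owner = -1;		/* CPU currently printing an oops */
static int demo_die_nest_count;		/* nesting depth on the owning CPU */

static int demo_trylock(void)
{
	return !atomic_flag_test_and_set_explicit(&demo_die_lock,
						  memory_order_acquire);
}

static void demo_oops_begin(int cpu)
{
	if (!demo_trylock()) {
		if (cpu == demo_die_owner) {
			/* nested oops on the same CPU: carry on unlocked */
		} else {
			while (!demo_trylock())
				;	/* wait for the other CPU's oops */
		}
	}
	demo_die_nest_count++;
	demo_die_owner = cpu;		/* deliberately racy, as in the original */
}

static void demo_oops_end(void)
{
	demo_die_owner = -1;
	if (--demo_die_nest_count == 0)
		atomic_flag_clear_explicit(&demo_die_lock, memory_order_release);
}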
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index a0f39e090684..676b8c77a976 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c | |||
@@ -10,7 +10,7 @@ | |||
10 | static inline void | 10 | static inline void |
11 | default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) | 11 | default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
12 | { | 12 | { |
13 | __raw_spin_lock(lock); | 13 | arch_spin_lock(lock); |
14 | } | 14 | } |
15 | 15 | ||
16 | struct pv_lock_ops pv_lock_ops = { | 16 | struct pv_lock_ops pv_lock_ops = { |
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index f1714697a09a..0aa5fed8b9e6 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void) | |||
62 | * previous TSC that was measured (possibly on | 62 | * previous TSC that was measured (possibly on |
63 | * another CPU) and update the previous TSC timestamp. | 63 | * another CPU) and update the previous TSC timestamp. |
64 | */ | 64 | */ |
65 | __raw_spin_lock(&sync_lock); | 65 | arch_spin_lock(&sync_lock); |
66 | prev = last_tsc; | 66 | prev = last_tsc; |
67 | rdtsc_barrier(); | 67 | rdtsc_barrier(); |
68 | now = get_cycles(); | 68 | now = get_cycles(); |
69 | rdtsc_barrier(); | 69 | rdtsc_barrier(); |
70 | last_tsc = now; | 70 | last_tsc = now; |
71 | __raw_spin_unlock(&sync_lock); | 71 | arch_spin_unlock(&sync_lock); |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * Be nice every now and then (and also check whether | 74 | * Be nice every now and then (and also check whether |
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void) | |||
87 | * we saw a time-warp of the TSC going backwards: | 87 | * we saw a time-warp of the TSC going backwards: |
88 | */ | 88 | */ |
89 | if (unlikely(prev > now)) { | 89 | if (unlikely(prev > now)) { |
90 | __raw_spin_lock(&sync_lock); | 90 | arch_spin_lock(&sync_lock); |
91 | max_warp = max(max_warp, prev - now); | 91 | max_warp = max(max_warp, prev - now); |
92 | nr_warps++; | 92 | nr_warps++; |
93 | __raw_spin_unlock(&sync_lock); | 93 | arch_spin_unlock(&sync_lock); |
94 | } | 94 | } |
95 | } | 95 | } |
96 | WARN(!(now-start), | 96 | WARN(!(now-start), |