Diffstat (limited to 'arch/sh/include/asm/spinlock.h')
-rw-r--r--	arch/sh/include/asm/spinlock.h | 58 +++++++++++++++++++++++++-----------------------------
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index a28c9f0053fd..bdc0f3b6c56a 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
-#define __raw_spin_is_locked(x)		((x)->lock <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-	do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x)		((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
  *
  * We make no fairness assumptions. They have a cost.
  */
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 	unsigned long oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_spin_lock	\n\t"
+		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
 		"mov		%0, %1				\n\t"
 		"mov		#0, %0				\n\t"
 		"movco.l	%0, @%2				\n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-		"mov		#1, %0 ! __raw_spin_unlock	\n\t"
+		"mov		#1, %0 ! arch_spin_unlock	\n\t"
 		"mov.l		%0, @%1				\n\t"
 		: "=&z" (tmp)
 		: "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_spin_trylock	\n\t"
+		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
 		"mov		%0, %1				\n\t"
 		"mov		#0, %0				\n\t"
 		"movco.l	%0, @%2				\n\t"
@@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)	((x)->lock > 0)
+#define arch_read_can_lock(x)	((x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __raw_read_lock	\n\t"
+		"movli.l	@%1, %0	! arch_read_lock	\n\t"
 		"cmp/pl		%0				\n\t"
 		"bf		1b				\n\t"
 		"add		#-1, %0				\n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __raw_read_unlock	\n\t"
+		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
 		"add		#1, %0				\n\t"
 		"movco.l	%0, @%1				\n\t"
 		"bf		1b				\n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __raw_write_lock	\n\t"
+		"movli.l	@%1, %0	! arch_write_lock	\n\t"
 		"cmp/hs		%2, %0				\n\t"
 		"bf		1b				\n\t"
 		"sub		%2, %0				\n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__ (
-		"mov.l		%1, @%0 ! __raw_write_unlock	\n\t"
+		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
 		:
 		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
 		: "t", "memory"
 	);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_read_trylock	\n\t"
+		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
 		"mov		%0, %1				\n\t"
 		"cmp/pl		%0				\n\t"
 		"bf		2f				\n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	return (oldval > 0);
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_write_trylock	\n\t"
+		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
 		"mov		%0, %1				\n\t"
 		"cmp/hs		%3, %0				\n\t"
 		"bf		2f				\n\t"
@@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SH_SPINLOCK_H */
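
The movli.l/movco.l pairs used throughout this file are the SH-4A load-linked/store-conditional primitives: movli.l loads a word and opens a reservation, and movco.l stores only if the reservation is still intact, recording success in the T bit that the following bf tests. As a rough orientation for readers who do not read SH assembly, below is a sketch of the spinlock half of this file in portable C11 atomics. It is an illustrative stand-in, not kernel code: the sketch_* names are hypothetical, a failed compare-and-swap models a lost reservation, and the acquire/release orderings play the role of the asm "memory" clobbers.

#include <stdatomic.h>

/* Hypothetical stand-in for arch_spinlock_t; as in the
 * arch_spin_is_locked() test above, lock > 0 means "free". */
typedef struct {
	atomic_int lock;
} sketch_spinlock_t;

static void sketch_spin_lock(sketch_spinlock_t *sl)
{
	int oldval;

	for (;;) {
		/* movli.l: load the lock word and open the reservation */
		oldval = atomic_load_explicit(&sl->lock, memory_order_relaxed);
		/* movco.l: commit 0 only if nothing intervened; a failed
		 * CAS models a lost reservation and the "bf 1b" retry */
		if (!atomic_compare_exchange_weak_explicit(&sl->lock, &oldval, 0,
				memory_order_acquire, memory_order_relaxed))
			continue;
		/* cmp/pl; bf 1b: the lock is ours only if the value we
		 * displaced was positive, i.e. the lock was free */
		if (oldval > 0)
			return;
	}
}

static int sketch_spin_trylock(sketch_spinlock_t *sl)
{
	int oldval = atomic_load_explicit(&sl->lock, memory_order_relaxed);

	/* Retry only lost reservations (the CAS refreshes oldval on
	 * failure), then report whether the displaced value was "free",
	 * mirroring the "return (oldval > 0)" pattern of the trylock
	 * routines above. */
	while (!atomic_compare_exchange_weak_explicit(&sl->lock, &oldval, 0,
			memory_order_acquire, memory_order_relaxed))
		;
	return oldval > 0;
}

static void sketch_spin_unlock(sketch_spinlock_t *sl)
{
	/* arch_spin_unlock() is a plain store of 1; release ordering
	 * here stands in for the asm "memory" clobber */
	atomic_store_explicit(&sl->lock, 1, memory_order_release);
}

The rwlock half follows the same LL/SC pattern around a counter that starts at RW_LOCK_BIAS: arch_read_lock() waits for a positive count and decrements it, while arch_write_lock() waits for the full bias and subtracts it. Thus 0 means write-locked, RW_LOCK_BIAS means free, and anything in between means readers hold the lock, which is exactly what arch_read_can_lock() and arch_write_can_lock() test.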