Diffstat (limited to 'arch/m32r/include/asm/spinlock.h')
-rw-r--r--  arch/m32r/include/asm/spinlock.h | 308
1 file changed, 0 insertions(+), 308 deletions(-)
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
deleted file mode 100644
index 0189f410f8f5..000000000000
--- a/arch/m32r/include/asm/spinlock.h
+++ /dev/null
@@ -1,308 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 * linux/include/asm-m32r/spinlock.h
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

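/*
 * slock is 1 when the lock is free and <= 0 when it is held (see the
 * encoding comments in the lock routines below), so a non-positive
 * value reads as "locked".
 */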
#define arch_spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)

/**
 * arch_spin_trylock - try to acquire the lock without spinning
 * @lock: Pointer to the lock variable
 *
 * arch_spin_trylock() tries to acquire the lock once and returns the
 * result: 1 on success, 0 on failure.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 * {
	 *   oldval = lock->slock; <--+ need atomic operation
	 *   lock->slock = 0;      <--+
	 * }
	 */
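	/*
	 * How the sequence below works: mvfc/mvtc save and restore PSW,
	 * and clrpsw #0x40 clears the interrupt-enable bit, so the
	 * atomic section runs with local interrupts off.  The m32r
	 * LOCK/UNLOCK pair is an interlocked load/store: LOCK loads
	 * slock and asserts the bus lock, and UNLOCK stores the new
	 * value (0 here, from tmp1) and releases it.  DCACHE_CLEAR is
	 * evidently a D-cache workaround for CONFIG_CHIP_M32700_TS1
	 * parts; it burns the extra scratch register named in the
	 * conditional clobber list.
	 */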
	__asm__ __volatile__ (
		"# arch_spin_trylock		\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->slock -= 1;  <-- need atomic operation
	 *   if (lock->slock == 0) break;
	 *   for ( ; lock->slock <= 0 ; );
	 * }
	 */
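	/*
	 * Fast path at 1: atomically decrements slock with interrupts
	 * masked (same mvfc/clrpsw/LOCK/UNLOCK pattern as above).  If
	 * the result went negative we did not get the lock; bltz then
	 * jumps to the out-of-line slow path (LOCK_SECTION_START moves
	 * it out of the hot text), which spins reading slock until it
	 * turns positive and only then retries the atomic decrement
	 * at 1:.
	 */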
	__asm__ __volatile__ (
		"# arch_spin_lock		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* Order the critical section before the releasing store. */
	mb();
	lock->slock = 1;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * As on x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
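
/*
 * The counter starts out at RW_LOCK_BIAS.  Each reader subtracts 1,
 * and a writer subtracts the whole bias, so the counter is exactly
 * RW_LOCK_BIAS when the lock is free, positive but below the bias
 * while readers hold it, and zero (or transiently negative) when a
 * writer owns or is contending for it.  RW_LOCK_BIAS_STR is the same
 * constant in string form, spliced into the asm below.
 */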

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
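	/*
	 * Fast path at 1: atomically decrements the counter; if it did
	 * not go negative, the read lock is held and the code falls
	 * through.  Otherwise the out-of-line path at 2: atomically
	 * puts the 1 back, then spins at 3: reading the counter until
	 * it is positive again before retrying from 1:.
	 */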
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	/*
	 * rw->lock :  = RW_LOCK_BIAS : unlock
	 *          : != RW_LOCK_BIAS : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS ; ) ;
	 * }
	 */
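	/*
	 * seth/or3 build the 32-bit RW_LOCK_BIAS constant in tmp1 from
	 * its high and low halves.  The atomic sequence at 1: subtracts
	 * the bias; a zero result means no readers or writers were
	 * holding the lock, so bnez falls through on success.  On
	 * failure, 2: atomically adds the bias back, and 3: spins until
	 * the counter reads as the bias again (beq ... 1b) before
	 * retrying.
	 */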
	__asm__ __volatile__ (
		"# write_lock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn					\n"
		"1:						\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"sub	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		"bnez	%0, 2f;					\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn					\n"
		"2:						\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		".fillinsn					\n"
		"3:						\n\t"
		"ld	%0, @%3;				\n\t"
		"beq	%0, %1, 1b;				\n\t"
		"bra	3b;					\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

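	/*
	 * Atomically add 1 back to the counter, giving up this reader's
	 * hold, using the same interrupts-off LOCK/UNLOCK sequence as
	 * the lock paths.
	 */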
	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

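	/*
	 * Atomically add RW_LOCK_BIAS back, restoring the counter to
	 * its unlocked value (the constant is again built with
	 * seth/or3).
	 */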
	__asm__ __volatile__ (
		"# write_unlock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

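/*
 * The trylock variants sidestep the inline asm entirely: they reuse
 * the generic atomic_t helpers on the counter, undoing the tentative
 * update if the lock turned out to be contended.
 */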
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

#endif	/* _ASM_M32R_SPINLOCK_H */