Diffstat (limited to 'include/asm-mips/spinlock.h')
-rw-r--r--	include/asm-mips/spinlock.h	299
1 file changed, 299 insertions, 0 deletions

diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
new file mode 100644
index 000000000000..114d3eb98a6a
--- /dev/null
+++ b/include/asm-mips/spinlock.h
@@ -0,0 +1,299 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/config.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

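/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * real code reaches these primitives through the spin_lock()/spin_unlock()
 * wrappers in <linux/spinlock.h>.
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	_raw_spin_lock(&my_lock);
 *	... critical section, entered by at most one CPU at a time ...
 *	_raw_spin_unlock(&my_lock);
 */
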
/*
 * Simple spin lock operations. There are two variants: one that also
 * disables IRQs on the local processor, and one that does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */

static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_spin_lock\n"
		"1:	ll	%1, %2\n"
		"	bnez	%1, 1b\n"
		"	li	%1, 1\n"
		"	sc	%1, %0\n"
		"	beqzl	%1, 1b\n"
		"	nop\n"
		"	sync\n"
		"	.set	reorder\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_spin_lock\n"
		"1:	ll	%1, %2\n"
		"	bnez	%1, 1b\n"
		"	li	%1, 1\n"
		"	sc	%1, %0\n"
		"	beqz	%1, 1b\n"
		"	sync\n"
		"	.set	reorder\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	}
}
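
/*
 * Rough C-level sketch of the ll/sc loop above (illustrative only; the
 * atomicity comes from the load-linked/store-conditional pair, for which
 * store_conditional() below is only a stand-in with no real C equivalent):
 *
 *	do {
 *		tmp = lock->lock;		   ll: load-linked
 *		if (tmp != 0) continue;		   bnez: held, keep spinning
 *		tmp = 1;			   li: value to store
 *	} while (!store_conditional(&lock->lock, tmp));	  sc + beqz/beqzl
 *	sync();					   full barrier on acquire
 *
 * The R10000_LLSC_WAR variant differs only in retrying the sc with the
 * branch-likely form (beqzl), working around an ll/sc erratum in early
 * R10000 processors.
 */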

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
	"	.set	noreorder	# _raw_spin_unlock\n"
	"	sync\n"
	"	sw	$0, %0\n"
	"	.set	reorder\n"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}
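
/*
 * Note the ordering in _raw_spin_unlock above: the sync (a full MIPS
 * memory barrier) is issued before the plain store of 0, so every write
 * made inside the critical section is globally visible before another
 * CPU can observe the lock as free.
 */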

static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned int temp, res;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_spin_trylock\n"
		"1:	ll	%0, %3\n"
		"	ori	%2, %0, 1\n"
		"	sc	%2, %1\n"
		"	beqzl	%2, 1b\n"
		"	nop\n"
		"	andi	%2, %0, 1\n"
		"	sync\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_spin_trylock\n"
		"1:	ll	%0, %3\n"
		"	ori	%2, %0, 1\n"
		"	sc	%2, %1\n"
		"	beqz	%2, 1b\n"
		"	andi	%2, %0, 1\n"
		"	sync\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	}

	return res == 0;
}
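
/*
 * Illustrative sketch (hypothetical caller): the andi leaves the old low
 * bit of the lock word in res, so "res == 0" means the lock was free and
 * has just been taken; the function returns nonzero on success:
 *
 *	if (_raw_spin_trylock(&my_lock)) {
 *		... got the lock, do work ...
 *		_raw_spin_unlock(&my_lock);
 *	} else {
 *		... contended, fall back ...
 *	}
 */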

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks: any writer
 * needs to get an irq-safe write-lock, but readers can get non-irq-safe
 * read-locks.
 */

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while (0)
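
/*
 * Sketch of the "mixed" pattern described above (hypothetical caller;
 * real code uses the read_lock()/write_lock_irqsave() wrappers):
 *
 *	interrupt handler (reader):
 *		read_lock(&my_rwlock);
 *		... read shared data ...
 *		read_unlock(&my_rwlock);
 *
 *	process context (the only writer):
 *		write_lock_irqsave(&my_rwlock, flags);
 *		... update shared data ...
 *		write_unlock_irqrestore(&my_rwlock, flags);
 */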

static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_read_lock\n"
		"1:	ll	%1, %2\n"
		"	bltz	%1, 1b\n"
		"	addu	%1, 1\n"
		"	sc	%1, %0\n"
		"	beqzl	%1, 1b\n"
		"	nop\n"
		"	sync\n"
		"	.set	reorder\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_read_lock\n"
		"1:	ll	%1, %2\n"
		"	bltz	%1, 1b\n"
		"	addu	%1, 1\n"
		"	sc	%1, %0\n"
		"	beqz	%1, 1b\n"
		"	sync\n"
		"	.set	reorder\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}
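
/*
 * Lock word encoding used above: readers occupy the low 31 bits as a
 * plain count (addu %1, 1), while a writer sets bit 31. The bltz test
 * therefore spins exactly while a writer holds the lock (the word is
 * negative) and lets any number of readers through otherwise.
 */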

/* Note the use of sub, not subu, which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or that is being held by a writer. */
static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# _raw_read_unlock\n"
		"	sub	%1, 1\n"
		"	sc	%1, %0\n"
		"	beqzl	%1, 1b\n"
		"	sync\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_read_unlock\n"
		"1:	ll	%1, %2\n"
		"	sub	%1, 1\n"
		"	sc	%1, %0\n"
		"	beqz	%1, 1b\n"
		"	sync\n"
		"	.set	reorder\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}
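
/*
 * Worked example of the overflow trap mentioned above: if a writer holds
 * the lock, the word is 0x80000000 (INT_MIN as a signed 32-bit value).
 * The signed sub computes 0x80000000 - 1 = 0x7fffffff, an overflow from
 * negative to positive, so the CPU raises an overflow exception instead
 * of silently corrupting the lock.
 */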

static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_write_lock\n"
		"1:	ll	%1, %2\n"
		"	bnez	%1, 1b\n"
		"	lui	%1, 0x8000\n"
		"	sc	%1, %0\n"
		"	beqzl	%1, 1b\n"
		"	nop\n"
		"	sync\n"
		"	.set	reorder\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_write_lock\n"
		"1:	ll	%1, %2\n"
		"	bnez	%1, 1b\n"
		"	lui	%1, 0x8000\n"
		"	sc	%1, %0\n"
		"	beqz	%1, 1b\n"
		"	nop\n"
		"	sync\n"
		"	.set	reorder\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}
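
/*
 * Why "lui %1, 0x8000": lui loads the immediate into the upper halfword,
 * yielding 0x80000000, i.e. only bit 31 set. A writer thus makes the
 * lock word negative (spinning the readers' bltz test in _raw_read_lock)
 * and nonzero (spinning other writers' bnez test), while leaving the
 * reader-count bits untouched.
 */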

static inline void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__(
	"	sync			# _raw_write_unlock\n"
	"	sw	$0, %0\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

static inline int _raw_write_trylock(rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_write_trylock\n"
		"	li	%2, 0\n"
		"1:	ll	%1, %3\n"
		"	bnez	%1, 2f\n"
		"	lui	%1, 0x8000\n"
		"	sc	%1, %0\n"
		"	beqzl	%1, 1b\n"
		"	nop\n"
		"	sync\n"
		"	li	%2, 1\n"
		"	.set	reorder\n"
		"2:\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_write_trylock\n"
		"	li	%2, 0\n"
		"1:	ll	%1, %3\n"
		"	bnez	%1, 2f\n"
		"	lui	%1, 0x8000\n"
		"	sc	%1, %0\n"
		"	beqz	%1, 1b\n"
		"	sync\n"
		"	li	%2, 1\n"
		"	.set	reorder\n"
		"2:\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}
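
/*
 * Illustrative sketch (hypothetical caller): ret is preset to 0 and only
 * set to 1 after the sc succeeds, so _raw_write_trylock() returns 1 when
 * the write lock was acquired and 0 when readers or a writer were
 * already in:
 *
 *	if (_raw_write_trylock(&my_rwlock)) {
 *		... exclusive access ...
 *		_raw_write_unlock(&my_rwlock);
 *	}
 */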

#endif /* _ASM_SPINLOCK_H */