author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-sparc64/spinlock.h
tag       v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-sparc64/spinlock.h')
 -rw-r--r--  include/asm-sparc64/spinlock.h  310
 1 file changed, 310 insertions(+), 0 deletions(-)
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
new file mode 100644
index 000000000000..11efa474865b
--- /dev/null
+++ b/include/asm-sparc64/spinlock.h
@@ -0,0 +1,310 @@
/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED	0

#define spin_lock_init(lock)	(*((unsigned char *)(lock)) = 0)
#define spin_is_locked(lock)	(*((volatile unsigned char *)(lock)) != 0)
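/* Wait until the lock is released: the membar #LoadLoad orders each
 * re-read of the lock byte after the previous one, and the loop exits
 * once a zero byte is observed.
 */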
#define spin_unlock_wait(lock)	\
do {	membar("#LoadLoad");	\
} while(*((volatile unsigned char *)lock))

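/* Acquire: ldstub atomically loads the lock byte and stores 0xff to it.
 * A zero result means we now own the lock; the membar in the delay slot
 * keeps the critical section's accesses from floating above the
 * acquisition.  On contention we branch to the out-of-line spinner in
 * .subsection 2, which spins on plain loads (ldub) until the byte reads
 * zero, then jumps back to retry the ldstub.
 */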
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	brnz,pn		%0, 2f\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	brnz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

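/* Try-acquire: a single ldstub attempt.  Returns 1 if the old byte was
 * zero (lock taken), 0 if the lock was already held.
 */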
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

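/* Release: the membar #StoreStore | #LoadStore makes every load and
 * store inside the critical section visible before the zero byte lands.
 */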
static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

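/* Like _raw_spin_lock(), but while spinning we drop the processor
 * interrupt level (%pil) back to the caller's pre-irqsave value in
 * 'flags' so interrupts can be serviced during the wait, then write
 * the raised PIL back in the delay slot just before retrying the
 * acquisition.
 */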
static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	brnz,pt		%0, 3b\n"
"	 membar		#LoadLoad\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	unsigned char lock;
	unsigned int owner_pc, owner_cpu;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(__lock)	\
do {	(__lock)->lock = 0; \
	(__lock)->owner_pc = 0; \
	(__lock)->owner_cpu = 0xff; \
} while(0)
#define spin_is_locked(__lock)	(*((volatile unsigned char *)(&((__lock)->lock))) != 0)
#define spin_unlock_wait(__lock)	\
do { \
	membar("#LoadLoad"); \
} while(*((volatile unsigned char *)(&((__lock)->lock))))

extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _do_spin_trylock (spinlock_t *lock);

#define _raw_spin_trylock(lp)	_do_spin_trylock(lp)
#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef unsigned int rwlock_t;
#define RW_LOCK_UNLOCKED	0
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

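/* Reader acquire: atomically bump the reader count with a cas loop.  A
 * negative lock word (sign bit set) means a writer holds the lock, so
 * we spin out of line until it goes non-negative, then retry the
 * increment.
 */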
static inline void __read_lock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	brlz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

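/* Reader release: drop the count with a cas loop.  The leading
 * membar #StoreLoad | #LoadLoad orders the read-side critical
 * section's loads and stores ahead of the decrement becoming visible.
 */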
static inline void __read_unlock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

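/* Writer acquire: cas in the 0x80000000 writer bit, which can only
 * succeed while the lock word is zero (no readers, no writer).  On
 * contention we spin out of line on plain loads until the word clears.
 */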
static inline void __write_lock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	brnz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

static inline void __write_unlock(rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

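/* Writer try-acquire: one attempt to cas in the writer bit.  If the
 * word is busy we give up immediately with result 0; if the cas merely
 * raced with another update we retry the load; on success result is 1.
 */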
static inline int __write_trylock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define _raw_read_lock(p)	__read_lock(p)
#define _raw_read_unlock(p)	__read_unlock(p)
#define _raw_write_lock(p)	__write_lock(p)
#define _raw_write_unlock(p)	__write_unlock(p)
#define _raw_write_trylock(p)	__write_trylock(p)

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[NR_CPUS];
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
extern int _do_write_trylock(rwlock_t *rw, char *str);

#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_lock(lock, "read_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_lock(lock, "write_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock) \
({	unsigned long flags; \
	int val; \
	local_irq_save(flags); \
	val = _do_write_trylock(lock, "write_trylock"); \
	local_irq_restore(flags); \
	val; \
})

#endif /* CONFIG_DEBUG_SPINLOCK */

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */
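
For context, a minimal sketch of how a caller would exercise these primitives. This is a hypothetical example, not part of the patch: the names counter_lock, counter, bump_counter and try_bump_counter are invented for illustration, and in-tree code normally goes through the public spin_lock()/spin_unlock() wrappers in include/linux/spinlock.h, which expand to the _raw_ variants above on SMP builds.

/* Hypothetical illustration only: a driver-style critical section
 * built directly on this header's non-debug primitives.
 */
static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;
static unsigned long counter;

static void bump_counter(void)
{
	_raw_spin_lock(&counter_lock);		/* ldstub loop until the byte is ours */
	counter++;				/* protected update */
	_raw_spin_unlock(&counter_lock);	/* release membar, then store zero */
}

static int try_bump_counter(void)
{
	if (!_raw_spin_trylock(&counter_lock))	/* single ldstub attempt */
		return 0;			/* lock busy, caller backs off */
	counter++;
	_raw_spin_unlock(&counter_lock);
	return 1;
}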