author		Paul Mackerras <paulus@samba.org>	2005-11-19 04:50:46 -0500
committer	Paul Mackerras <paulus@samba.org>	2005-11-19 04:50:46 -0500
commit		0212ddd839470f7a54cccccbaecd4833b4123da2 (patch)
tree		3e18fc4852768c840131155eea84e2f70ebbbb07 /include/asm-powerpc/spinlock.h
parent		21a6290220679d94912a068c75db2c5cd9c6552a (diff)
powerpc: Merge spinlock.h
The result is mostly similar to the original ppc64 version but with
some adaptations for 32-bit compilation.
include/asm-ppc64 is now empty!
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'include/asm-powerpc/spinlock.h')
-rw-r--r--	include/asm-powerpc/spinlock.h	269
1 file changed, 269 insertions, 0 deletions
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
new file mode 100644
index 000000000000..caa4b14e0e94
--- /dev/null
+++ b/include/asm-powerpc/spinlock.h
@@ -0,0 +1,269 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

#define __raw_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# __spin_trylock\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __spin_trylock(lock) == 0;
}
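
The trylock above is a load-reserve/store-conditional loop: the `stwcx.` only succeeds if nothing else has written the lock word since the `lwarx`, and the trailing `isync` gives acquire semantics so the critical section cannot be speculated above the lock. As a rough user-space model of the same protocol (a sketch using C11 atomics, not the kernel code; `model_spinlock` and its token parameter are illustrative assumptions):

```c
#include <stdatomic.h>

/* Hypothetical model: slock == 0 is unlocked, a nonzero token names the owner. */
struct model_spinlock {
	_Atomic unsigned int slock;
};

/* Returns the old lock value, so 0 means we took the lock --
 * the same convention as __spin_trylock() above. */
static unsigned int model_spin_trylock(struct model_spinlock *lock,
				       unsigned int token)
{
	unsigned int old = 0;

	/* CAS(0 -> token) with acquire ordering stands in for
	 * the lwarx/stwcx. loop plus the isync. */
	if (atomic_compare_exchange_strong_explicit(&lock->slock, &old, token,
						    memory_order_acquire,
						    memory_order_relaxed))
		return 0;	/* was free, now ours */
	return old;		/* held; old is the holder's token */
}
```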

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
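
__spin_yield() and __rw_yield() are defined out of line elsewhere in the powerpc tree, not in this header. Purely as a hedged sketch of the idea the comment describes (every name below is an illustrative assumption, not the kernel implementation): the yield path recovers the holder's CPU number from the 0x800000yy token and asks the hypervisor to confer the rest of the timeslice to that virtual processor.

```c
/* Illustrative sketch only, not the kernel's __spin_yield().
 * hv_confer_to() is a hypothetical stand-in for the hypervisor's
 * "confer my timeslice to this virtual processor" call. */
extern void hv_confer_to(unsigned int holder_cpu);	/* hypothetical */

static void sketch_spin_yield(volatile unsigned int *slock)
{
	unsigned int lock_value = *slock;

	if (lock_value == 0)
		return;				/* released in the meantime */
	/* Token is 0x80000000 | smp_processor_id(), so the low bits
	 * identify the holding virtual processor. */
	hv_confer_to(lock_value & ~0x80000000u);
}
```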

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR	(get_paca()->lppaca.shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif

static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
{
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
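
__raw_spin_lock_flags() exists so that a contended irq-disabled acquisition does not keep interrupts off for the whole wait: it restores the caller's saved flags (typically re-enabling interrupts) while it spins, then disables them again before each retry. The same shape in the user-space model from above (model_irq_save()/model_irq_restore() are hypothetical stand-ins for the flags primitives):

```c
extern unsigned long model_irq_save(void);		/* hypothetical */
extern void model_irq_restore(unsigned long flags);	/* hypothetical */

/* Sketch of the lock_flags pattern, reusing model_spin_trylock() above. */
static void model_spin_lock_flags(struct model_spinlock *lock,
				  unsigned int token, unsigned long flags)
{
	unsigned long flags_dis;

	for (;;) {
		if (model_spin_trylock(lock, token) == 0)
			break;
		flags_dis = model_irq_save();	/* remember "irqs off" state */
		model_irq_restore(flags);	/* spin in the caller's state */
		while (atomic_load_explicit(&lock->slock,
					    memory_order_relaxed) != 0)
			;			/* wait for the holder */
		model_irq_restore(flags_dis);	/* irqs off again for retry */
	}
}
```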

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(SYNC_ON_SMP"	# __raw_spin_unlock"
			     : : :"memory");
	lock->slock = 0;
}
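
Unlock needs no atomic sequence: a barrier (SYNC_ON_SMP supplies one on SMP builds and expands to nothing on UP) followed by a plain store of 0 is enough, pairing with the isync acquire in the trylock. In the model this collapses to a single release store:

```c
/* Model of the unlock path: one release store plays the role of the
 * SYNC_ON_SMP barrier followed by the plain "lock->slock = 0". */
static void model_spin_unlock(struct model_spinlock *lock)
{
	atomic_store_explicit(&lock->slock, 0, memory_order_release);
}
```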

#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
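
As a hedged illustration of that rule (using the generic rwlock API built on these primitives; the lock and data below are made up for the example): a reader that can run in interrupt context may take the plain read lock, but every writer must disable interrupts, otherwise an interrupt reader arriving on the same CPU would spin forever against the write lock its own CPU holds.

```c
#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);	/* hypothetical example lock/data */
static unsigned long stats_count;

/* Reader: may be called from interrupt context with a plain read lock. */
unsigned long stats_read(void)
{
	unsigned long v;

	read_lock(&stats_lock);
	v = stats_count;
	read_unlock(&stats_lock);
	return v;
}

/* Writer: must be irq-safe, or an interrupt reader on this CPU would
 * deadlock against the write lock we hold. */
void stats_set(unsigned long v)
{
	unsigned long flags;

	write_lock_irqsave(&stats_lock, flags);
	stats_count = v;
	write_unlock_irqrestore(&stats_lock, flags);
}
```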

#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1		# read_trylock\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
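
The lock word's sign carries the whole protocol: 0 is free, a positive value counts the active readers, and a negative value (WRLOCK_TOKEN, sign-extended on 64-bit by __DO_SIGN_EXTEND) marks a writer. __read_trylock() increments the word only while the result stays positive. The same value protocol in the user-space model:

```c
#include <stdatomic.h>

/* Model of the reader-count protocol: lock > 0 counts readers, 0 is free,
 * negative means a writer holds it.  Returns the old value + 1, so > 0
 * means we got the read lock -- the same convention as __read_trylock(). */
static long model_read_trylock(_Atomic long *lock)
{
	long old = atomic_load_explicit(lock, memory_order_relaxed);

	do {
		if (old + 1 <= 0)
			return old + 1;	/* writer present: fail */
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return old + 1;
}
```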

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# write_trylock\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}

static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"eieio				# read_unlock\n\
1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__(SYNC_ON_SMP"	# write_unlock"
			     : : :"memory");
	rw->lock = 0;
}

#endif /* __ASM_SPINLOCK_H */