author		Stephen Rothwell <sfr@canb.auug.org.au>		2008-08-01 01:20:30 -0400
committer	Paul Mackerras <paulus@samba.org>		2008-08-03 22:02:00 -0400
commit		b8b572e1015f81b4e748417be2629dfe51ab99f9 (patch)
tree		7df58667d5ed71d6c8f8f4ce40ca16b6fb776d0b /arch/powerpc/include/asm/spinlock.h
parent		2b12a4c524812fb3f6ee590a02e65b95c8c32229 (diff)
powerpc: Move include files to arch/powerpc/include/asm
from include/asm-powerpc. This is the result of a
mkdir arch/powerpc/include/asm
git mv include/asm-powerpc/* arch/powerpc/include/asm
Followed by a few documentation/comment fixups and a couple of places
where <asm-powerpc/...> was being used explicitly. Of the latter only
one was outside the arch code and it is a driver only built for powerpc.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/include/asm/spinlock.h')
-rw-r--r--	arch/powerpc/include/asm/spinlock.h	295
1 file changed, 295 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
new file mode 100644
index 000000000000..f56a843f4705
--- /dev/null
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -0,0 +1,295 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used, as a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

#define __raw_spin_is_locked(x)	((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO	do {						\
			if (unlikely(get_paca()->io_sync)) {	\
				mb();				\
				get_paca()->io_sync = 0;	\
			}					\
		} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
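/*
 * Editor's note: on SMP ppc64 the MMIO accessors set paca->io_sync
 * after a device access; SYNC_IO then issues a full mb() in the
 * unlock path so that I/O done inside the critical section is
 * ordered before the lock release becomes visible to other CPUs.
 */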

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
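/*
 * Editor's note: lwarx loads the lock word and sets a reservation;
 * stwcx. stores the token only if the reservation still stands, i.e.
 * no other CPU has written the word in between (on failure, cr0 EQ is
 * clear and we retry).  The trailing isync keeps loads in the critical
 * section from being executed before the lock is actually held.
 */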
static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
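/*
 * Editor's note, for illustration only: a hedged sketch of what the
 * SPLPAR flavour of __spin_yield() does (the real implementation
 * lives in arch/powerpc/lib/locks.c; details here are simplified).
 * It recovers the holder's CPU number from the lock token and confers
 * the remainder of our timeslice to it via the H_CONFER hcall.
 */
#if 0	/* sketch, not part of this header */
void __spin_yield(raw_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;				/* already released */
	holder_cpu = lock_value & 0xffff;	/* yy from 0x800000yy */
	yield_count = lppaca[holder_cpu].yield_count;
	if ((yield_count & 1) == 0)
		return;				/* holder is running now */
	rmb();
	if (lock->slock != lock_value)
		return;				/* lock has changed hands */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif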

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

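/*
 * Editor's note: this variant backs spin_lock_irqsave(); flags holds
 * the caller's pre-disable interrupt state.  While busy-waiting we
 * restore that state so pending interrupts can be serviced, then
 * re-disable (flags_dis) before retrying the trylock.
 */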
static inline
void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
}

#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
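/*
 * Editor's note: encoding of the rwlock word: 0 means unlocked, a
 * positive value is the number of active readers, and a negative
 * value (WRLOCK_TOKEN, i.e. -1, or 0x800000yy sign-extended on ppc64)
 * means write-locked.
 */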

#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
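/*
 * Editor's note: addic. increments the (sign-extended) count and sets
 * cr0 in one instruction; ble- bails out when the result is <= 0,
 * i.e. the old value was negative (write-locked).  addic. also
 * updates the carry bit, hence the "xer" clobber.  PPC405_ERR77
 * expands to nothing except on 405 cores, where it emits the
 * workaround for an stwcx. erratum.
 */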
static inline long __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	LWSYNC_ON_SMP
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	rw->lock = 0;
}

#define _raw_spin_relax(lock)	__spin_yield(lock)
#define _raw_read_relax(lock)	__rw_yield(lock)
#define _raw_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
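
For context, kernel code never calls these __raw_* primitives directly; they are reached through the generic spin_lock()/spin_unlock() wrappers in <linux/spinlock.h>. Below is a minimal, hypothetical usage sketch (the lock and counter names are invented for illustration) showing the irqsave variant, which ends up in __raw_spin_lock_flags() above:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
	static unsigned long demo_count;	/* data the lock protects */

	/* Safe to call from both process and interrupt context. */
	static void demo_increment(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);	/* disable irqs, take lock */
		demo_count++;
		spin_unlock_irqrestore(&demo_lock, flags);
	}

Using the irqsave variant here is what makes the reader/writer mixing described in the rwlock comment safe: any path that can run in interrupt context must take the lock with interrupts disabled, or a lock holder interrupted on the same CPU would deadlock.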