author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ppc64/spinlock.h
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-ppc64/spinlock.h')
-rw-r--r--	include/asm-ppc64/spinlock.h	260
1 files changed, 260 insertions, 0 deletions
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h
new file mode 100644
index 000000000000..a9b2a1162cf7
--- /dev/null
+++ b/include/asm-ppc64/spinlock.h
@@ -0,0 +1,260 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iSeries/HvCall.h>

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

typedef struct {
	volatile signed int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
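
/*
 * Editor's illustrative sketch (not part of the original file): static
 * and runtime initialisation using the definitions above.  The names
 * example_lock and example_init are hypothetical.
 */
#if 0
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;	/* static init */

static void example_init(spinlock_t *dynamic_lock)
{
	spin_lock_init(dynamic_lock);			/* runtime init */
}
#endif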

static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__("lwsync	# spin_unlock": : :"memory");
	lock->lock = 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR	(get_paca()->lppaca.shared_proc)
extern void __spin_yield(spinlock_t *lock);
extern void __rw_yield(rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif
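
/*
 * Editor's illustrative sketch (not part of the original file): with the
 * 0x80000000 | smp_processor_id() encoding described above, the cpu
 * holding a contended lock could be recovered from the lock word like
 * this.  example_lock_holder is a hypothetical helper.
 */
#if 0
static unsigned int example_lock_holder(spinlock_t *lock)
{
	unsigned int val = lock->lock;

	/* 0 means unlocked; otherwise the low bits are the holder's cpu id */
	return val ? (val & ~0x80000000u) : (unsigned int)-1;
}
#endif
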
extern void spin_unlock_wait(spinlock_t *lock);

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"	lwz		%1,%3(13)	# __spin_trylock\n\
1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
	: "cr0", "memory");

	return tmp;
}

static int __inline__ _raw_spin_trylock(spinlock_t *lock)
{
	return __spin_trylock(lock) == 0;
}
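
/*
 * Editor's illustrative sketch (not part of the original file): callers
 * normally reach _raw_spin_trylock() through the generic spin_trylock()
 * wrapper, which returns non-zero on success.  example_lock is
 * hypothetical.
 */
#if 0
	if (spin_trylock(&example_lock)) {
		/* ... critical section ... */
		spin_unlock(&example_lock);
	} else {
		/* lock already held elsewhere; back off or retry later */
	}
#endif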

static void __inline__ _raw_spin_lock(spinlock_t *lock)
{
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (likely(lock->lock != 0));
		HMT_medium();
	}
}

static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (likely(lock->lock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
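
/*
 * Editor's illustrative sketch (not part of the original file): the flags
 * variant above lets spin_lock_irqsave() restore the caller's saved
 * interrupt state while the lock is contended, so pending interrupts can
 * still be serviced during the spin.  example_lock is hypothetical.
 */
#if 0
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section, local interrupts disabled ... */
	spin_unlock_irqrestore(&example_lock, flags);
#endif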

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
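
/*
 * Editor's illustrative sketch (not part of the original file) of the
 * pattern described above: the writer takes the lock irq-safely, while
 * readers that may run in interrupt context take plain read locks.
 * example_rwlock is hypothetical.
 */
#if 0
	/* writer, process context */
	unsigned long flags;

	write_lock_irqsave(&example_rwlock, flags);
	/* ... update shared data ... */
	write_unlock_irqrestore(&example_rwlock, flags);

	/* reader, possibly in interrupt context */
	read_lock(&example_rwlock);
	/* ... read shared data ... */
	read_unlock(&example_rwlock);
#endif
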
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#define read_can_lock(rw)	((rw)->lock >= 0)
#define write_can_lock(rw)	(!(rw)->lock)

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__("lwsync	# write_unlock": : :"memory");
	rw->lock = 0;
}

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1		# read_trylock\n\
	extsw		%0,%0\n\
	addic.		%0,%0,1\n\
	ble-		2f\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

static int __inline__ _raw_read_trylock(rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}
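
/*
 * Editor's note (not part of the original file): the signed rwlock word
 * is 0 when free, positive while read-held (one count per reader), and
 * negative while write-held, since the writer stores the paca lock token
 * 0x80000000 | smp_processor_id(), which is negative as a signed int.
 * That is why __read_trylock() tests "old value + 1 > 0".
 */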

static void __inline__ _raw_read_lock(rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (likely(rw->lock < 0));
		HMT_medium();
	}
}

static void __inline__ _raw_read_unlock(rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"eieio			# read_unlock\n\
1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(rwlock_t *rw)
{
	long tmp, tmp2;

	__asm__ __volatile__(
"	lwz		%1,%3(13)	# write_trylock\n\
1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
	: "cr0", "memory");

	return tmp;
}

static int __inline__ _raw_write_trylock(rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

static void __inline__ _raw_write_lock(rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (likely(rw->lock != 0));
		HMT_medium();
	}
}

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */