Diffstat (limited to 'include/asm-s390/rwsem.h')
-rw-r--r--  include/asm-s390/rwsem.h  355
1 file changed, 355 insertions(+), 0 deletions(-)
diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
new file mode 100644
index 000000000000..8c0cebbfc034
--- /dev/null
+++ b/include/asm-s390/rwsem.h
@@ -0,0 +1,355 @@
#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 * include/asm-s390/rwsem.h
 *
 * S390 version
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 *
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer adds WRITE_BIAS, the count becomes 0xffff0001 for the case of
 * an uncontended lock. This can be determined because the compare-and-swap
 * loop hands back the old value. Readers increment by 1 and see a positive
 * value when uncontended, or a negative value if there are writers (and
 * possibly readers) waiting, in which case the reader goes to sleep.
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the sign of the count.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
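
/*
 * As an illustration, with the 31 bit bias values defined below
 * (RWSEM_ACTIVE_BIAS = 1, RWSEM_WAITING_BIAS = -0x00010000):
 *
 *	count 0x00000000	lock is free
 *	count 0x00000001	one reader holds the lock
 *	count 0x00000002	two readers hold the lock
 *	count 0xffff0001	one writer holds the lock, uncontended
 */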

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};

#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#else /* __s390x__ */
#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#endif /* __s390x__ */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * initialisation
 */
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
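
/*
 * A minimal usage sketch (illustrative only; down_read()/up_read() and
 * down_write()/up_write() live in linux/rwsem.h and call into the
 * __down_*()/__up_*() helpers defined below):
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	read-side critical section
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);	write-side critical section
 *	up_write(&example_sem);
 */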

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

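	/*
	 * Atomically add the read bias to the count: load the old value,
	 * add RWSEM_ACTIVE_READ_BIAS and retry the compare-and-swap
	 * ("jl 0b") until it succeeds.  A negative old value means a
	 * writer holds or is waiting for the lock, so take the slow path.
	 */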
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   ahi   %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   aghi  %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
	if (old < 0)
		rwsem_down_read_failed(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;

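	/*
	 * Same compare-and-swap loop as __down_read(), but bail out
	 * ("jm 1f") instead of taking the read bias once the count is
	 * seen to be negative, i.e. a writer is active or waiting.
	 */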
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: ltr   %1,%0\n"
		"   jm    1f\n"
		"   ahi   %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b\n"
		"1:"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: ltgr  %1,%0\n"
		"   jm    1f\n"
		"   aghi  %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b\n"
		"1:"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
	return old >= 0 ? 1 : 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

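	/*
	 * Atomically add the write bias (waiting bias + active bias) to
	 * the count.  Only if the old count was zero did we get the lock
	 * uncontended; otherwise queue up via rwsem_down_write_failed().
	 */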
	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   a     %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   ag    %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (old != 0)
		rwsem_down_write_failed(sem);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

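	/*
	 * Try to compare-and-swap the count from RWSEM_UNLOCKED_VALUE to
	 * the write bias in one go; any non-zero count means the lock is
	 * contended and the trylock fails.
	 */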
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%2)\n"
		"0: ltr   %0,%0\n"
		"   jnz   1f\n"
		"   cs    %0,%4,0(%2)\n"
		"   jl    0b\n"
#else /* __s390x__ */
		"   lg    %0,0(%2)\n"
		"0: ltgr  %0,%0\n"
		"   jnz   1f\n"
		"   csg   %0,%4,0(%2)\n"
		"   jl    0b\n"
#endif /* __s390x__ */
		"1:"
		: "=&d" (old), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory" );
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

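	/*
	 * Subtract the read bias.  If the new count is negative there are
	 * waiters; once the active part of the count drops to zero, wake
	 * up the front of the wait queue.
	 */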
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   ahi   %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   aghi  %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

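	/*
	 * Subtract the write bias.  As in __up_read(), a negative result
	 * with no active lockers left means there are queued tasks that
	 * need to be woken up.
	 */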
	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   a     %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   ag    %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

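	/*
	 * Convert the held write lock into a read lock by adding
	 * -RWSEM_WAITING_BIAS (e.g. 0xffff0001 becomes 0x00000001 in the
	 * uncontended 31 bit case), then wake any readers queued behind
	 * the former writer.
	 */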
	tmp = -RWSEM_WAITING_BIAS;
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   a     %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   ag    %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (new > 1)
		rwsem_downgrade_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

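	/*
	 * Atomically add "delta" to the count using the same
	 * compare-and-swap retry loop as the lock/unlock helpers above.
	 */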
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   ar    %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   agr   %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory" );
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

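	/*
	 * Like rwsem_atomic_add(), but return the updated count so the
	 * caller can act on the result of the addition.
	 */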
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   ar    %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   agr   %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory" );
	return new;
}

#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */