aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-ppc64/rwsem.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ppc64/rwsem.h
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-ppc64/rwsem.h')
-rw-r--r--include/asm-ppc64/rwsem.h167
1 files changed, 167 insertions, 0 deletions
diff --git a/include/asm-ppc64/rwsem.h b/include/asm-ppc64/rwsem.h
new file mode 100644
index 000000000000..bd5c2f093575
--- /dev/null
+++ b/include/asm-ppc64/rwsem.h
@@ -0,0 +1,167 @@
1/*
2 * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
3 * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
4 * by Paul Mackerras <paulus@samba.org>.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _PPC64_RWSEM_H
13#define _PPC64_RWSEM_H
14
15#ifdef __KERNEL__
16#include <linux/list.h>
17#include <linux/spinlock.h>
18#include <asm/atomic.h>
19#include <asm/system.h>
20
21/*
22 * the semaphore definition
23 */
24struct rw_semaphore {
25 /* XXX this should be able to be an atomic_t -- paulus */
26 signed int count;
27#define RWSEM_UNLOCKED_VALUE 0x00000000
28#define RWSEM_ACTIVE_BIAS 0x00000001
29#define RWSEM_ACTIVE_MASK 0x0000ffff
30#define RWSEM_WAITING_BIAS (-0x00010000)
31#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
32#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
33 spinlock_t wait_lock;
34 struct list_head wait_list;
35#if RWSEM_DEBUG
36 int debug;
37#endif
38};
39
/*
 * Static initialisation.
 *
 * __RWSEM_DEBUG_INIT supplies the trailing initializer for the debug
 * field only when RWSEM_DEBUG is enabled, so __RWSEM_INITIALIZER can
 * stay positional in both configurations.
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT , 0
#else
#define __RWSEM_DEBUG_INIT /* */
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
56
/* Contended slow paths, implemented in lib/rwsem.c */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
61
62static inline void init_rwsem(struct rw_semaphore *sem)
63{
64 sem->count = RWSEM_UNLOCKED_VALUE;
65 spin_lock_init(&sem->wait_lock);
66 INIT_LIST_HEAD(&sem->wait_list);
67#if RWSEM_DEBUG
68 sem->debug = 0;
69#endif
70}
71
72/*
73 * lock for reading
74 */
75static inline void __down_read(struct rw_semaphore *sem)
76{
77 if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
78 rwsem_down_read_failed(sem);
79}
80
81static inline int __down_read_trylock(struct rw_semaphore *sem)
82{
83 int tmp;
84
85 while ((tmp = sem->count) >= 0) {
86 if (tmp == cmpxchg(&sem->count, tmp,
87 tmp + RWSEM_ACTIVE_READ_BIAS)) {
88 return 1;
89 }
90 }
91 return 0;
92}
93
94/*
95 * lock for writing
96 */
97static inline void __down_write(struct rw_semaphore *sem)
98{
99 int tmp;
100
101 tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
102 (atomic_t *)(&sem->count));
103 if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
104 rwsem_down_write_failed(sem);
105}
106
107static inline int __down_write_trylock(struct rw_semaphore *sem)
108{
109 int tmp;
110
111 tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
112 RWSEM_ACTIVE_WRITE_BIAS);
113 return tmp == RWSEM_UNLOCKED_VALUE;
114}
115
116/*
117 * unlock after reading
118 */
119static inline void __up_read(struct rw_semaphore *sem)
120{
121 int tmp;
122
123 tmp = atomic_dec_return((atomic_t *)(&sem->count));
124 if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
125 rwsem_wake(sem);
126}
127
128/*
129 * unlock after writing
130 */
131static inline void __up_write(struct rw_semaphore *sem)
132{
133 if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
134 (atomic_t *)(&sem->count)) < 0))
135 rwsem_wake(sem);
136}
137
138/*
139 * implement atomic add functionality
140 */
141static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
142{
143 atomic_add(delta, (atomic_t *)(&sem->count));
144}
145
146/*
147 * downgrade write lock to read lock
148 */
149static inline void __downgrade_write(struct rw_semaphore *sem)
150{
151 int tmp;
152
153 tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
154 if (tmp < 0)
155 rwsem_downgrade_wake(sem);
156}
157
158/*
159 * implement exchange and add functionality
160 */
161static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
162{
163 return atomic_add_return(delta, (atomic_t *)(&sem->count));
164}
165
166#endif /* __KERNEL__ */
#endif /* _PPC64_RWSEM_H */