author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-i386/rwsem.h
Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-i386/rwsem.h')
-rw-r--r--   include/asm-i386/rwsem.h   288
1 file changed, 288 insertions, 0 deletions
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
new file mode 100644
index 000000000000..7625a675852f
--- /dev/null
+++ b/include/asm-i386/rwsem.h
@@ -0,0 +1,288 @@
/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
 *
 * Written by David Howells (dhowells@redhat.com).
 *
 * Derived from asm-i386/semaphore.h
 *
 *
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
 * uncontended lock. This can be determined because XADD returns the old value.
 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe readers) waiting (in which case it goes to
 * sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */

#ifndef _I386_RWSEM_H
#define _I386_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem));

/*
 * the semaphore definition
 */
struct rw_semaphore {
        signed long count;
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t wait_lock;
        struct list_head wait_list;
#if RWSEM_DEBUG
        int debug;
#endif
};

/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT , 0
#else
#define __RWSEM_DEBUG_INIT /* */
#endif

#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
        __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
        sem->debug = 0;
#endif
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        __asm__ __volatile__(
                "# beginning down_read\n\t"
                LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
                " js 2f\n\t" /* jump if we weren't granted the lock */
                "1:\n\t"
                LOCK_SECTION_START("")
                "2:\n\t"
                " pushl %%ecx\n\t"
                " pushl %%edx\n\t"
                " call rwsem_down_read_failed\n\t"
                " popl %%edx\n\t"
                " popl %%ecx\n\t"
                " jmp 1b\n"
                LOCK_SECTION_END
                "# ending down_read\n\t"
                : "=m"(sem->count)
                : "a"(sem), "m"(sem->count)
                : "memory", "cc");
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        __s32 result, tmp;
        __asm__ __volatile__(
                "# beginning __down_read_trylock\n\t"
                " movl %0,%1\n\t"
                "1:\n\t"
                " movl %1,%2\n\t"
                " addl %3,%2\n\t"
                " jle 2f\n\t"
                LOCK_PREFIX " cmpxchgl %2,%0\n\t"
                " jnz 1b\n\t"
                "2:\n\t"
                "# ending __down_read_trylock\n\t"
                : "+m"(sem->count), "=&a"(result), "=&r"(tmp)
                : "i"(RWSEM_ACTIVE_READ_BIAS)
                : "memory", "cc");
        return result>=0 ? 1 : 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = RWSEM_ACTIVE_WRITE_BIAS;
        __asm__ __volatile__(
                "# beginning down_write\n\t"
                LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
                " testl %%edx,%%edx\n\t" /* was the count 0 before? */
                " jnz 2f\n\t" /* jump if we weren't granted the lock */
                "1:\n\t"
                LOCK_SECTION_START("")
                "2:\n\t"
                " pushl %%ecx\n\t"
                " call rwsem_down_write_failed\n\t"
                " popl %%ecx\n\t"
                " jmp 1b\n"
                LOCK_SECTION_END
                "# ending down_write"
                : "=m"(sem->count), "=d"(tmp)
                : "a"(sem), "1"(tmp), "m"(sem->count)
                : "memory", "cc");
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        signed long ret = cmpxchg(&sem->count,
                                  RWSEM_UNLOCKED_VALUE,
                                  RWSEM_ACTIVE_WRITE_BIAS);
        if (ret == RWSEM_UNLOCKED_VALUE)
                return 1;
        return 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
        __asm__ __volatile__(
                "# beginning __up_read\n\t"
                LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
                " js 2f\n\t" /* jump if the lock is being waited upon */
                "1:\n\t"
                LOCK_SECTION_START("")
                "2:\n\t"
                " decw %%dx\n\t" /* do nothing if still outstanding active readers */
                " jnz 1b\n\t"
                " pushl %%ecx\n\t"
                " call rwsem_wake\n\t"
                " popl %%ecx\n\t"
                " jmp 1b\n"
                LOCK_SECTION_END
                "# ending __up_read\n"
                : "=m"(sem->count), "=d"(tmp)
                : "a"(sem), "1"(tmp), "m"(sem->count)
                : "memory", "cc");
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        __asm__ __volatile__(
                "# beginning __up_write\n\t"
                " movl %2,%%edx\n\t"
                LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
                " jnz 2f\n\t" /* jump if the lock is being waited upon */
                "1:\n\t"
                LOCK_SECTION_START("")
                "2:\n\t"
                " decw %%dx\n\t" /* did the active count reduce to 0? */
                " jnz 1b\n\t" /* jump back if not */
                " pushl %%ecx\n\t"
                " call rwsem_wake\n\t"
                " popl %%ecx\n\t"
                " jmp 1b\n"
                LOCK_SECTION_END
                "# ending __up_write\n"
                : "=m"(sem->count)
                : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
                : "memory", "cc", "edx");
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        __asm__ __volatile__(
                "# beginning __downgrade_write\n\t"
                LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
                " js 2f\n\t" /* jump if the lock is being waited upon */
                "1:\n\t"
                LOCK_SECTION_START("")
                "2:\n\t"
                " pushl %%ecx\n\t"
                " pushl %%edx\n\t"
                " call rwsem_downgrade_wake\n\t"
                " popl %%edx\n\t"
                " popl %%ecx\n\t"
                " jmp 1b\n"
                LOCK_SECTION_END
                "# ending __downgrade_write\n"
                : "=m"(sem->count)
                : "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
                : "memory", "cc");
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        __asm__ __volatile__(
                LOCK_PREFIX "addl %1,%0"
                : "=m"(sem->count)
                : "ir"(delta), "m"(sem->count));
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        int tmp = delta;

        __asm__ __volatile__(
                LOCK_PREFIX "xadd %0,(%2)"
                : "+r"(tmp), "=m"(sem->count)
                : "r"(sem), "m"(sem->count)
                : "memory");

        return tmp+delta;
}

#endif /* __KERNEL__ */
#endif /* _I386_RWSEM_H */
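
To make the count encoding in the file's header comment concrete, here is a small standalone C sketch (not part of the commit above) that reuses the same bias constants and checks the values the comment describes for uncontended readers and an uncontended writer. It models the fast paths as plain, non-atomic arithmetic purely to show the numbers; the real code does the same arithmetic with LOCK-prefixed INC/XADD/CMPXCHG.

/* Illustrative only -- standalone, not part of asm-i386/rwsem.h. */
#include <assert.h>
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        signed long count = RWSEM_UNLOCKED_VALUE;

        /* Uncontended read lock: add ACTIVE_READ_BIAS; the result stays
         * positive, so the "js" in __down_read() does not take the slow path. */
        count += RWSEM_ACTIVE_READ_BIAS;
        assert(count == 0x00000001);

        /* A second concurrent reader: the LSW counts active locks. */
        count += RWSEM_ACTIVE_READ_BIAS;
        assert((count & RWSEM_ACTIVE_MASK) == 2);

        /* Both readers release. */
        count -= 2 * RWSEM_ACTIVE_READ_BIAS;
        assert(count == RWSEM_UNLOCKED_VALUE);

        /* Uncontended write lock: XADD adds ACTIVE_WRITE_BIAS and returns the
         * old value (0 here, so the lock was granted); the new count is
         * 0xffff0001 as a 32-bit value, matching the header comment. */
        signed long old = count;
        count += RWSEM_ACTIVE_WRITE_BIAS;
        assert(old == 0);
        assert((unsigned int)count == 0xffff0001u);

        printf("count transitions match the header comment\n");
        return 0;
}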
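
As the #error near the top notes, kernel code is expected to reach this header only through <linux/rwsem.h>. The following is a rough usage sketch under that assumption (it needs a kernel build environment and is not buildable on its own); example_sem, reader_path() and writer_path() are hypothetical names, and the generic down_read()/up_read()/down_write()/downgrade_write() wrappers are what end up calling the __down_read()/__up_read()/__down_write()/__downgrade_write() fast paths defined above.

/* Illustrative usage sketch -- not part of the commit above. */
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);      /* count starts at RWSEM_UNLOCKED_VALUE */

static void reader_path(void)
{
        down_read(&example_sem);        /* fast path: __down_read(), sleeps on contention */
        /* ... read shared data ... */
        up_read(&example_sem);          /* __up_read(), wakes waiters if needed */
}

static void writer_path(void)
{
        down_write(&example_sem);       /* __down_write(): XADD of ACTIVE_WRITE_BIAS */
        /* ... modify shared data ... */
        downgrade_write(&example_sem);  /* __downgrade_write(): keep a read hold, admit readers */
        /* ... continue reading ... */
        up_read(&example_sem);          /* release the downgraded (read) hold */
}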