author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/i386/kernel/semaphore.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/i386/kernel/semaphore.c')
-rw-r--r--   arch/i386/kernel/semaphore.c   297
1 file changed, 297 insertions(+), 0 deletions(-)
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c
new file mode 100644
index 000000000000..469f496e55c0
--- /dev/null
+++ b/arch/i386/kernel/semaphore.c
@@ -0,0 +1,297 @@
/*
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering is protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
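/*
 * Editorial sketch (an assumption, not original text): in pseudo-C the
 * fast path from <asm/semaphore.h> behaves roughly like
 *
 *      down(sem):  if (atomic_dec_return(&sem->count) < 0)
 *                      __down(sem);    // contended: sleep
 *      up(sem):    if (atomic_inc_return(&sem->count) <= 0)
 *                      __up(sem);      // a waiter slept: wake one
 *
 * where atomic_dec_return()/atomic_inc_return() stand in for the real
 * "lock decl"/"lock incl" plus a sign test, so the uncontended case
 * costs a single locked instruction.
 */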

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative, we must
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
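/*
 * Editorial worked example (inferred from the code below, not original
 * text): suppose the semaphore is held (count == 0) and two tasks have
 * run the fast-path decrement before either runs the loop, so
 * count == -2 and sleepers == 2. The first waiter to take the
 * waitqueue lock adds sleepers - 1 == 1 to count, folding both waiters
 * into a single -1; count therefore never drifts further negative
 * while tasks sleep, and whichever task sees atomic_add_negative()
 * return false owns the semaphore and resets sleepers to 0.
 */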

static fastcall void __attribute_used__ __up(struct semaphore *sem)
{
        wake_up(&sem->wait);
}

static fastcall void __attribute_used__ __sched __down(struct semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;

        tsk->state = TASK_UNINTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);

        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock in
                 * the wait_queue_head.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);

                schedule();

                spin_lock_irqsave(&sem->wait.lock, flags);
                tsk->state = TASK_UNINTERRUPTIBLE;
        }
        remove_wait_queue_locked(&sem->wait, &wait);
        wake_up_locked(&sem->wait);
        spin_unlock_irqrestore(&sem->wait.lock, flags);
        tsk->state = TASK_RUNNING;
}
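/*
 * Editorial usage sketch (hypothetical caller, not part of this file):
 *
 *      static DECLARE_MUTEX(example_sem);      // semaphore, count = 1
 *
 *      down(&example_sem);     // reaches __down() above on contention
 *      ... critical section ...
 *      up(&example_sem);       // reaches __up() above if anyone slept
 */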

static fastcall int __attribute_used__ __sched __down_interruptible(struct semaphore *sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;

        tsk->state = TASK_INTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);

        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * and exit.
                 */
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock in
                 * wait_queue_head. The "-1" is because we're
                 * still hoping to get the semaphore.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);

                schedule();

                spin_lock_irqsave(&sem->wait.lock, flags);
                tsk->state = TASK_INTERRUPTIBLE;
        }
        remove_wait_queue_locked(&sem->wait, &wait);
        wake_up_locked(&sem->wait);
        spin_unlock_irqrestore(&sem->wait.lock, flags);

        tsk->state = TASK_RUNNING;
        return retval;
}
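/*
 * Editorial usage sketch (hypothetical caller): unlike down(), this
 * variant can fail with -EINTR, so callers must handle it, e.g.:
 *
 *      if (down_interruptible(&example_sem))
 *              return -ERESTARTSYS;    // signal arrived while waiting
 *      ... critical section ...
 *      up(&example_sem);
 */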

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
static fastcall int __attribute_used__ __down_trylock(struct semaphore *sem)
{
        int sleepers;
        unsigned long flags;

        spin_lock_irqsave(&sem->wait.lock, flags);
        sleepers = sem->sleepers + 1;
        sem->sleepers = 0;

        /*
         * Add "everybody else" and us into it. They aren't
         * playing, because we own the spinlock in the
         * wait_queue_head.
         */
        if (!atomic_add_negative(sleepers, &sem->count)) {
                wake_up_locked(&sem->wait);
        }

        spin_unlock_irqrestore(&sem->wait.lock, flags);
        return 1;
}
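/*
 * Editorial sketch of the cmpxchg-based trylock dismissed above (an
 * assumption about its shape; cmpxchg needs a 486 or later):
 *
 *      int old = atomic_read(&sem->count);
 *      while (old > 0) {
 *              int seen = cmpxchg(&sem->count.counter, old, old - 1);
 *              if (seen == old)
 *                      return 0;       // acquired; no failure to undo
 *              old = seen;
 *      }
 *      return 1;                       // contended; take the slow path
 */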


/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %eax contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx) except %eax, which is either a
 * return value or just clobbered.
 */
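/*
 * Editorial sketch (an assumption drawn from <asm/semaphore.h> of this
 * era): the inline down() expands to roughly
 *
 *      lock; decl (sem)        # the critical, common path
 *      js 2f                   # went negative: contended
 *      1: ...
 *      2: lea sem, %eax        # pointer passed in %eax, not the stack
 *      call __down_failed      # the stub below preserves %ecx/%edx
 *      jmp 1b
 *
 * which is why these stubs, unlike ordinary C functions, save and
 * restore the caller-clobbered registers themselves.
 */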
asm(
".section .sched.text\n"
".align 4\n"
".globl __down_failed\n"
"__down_failed:\n\t"
#if defined(CONFIG_FRAME_POINTER)
"pushl %ebp\n\t"
"movl %esp,%ebp\n\t"
#endif
"pushl %edx\n\t"
"pushl %ecx\n\t"
"call __down\n\t"
"popl %ecx\n\t"
"popl %edx\n\t"
#if defined(CONFIG_FRAME_POINTER)
"movl %ebp,%esp\n\t"
"popl %ebp\n\t"
#endif
"ret"
);

asm(
".section .sched.text\n"
".align 4\n"
".globl __down_failed_interruptible\n"
"__down_failed_interruptible:\n\t"
#if defined(CONFIG_FRAME_POINTER)
"pushl %ebp\n\t"
"movl %esp,%ebp\n\t"
#endif
"pushl %edx\n\t"
"pushl %ecx\n\t"
"call __down_interruptible\n\t"
"popl %ecx\n\t"
"popl %edx\n\t"
#if defined(CONFIG_FRAME_POINTER)
"movl %ebp,%esp\n\t"
"popl %ebp\n\t"
#endif
"ret"
);

asm(
".section .sched.text\n"
".align 4\n"
".globl __down_failed_trylock\n"
"__down_failed_trylock:\n\t"
#if defined(CONFIG_FRAME_POINTER)
"pushl %ebp\n\t"
"movl %esp,%ebp\n\t"
#endif
"pushl %edx\n\t"
"pushl %ecx\n\t"
"call __down_trylock\n\t"
"popl %ecx\n\t"
"popl %edx\n\t"
#if defined(CONFIG_FRAME_POINTER)
"movl %ebp,%esp\n\t"
"popl %ebp\n\t"
#endif
"ret"
);

asm(
".section .sched.text\n"
".align 4\n"
".globl __up_wakeup\n"
"__up_wakeup:\n\t"
"pushl %edx\n\t"
"pushl %ecx\n\t"
"call __up\n\t"
"popl %ecx\n\t"
"popl %edx\n\t"
"ret"
);

/*
 * rw spinlock fallbacks
 */
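/*
 * Editorial note (an assumption; the bias value comes from the i386
 * headers of this era, where RW_LOCK_BIAS is 0x01000000): a writer
 * subtracts the whole bias and owns the lock only if the result is
 * zero; a reader subtracts 1 and succeeds while the result stays
 * non-negative. The fallbacks below undo their decrement, spin until
 * the lock looks free, and then retry from scratch.
 */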
#if defined(CONFIG_SMP)
asm(
".section .sched.text\n"
".align 4\n"
".globl __write_lock_failed\n"
"__write_lock_failed:\n\t"
LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
"1: rep; nop\n\t"
"cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
"jne 1b\n\t"
LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
"jnz __write_lock_failed\n\t"
"ret"
);

asm(
".section .sched.text\n"
".align 4\n"
".globl __read_lock_failed\n"
"__read_lock_failed:\n\t"
LOCK "incl (%eax)\n"
"1: rep; nop\n\t"
"cmpl $1,(%eax)\n\t"
"js 1b\n\t"
LOCK "decl (%eax)\n\t"
"js __read_lock_failed\n\t"
"ret"
);
#endif