diff options
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/Makefile | 2 | ||||
-rw-r--r-- | kernel/semaphore.c | 264 |
2 files changed, 265 insertions, 1 deletions
diff --git a/kernel/Makefile b/kernel/Makefile index 6c584c55a6e9..f45c69e69688 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ | |||
8 | signal.o sys.o kmod.o workqueue.o pid.o \ | 8 | signal.o sys.o kmod.o workqueue.o pid.o \ |
9 | rcupdate.o extable.o params.o posix-timers.o \ | 9 | rcupdate.o extable.o params.o posix-timers.o \ |
10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ | 10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ |
11 | hrtimer.o rwsem.o nsproxy.o srcu.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o pm_qos_params.o | 12 | notifier.o ksysfs.o pm_qos_params.o |
13 | 13 | ||
14 | obj-$(CONFIG_SYSCTL) += sysctl_check.o | 14 | obj-$(CONFIG_SYSCTL) += sysctl_check.o |
diff --git a/kernel/semaphore.c b/kernel/semaphore.c new file mode 100644 index 000000000000..5c2942e768cd --- /dev/null +++ b/kernel/semaphore.c | |||
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008 Intel Corporation | ||
3 | * Author: Matthew Wilcox <willy@linux.intel.com> | ||
4 | * | ||
5 | * Distributed under the terms of the GNU GPL, version 2 | ||
6 | * | ||
7 | * This file implements counting semaphores. | ||
8 | * A counting semaphore may be acquired 'n' times before sleeping. | ||
9 | * See mutex.c for single-acquisition sleeping locks which enforce | ||
10 | * rules which allow code to be debugged more easily. | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | * Some notes on the implementation: | ||
15 | * | ||
16 | * The spinlock controls access to the other members of the semaphore. | ||
17 | * down_trylock() and up() can be called from interrupt context, so we | ||
18 | * have to disable interrupts when taking the lock. It turns out various | ||
19 | * parts of the kernel expect to be able to use down() on a semaphore in | ||
20 | * interrupt context when they know it will succeed, so we have to use | ||
21 | * irqsave variants for down(), down_interruptible() and down_killable() | ||
22 | * too. | ||
23 | * | ||
24 | * The ->count variable represents how many more tasks can acquire this | ||
25 | * semaphore. If it's zero, there may be tasks waiting on the wait_list. | ||
26 | */ | ||
27 | |||
28 | #include <linux/compiler.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/semaphore.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | |||
/*
 * Slow-path handlers for the contended case, defined at the bottom of
 * this file.  All are called with sem->lock held and interrupts disabled.
 */
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long jiffies);
static noinline void __up(struct semaphore *sem);
40 | |||
41 | /** | ||
42 | * down - acquire the semaphore | ||
43 | * @sem: the semaphore to be acquired | ||
44 | * | ||
45 | * Acquires the semaphore. If no more tasks are allowed to acquire the | ||
46 | * semaphore, calling this function will put the task to sleep until the | ||
47 | * semaphore is released. | ||
48 | * | ||
49 | * Use of this function is deprecated, please use down_interruptible() or | ||
50 | * down_killable() instead. | ||
51 | */ | ||
52 | void down(struct semaphore *sem) | ||
53 | { | ||
54 | unsigned long flags; | ||
55 | |||
56 | spin_lock_irqsave(&sem->lock, flags); | ||
57 | if (likely(sem->count > 0)) | ||
58 | sem->count--; | ||
59 | else | ||
60 | __down(sem); | ||
61 | spin_unlock_irqrestore(&sem->lock, flags); | ||
62 | } | ||
63 | EXPORT_SYMBOL(down); | ||
64 | |||
65 | /** | ||
66 | * down_interruptible - acquire the semaphore unless interrupted | ||
67 | * @sem: the semaphore to be acquired | ||
68 | * | ||
69 | * Attempts to acquire the semaphore. If no more tasks are allowed to | ||
70 | * acquire the semaphore, calling this function will put the task to sleep. | ||
71 | * If the sleep is interrupted by a signal, this function will return -EINTR. | ||
72 | * If the semaphore is successfully acquired, this function returns 0. | ||
73 | */ | ||
74 | int down_interruptible(struct semaphore *sem) | ||
75 | { | ||
76 | unsigned long flags; | ||
77 | int result = 0; | ||
78 | |||
79 | spin_lock_irqsave(&sem->lock, flags); | ||
80 | if (likely(sem->count > 0)) | ||
81 | sem->count--; | ||
82 | else | ||
83 | result = __down_interruptible(sem); | ||
84 | spin_unlock_irqrestore(&sem->lock, flags); | ||
85 | |||
86 | return result; | ||
87 | } | ||
88 | EXPORT_SYMBOL(down_interruptible); | ||
89 | |||
90 | /** | ||
91 | * down_killable - acquire the semaphore unless killed | ||
92 | * @sem: the semaphore to be acquired | ||
93 | * | ||
94 | * Attempts to acquire the semaphore. If no more tasks are allowed to | ||
95 | * acquire the semaphore, calling this function will put the task to sleep. | ||
96 | * If the sleep is interrupted by a fatal signal, this function will return | ||
97 | * -EINTR. If the semaphore is successfully acquired, this function returns | ||
98 | * 0. | ||
99 | */ | ||
100 | int down_killable(struct semaphore *sem) | ||
101 | { | ||
102 | unsigned long flags; | ||
103 | int result = 0; | ||
104 | |||
105 | spin_lock_irqsave(&sem->lock, flags); | ||
106 | if (likely(sem->count > 0)) | ||
107 | sem->count--; | ||
108 | else | ||
109 | result = __down_killable(sem); | ||
110 | spin_unlock_irqrestore(&sem->lock, flags); | ||
111 | |||
112 | return result; | ||
113 | } | ||
114 | EXPORT_SYMBOL(down_killable); | ||
115 | |||
116 | /** | ||
117 | * down_trylock - try to acquire the semaphore, without waiting | ||
118 | * @sem: the semaphore to be acquired | ||
119 | * | ||
120 | * Try to acquire the semaphore atomically. Returns 0 if the mutex has | ||
121 | * been acquired successfully or 1 if it it cannot be acquired. | ||
122 | * | ||
123 | * NOTE: This return value is inverted from both spin_trylock and | ||
124 | * mutex_trylock! Be careful about this when converting code. | ||
125 | * | ||
126 | * Unlike mutex_trylock, this function can be used from interrupt context, | ||
127 | * and the semaphore can be released by any task or interrupt. | ||
128 | */ | ||
129 | int down_trylock(struct semaphore *sem) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | int count; | ||
133 | |||
134 | spin_lock_irqsave(&sem->lock, flags); | ||
135 | count = sem->count - 1; | ||
136 | if (likely(count >= 0)) | ||
137 | sem->count = count; | ||
138 | spin_unlock_irqrestore(&sem->lock, flags); | ||
139 | |||
140 | return (count < 0); | ||
141 | } | ||
142 | EXPORT_SYMBOL(down_trylock); | ||
143 | |||
144 | /** | ||
145 | * down_timeout - acquire the semaphore within a specified time | ||
146 | * @sem: the semaphore to be acquired | ||
147 | * @jiffies: how long to wait before failing | ||
148 | * | ||
149 | * Attempts to acquire the semaphore. If no more tasks are allowed to | ||
150 | * acquire the semaphore, calling this function will put the task to sleep. | ||
151 | * If the semaphore is not released within the specified number of jiffies, | ||
152 | * this function returns -ETIME. It returns 0 if the semaphore was acquired. | ||
153 | */ | ||
154 | int down_timeout(struct semaphore *sem, long jiffies) | ||
155 | { | ||
156 | unsigned long flags; | ||
157 | int result = 0; | ||
158 | |||
159 | spin_lock_irqsave(&sem->lock, flags); | ||
160 | if (likely(sem->count > 0)) | ||
161 | sem->count--; | ||
162 | else | ||
163 | result = __down_timeout(sem, jiffies); | ||
164 | spin_unlock_irqrestore(&sem->lock, flags); | ||
165 | |||
166 | return result; | ||
167 | } | ||
168 | EXPORT_SYMBOL(down_timeout); | ||
169 | |||
170 | /** | ||
171 | * up - release the semaphore | ||
172 | * @sem: the semaphore to release | ||
173 | * | ||
174 | * Release the semaphore. Unlike mutexes, up() may be called from any | ||
175 | * context and even by tasks which have never called down(). | ||
176 | */ | ||
177 | void up(struct semaphore *sem) | ||
178 | { | ||
179 | unsigned long flags; | ||
180 | |||
181 | spin_lock_irqsave(&sem->lock, flags); | ||
182 | if (likely(list_empty(&sem->wait_list))) | ||
183 | sem->count++; | ||
184 | else | ||
185 | __up(sem); | ||
186 | spin_unlock_irqrestore(&sem->lock, flags); | ||
187 | } | ||
188 | EXPORT_SYMBOL(up); | ||
189 | |||
/* Functions for the contended case */

/*
 * Book-keeping for one sleeping task.  Lives on the waiter's own stack
 * and is linked into sem->wait_list for as long as the task sleeps.
 */
struct semaphore_waiter {
	struct list_head list;		/* entry in sem->wait_list */
	struct task_struct *task;	/* the sleeping task, for wake_up_process() */
	int up;				/* set to 1 by __up() when the semaphore is handed over */
};
197 | |||
/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 *
 * Called with sem->lock held and interrupts disabled; returns with the
 * lock held again.  Returns 0 on success, -EINTR when a (fatal) signal
 * aborted the sleep, -ETIME when the timeout expired.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct task_struct *task = current;
	struct semaphore_waiter waiter;

	/* Queue at the tail so waiters are woken in FIFO order.  Safe to
	 * initialise the fields after linking: sem->lock is held, so __up()
	 * cannot look at the entry until we drop the lock below. */
	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = task;
	waiter.up = 0;

	for (;;) {
		/* Only interruptible/killable sleeps abort on signals;
		 * state is compile-time constant, so these fold away. */
		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
			goto interrupted;
		if (state == TASK_KILLABLE && fatal_signal_pending(task))
			goto interrupted;
		if (timeout <= 0)
			goto timed_out;
		__set_task_state(task, state);
		/* NOTE(review): callers took the lock with irqsave, but the
		 * sleep path unconditionally re-enables interrupts here —
		 * a sleeping down() cannot have been called with irqs off. */
		spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		spin_lock_irq(&sem->lock);
		/* __up() already unlinked us and handed us the semaphore. */
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}
236 | |||
/* Uninterruptible sleep, no timeout; cannot fail, so no return value. */
static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
241 | |||
/* Signal-interruptible sleep, no timeout; returns 0 or -EINTR. */
static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
246 | |||
/* Sleep aborted only by fatal signals; returns 0 or -EINTR. */
static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
251 | |||
/* Uninterruptible sleep bounded by @jiffies; returns 0 or -ETIME. */
static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
}
256 | |||
257 | static noinline void __sched __up(struct semaphore *sem) | ||
258 | { | ||
259 | struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, | ||
260 | struct semaphore_waiter, list); | ||
261 | list_del(&waiter->list); | ||
262 | waiter->up = 1; | ||
263 | wake_up_process(waiter->task); | ||
264 | } | ||