Diffstat (limited to 'kernel/mutex.c')
 -rw-r--r--  kernel/mutex.c | 315
 1 files changed, 315 insertions, 0 deletions

diff --git a/kernel/mutex.c b/kernel/mutex.c
new file mode 100644
index 000000000000..5449b210d9ed
--- /dev/null
+++ b/kernel/mutex.c
@@ -0,0 +1,315 @@
/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void fastcall __mutex_init(struct mutex *lock, const char *name)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);

        debug_mutex_init(lock, name);
}

EXPORT_SYMBOL(__mutex_init);
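
/*
 * Illustrative usage sketch - not part of this file. It assumes the
 * mutex_init() and DEFINE_MUTEX() helpers from <linux/mutex.h>, which
 * wrap __mutex_init() above; all names below are hypothetical.
 */
static DEFINE_MUTEX(example_static_lock);       /* statically defined, starts unlocked */

struct example_dev {
        struct mutex    lock;
        int             data;
};

static void example_dev_setup(struct example_dev *dev)
{
        /* must run before first use; memset()-ing to 0 is not a valid init */
        mutex_init(&dev->lock);
}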

/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void fastcall __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);

static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
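
/*
 * Illustrative usage sketch - not part of this file. A plain critical
 * section: the same task that called mutex_lock() must later call
 * mutex_unlock(), and neither call may be made from interrupt context.
 * All names are hypothetical.
 */
static DEFINE_MUTEX(example_lock);
static int example_counter;

static void example_update(void)
{
        mutex_lock(&example_lock);      /* may sleep if contended */
        example_counter++;              /* data protected by example_lock */
        mutex_unlock(&example_lock);
}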

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned int old_val;

        debug_mutex_init_waiter(&waiter);

        spin_lock_mutex(&lock->wait_lock);

        debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                old_val = atomic_xchg(&lock->count, -1);
                if (old_val == 1)
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE &&
                             signal_pending(task))) {
                        mutex_remove_waiter(lock, &waiter, task->thread_info);
                        spin_unlock_mutex(&lock->wait_lock);

                        debug_mutex_free_waiter(&waiter);
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock);
                schedule();
                spin_lock_mutex(&lock->wait_lock);
        }

        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, task->thread_info);
        debug_mutex_set_owner(lock, task->thread_info __IP__);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock);

        debug_mutex_free_waiter(&waiter);

        DEBUG_WARN_ON(list_empty(&lock->held_list));
        DEBUG_WARN_ON(lock->owner != task->thread_info);

        return 0;
}
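
/*
 * Note on the lock->count encoding relied upon by the slowpath above:
 *
 *    1 - unlocked
 *    0 - locked, no waiters
 *   -1 - locked, and there may be tasks blocked on wait_list
 *
 * This is why a waiter xchg()s the count to -1: the 0->1 unlock
 * fastpath then fails, forcing the releasing task into
 * __mutex_unlock_slowpath(), which is what wakes up the first waiter.
 */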

static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
}

/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        DEBUG_WARN_ON(lock->owner != current_thread_info());

        spin_lock_mutex(&lock->wait_lock);

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        debug_mutex_unlock(lock);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        debug_mutex_clear_owner(lock);

        spin_unlock_mutex(&lock->wait_lock);
}

/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
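
/*
 * Illustrative usage sketch - not part of this file. With the
 * interruptible variant the -EINTR return must be handled (typically
 * propagated) without touching the protected data. Names are
 * hypothetical.
 */
static DEFINE_MUTEX(example_io_lock);

static int example_write(void)
{
        if (mutex_lock_interruptible(&example_io_lock))
                return -EINTR;  /* a signal arrived while sleeping; lock not held */
        /* ... access data protected by example_io_lock ... */
        mutex_unlock(&example_io_lock);
        return 0;
}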

static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
}

/*
 * Spinlock-based trylock: we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        int prev;

        spin_lock_mutex(&lock->wait_lock);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1))
                debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock);

        return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return value is inverted relative to the down_trylock()
 * return values! Be careful about this when converting semaphore
 * users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall mutex_trylock(struct mutex *lock)
{
        return __mutex_fastpath_trylock(&lock->count,
                                        __mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
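
/*
 * Illustrative usage sketch - not part of this file. mutex_trylock()
 * follows the spin_trylock() convention: non-zero means the lock was
 * taken, the opposite of down_trylock(). A hypothetical opportunistic
 * path that simply skips the work on contention:
 */
static DEFINE_MUTEX(example_cache_lock);

static void example_shrink_cache(void)
{
        if (!mutex_trylock(&example_cache_lock))
                return;         /* contended - just try again later */
        /* ... trim data protected by example_cache_lock ... */
        mutex_unlock(&example_cache_lock);
}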