-rw-r--r--	include/linux/mutex.h	119
-rw-r--r--	kernel/Makefile		  2
-rw-r--r--	kernel/mutex.c		325
-rw-r--r--	kernel/mutex.h		 35

4 files changed, 480 insertions, 1 deletion
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
new file mode 100644
index 000000000000..9bce0fee68d4
--- /dev/null
+++ b/include/linux/mutex.h
@@ -0,0 +1,119 @@
/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <linux/list.h>
#include <linux/spinlock_types.h>

#include <asm/atomic.h>

/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in irq contexts
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
struct mutex {
	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
	atomic_t		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
	struct thread_info	*owner;
	struct list_head	held_list;
	unsigned long		acquire_ip;
	const char		*name;
	void			*magic;
#endif
};

/*
 * This is the control structure for tasks blocked on mutex,
 * which resides on the blocked task's kernel stack:
 */
struct mutex_waiter {
	struct list_head	list;
	struct task_struct	*task;
#ifdef CONFIG_DEBUG_MUTEXES
	struct mutex		*lock;
	void			*magic;
#endif
};

#ifdef CONFIG_DEBUG_MUTEXES
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
# define mutex_init(mutex)			__mutex_init(mutex, NULL)
# define mutex_destroy(mutex)			do { } while (0)
# define mutex_debug_show_all_locks()		do { } while (0)
# define mutex_debug_show_held_locks(p)		do { } while (0)
# define mutex_debug_check_no_locks_held(task)	do { } while (0)
# define mutex_debug_check_no_locks_freed(from, to)	do { } while (0)
#endif

#define __MUTEX_INITIALIZER(lockname) \
		{ .count = ATOMIC_INIT(1) \
		, .wait_lock = SPIN_LOCK_UNLOCKED \
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
		__DEBUG_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

extern void fastcall __mutex_init(struct mutex *lock, const char *name);

/***
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 */
static inline int fastcall mutex_is_locked(struct mutex *lock)
{
	return atomic_read(&lock->count) != 1;
}

/*
 * See kernel/mutex.c for detailed documentation of these APIs.
 * Also see Documentation/mutex-design.txt.
 */
extern void fastcall mutex_lock(struct mutex *lock);
extern int fastcall mutex_lock_interruptible(struct mutex *lock);
/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 *       not the down_trylock() convention!
 */
extern int fastcall mutex_trylock(struct mutex *lock);
extern void fastcall mutex_unlock(struct mutex *lock);

#endif
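For orientation, here is a minimal usage sketch of the API declared above. The my_mutex/shared_count names are hypothetical example code, not part of this patch; note how mutex_trylock() follows the spin_trylock() convention (1 on success, 0 on contention):

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(my_mutex);	/* statically initialized, unlocked */
static int shared_count;	/* protected by my_mutex */

static void update_count(void)
{
	mutex_lock(&my_mutex);		/* sleeps until the mutex is acquired */
	shared_count++;
	mutex_unlock(&my_mutex);	/* must be done by the acquiring task */
}

static int update_count_opportunistic(void)
{
	/* 1 on success, 0 on contention - the spin_trylock() convention */
	if (!mutex_trylock(&my_mutex))
		return -EBUSY;
	shared_count++;
	mutex_unlock(&my_mutex);
	return 0;
}

static int update_count_interruptible(void)
{
	/* 0 on success, -EINTR if a signal arrived while sleeping */
	int ret = mutex_lock_interruptible(&my_mutex);

	if (ret)
		return ret;
	shared_count++;
	mutex_unlock(&my_mutex);
	return 0;
}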
diff --git a/kernel/Makefile b/kernel/Makefile
index 4f5a1453093a..de580b4d54a4 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,7 +7,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 	    sysctl.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o intermodule.o extable.o params.o posix-timers.o \
-	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o
+	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o
 
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
diff --git a/kernel/mutex.c b/kernel/mutex.c
new file mode 100644
index 000000000000..7eb960661441
--- /dev/null
+++ b/kernel/mutex.c
@@ -0,0 +1,325 @@
/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void fastcall __mutex_init(struct mutex *lock, const char *name)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name);
}

EXPORT_SYMBOL(__mutex_init);

/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void fastcall __sched mutex_lock(struct mutex *lock)
{
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 *
	 * NOTE: if asm/mutex.h is included, then some architectures
	 * rely on mutex_lock() having _no other code_ here but this
	 * fastpath. That allows the assembly fastpath to do
	 * tail-merging optimizations. (If you want to put testcode
	 * here, do it under #ifndef CONFIG_DEBUG_MUTEXES.)
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);

static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 *
	 * NOTE: no other code must be here - see mutex_lock().
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;

	debug_mutex_init_waiter(&waiter);

	spin_lock_mutex(&lock->wait_lock);

	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE &&
						signal_pending(task))) {
			mutex_remove_waiter(lock, &waiter, task->thread_info);
			spin_unlock_mutex(&lock->wait_lock);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock);
		schedule();
		spin_lock_mutex(&lock->wait_lock);
	}

	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task->thread_info);
	debug_mutex_set_owner(lock, task->thread_info __IP__);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock);

	debug_mutex_free_waiter(&waiter);

	DEBUG_WARN_ON(list_empty(&lock->held_list));
	DEBUG_WARN_ON(lock->owner != task->thread_info);

	return 0;
}

static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
}

/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	DEBUG_WARN_ON(lock->owner != current_thread_info());

	spin_lock_mutex(&lock->wait_lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock);
}

/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
	/* NOTE: no other code must be here - see mutex_lock() */
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
}

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	int prev;

	spin_lock_mutex(&lock->wait_lock);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1))
		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return value is the inverse of the down_trylock() return
 * value! Be careful about this when converting semaphore users
 * to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
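The patch calls four per-architecture fastpath helpers - __mutex_fastpath_lock(), __mutex_fastpath_unlock(), __mutex_fastpath_lock_retval() and __mutex_fastpath_trylock() - but their implementations live in asm/mutex.h and are not shown here. As a rough illustration of the contract they must satisfy, here is a sketch in the style of an xchg-based implementation; the bodies below are an assumption for orientation, not code from this patch:

/*
 * Sketch of xchg-style fastpath helpers (assumed, not part of this patch).
 * The count protocol: 1 = unlocked, 0 = locked, -1 = locked with (possible)
 * waiters. Any transition that might have raced with a waiter falls back
 * into the fail_fn slowpath.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	/* 1->0 is the uncontended acquire; anything else enters the slowpath */
	if (unlikely(atomic_xchg(count, 0) != 1))
		fail_fn(count);
}

static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	/* 0->1 is the uncontended release; a non-0 old value means waiters */
	if (unlikely(atomic_xchg(count, 1) != 0))
		fail_fn(count);
}

static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/* uncontended trylock; otherwise use the spinlock-based slowpath */
	if (likely(atomic_xchg(count, 0) == 1))
		return 1;
	return fail_fn(count);
}

/*
 * An xchg-based unlock fastpath has already set the count to 1 before
 * calling the slowpath, so the slowpath does not need to unlock again:
 */
#define __mutex_slowpath_needs_to_unlock()	0

This also shows why __mutex_unlock_slowpath() consults __mutex_slowpath_needs_to_unlock(): architectures whose unlock fastpath leaves the count untouched on failure must have the slowpath perform the 0->1 (or -1->1) transition itself.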
diff --git a/kernel/mutex.h b/kernel/mutex.h
new file mode 100644
index 000000000000..00fe84e7b672
--- /dev/null
+++ b/kernel/mutex.h
@@ -0,0 +1,35 @@
/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains mutex debugging related internal prototypes, for the
 * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
 */

#define spin_lock_mutex(lock)			spin_lock(lock)
#define spin_unlock_mutex(lock)			spin_unlock(lock)
#define mutex_remove_waiter(lock, waiter, ti) \
		__list_del((waiter)->list.prev, (waiter)->list.next)

#define DEBUG_WARN_ON(c)				do { } while (0)
#define debug_mutex_set_owner(lock, new_owner)		do { } while (0)
#define debug_mutex_clear_owner(lock)			do { } while (0)
#define debug_mutex_init_waiter(waiter)			do { } while (0)
#define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
#define debug_mutex_free_waiter(waiter)			do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti, ip)	do { } while (0)
#define debug_mutex_unlock(lock)			do { } while (0)
#define debug_mutex_init(lock, name)			do { } while (0)

/*
 * Return-address parameters/declarations. They are very useful for
 * debugging, but add overhead in the !DEBUG case - so we go to the
 * trouble of using this not too elegant but zero-cost solution:
 */
#define __IP_DECL__
#define __IP__
#define __RET_IP__
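For contrast, in the CONFIG_DEBUG_MUTEXES case the matching mutex-debug.h (added by a companion patch, not shown in this hunk) gives the __IP__ macros real expansions. A sketch of their likely shape, shown here as an assumption for orientation:

/*
 * Sketch (assumed, based on the debug counterpart not shown here): the
 * __IP__ macros smuggle the acquire-site return address through the
 * slowpath call chain, so debug output can name the exact call site.
 */
#define __IP_DECL__		, unsigned long ip
#define __IP__			, ip
#define __RET_IP__		, (unsigned long)__builtin_return_address(0)

With these definitions, a call such as __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__) grows an extra ip argument for point-of-acquire tracking, while in the !DEBUG case all three macros expand to nothing and the parameter disappears at zero cost.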