Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c  95
1 file changed, 59 insertions, 36 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5449b210d9ed..8c71cf72a497 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -38,13 +39,14 @@
  *
  * It is not allowed to initialize an already locked mutex.
  */
-void fastcall __mutex_init(struct mutex *lock, const char *name)
+void
+__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
 
-	debug_mutex_init(lock, name);
+	debug_mutex_init(lock, name, key);
 }
 
 EXPORT_SYMBOL(__mutex_init);
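[Editorial note, not part of the diff: the new struct lock_class_key argument is what the lock validator uses to group every mutex initialized at a given call site into one lock class. Callers normally never pass it by hand; the mutex_init() wrapper in <linux/mutex.h> (changed in a companion patch, not shown in this view, which is limited to kernel/mutex.c) supplies a static key. A minimal, hypothetical sketch of a call site under that assumption; the my_dev names are illustrative only:]

#include <linux/mutex.h>

static DEFINE_MUTEX(cache_lock);	/* statically initialized mutex */

struct my_dev {				/* hypothetical structure, illustration only */
	struct mutex io_lock;
};

static void my_dev_setup(struct my_dev *dev)
{
	/*
	 * mutex_init() expands (roughly) to a __mutex_init() call that
	 * passes a static struct lock_class_key, so every io_lock
	 * initialized at this site shares one lock class for validation.
	 */
	mutex_init(&dev->io_lock);
}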
@@ -56,7 +58,7 @@ EXPORT_SYMBOL(__mutex_init);
  * branch is predicted by the CPU as default-untaken.
  */
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock - acquire the mutex
@@ -79,7 +81,7 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void fastcall __sched mutex_lock(struct mutex *lock)
+void inline fastcall __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -92,7 +94,7 @@ void fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 
 static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -120,17 +122,18 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned int old_val;
+	unsigned long flags;
 
-	debug_mutex_init_waiter(&waiter);
+	spin_lock_mutex(&lock->wait_lock, flags);
 
-	spin_lock_mutex(&lock->wait_lock);
-
-	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
+	debug_mutex_lock_common(lock, &waiter);
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	debug_mutex_add_waiter(lock, &waiter, task->thread_info);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -157,7 +160,8 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 		    signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task->thread_info);
-			spin_unlock_mutex(&lock->wait_lock);
+			mutex_release(&lock->dep_map, 1, _RET_IP_);
+			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
 			return -EINTR;
@@ -165,48 +169,57 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
-		spin_unlock_mutex(&lock->wait_lock);
+		spin_unlock_mutex(&lock->wait_lock, flags);
 		schedule();
-		spin_lock_mutex(&lock->wait_lock);
+		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task->thread_info);
-	debug_mutex_set_owner(lock, task->thread_info __IP__);
+	debug_mutex_set_owner(lock, task->thread_info);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
 
-	spin_unlock_mutex(&lock->wait_lock);
+	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
 
-	DEBUG_WARN_ON(list_empty(&lock->held_list));
-	DEBUG_WARN_ON(lock->owner != task->thread_info);
-
 	return 0;
 }
 
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __sched
+mutex_lock_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
 }
 
+EXPORT_SYMBOL_GPL(mutex_lock_nested);
+#endif
+
 /*
  * Release the lock, slowpath:
  */
-static fastcall noinline void
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
+static fastcall inline void
+__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	unsigned long flags;
 
-	DEBUG_WARN_ON(lock->owner != current_thread_info());
-
-	spin_lock_mutex(&lock->wait_lock);
+	spin_lock_mutex(&lock->wait_lock, flags);
+	mutex_release(&lock->dep_map, nested, _RET_IP_);
+	debug_mutex_unlock(lock);
 
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
@@ -216,8 +229,6 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
-	debug_mutex_unlock(lock);
-
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =
@@ -231,7 +242,16 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 
 	debug_mutex_clear_owner(lock);
 
-	spin_unlock_mutex(&lock->wait_lock);
+	spin_unlock_mutex(&lock->wait_lock, flags);
+}
+
+/*
+ * Release the lock, slowpath:
+ */
+static fastcall noinline void
+__mutex_unlock_slowpath(atomic_t *lock_count)
+{
+	__mutex_unlock_common_slowpath(lock_count, 1);
 }
 
 /*
@@ -239,7 +259,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock_interruptible - acquire the mutex, interruptable
@@ -262,11 +282,11 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
 }
 
 /*
@@ -276,18 +296,21 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
 static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
+	unsigned long flags;
 	int prev;
 
-	spin_lock_mutex(&lock->wait_lock);
+	spin_lock_mutex(&lock->wait_lock, flags);
 
 	prev = atomic_xchg(&lock->count, -1);
-	if (likely(prev == 1))
-		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
+	if (likely(prev == 1)) {
+		debug_mutex_set_owner(lock, current_thread_info());
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	}
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
 
-	spin_unlock_mutex(&lock->wait_lock);
+	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	return prev == 1;
 }
@@ -306,7 +329,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  * This function must not be used in interrupt context. The
  * mutex must be released by the same task that acquired it.
  */
-int fastcall mutex_trylock(struct mutex *lock)
+int fastcall __sched mutex_trylock(struct mutex *lock)
 {
 	return __mutex_fastpath_trylock(&lock->count,
 					__mutex_trylock_slowpath);
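[Editorial note, not part of the diff: mutex_lock_nested(), added above under CONFIG_DEBUG_LOCK_ALLOC, is how a caller tells the validator that holding two mutexes of the same lock class at once is intentional. A minimal, hypothetical sketch, assuming a parent/child pair of objects of the same type locked in a fixed order; the struct my_node and my_move_data names are illustrative only:]

#include <linux/mutex.h>
#include <linux/lockdep.h>	/* SINGLE_DEPTH_NESTING */

struct my_node {		/* hypothetical structure, illustration only */
	struct mutex lock;
	struct my_node *child;
	int data;
};

static void my_move_data(struct my_node *parent)
{
	struct my_node *child;

	mutex_lock(&parent->lock);		/* outer lock: subclass 0 */
	child = parent->child;
	if (child) {
		/* same lock class, one level deeper: annotate as subclass 1 */
		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
		child->data = parent->data;
		mutex_unlock(&child->lock);
	}
	mutex_unlock(&parent->lock);
}

[With CONFIG_DEBUG_LOCK_ALLOC disabled, mutex_lock_nested() is expected to fall back to a plain mutex_lock(), so the annotation adds no cost to production builds.]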