Diffstat (limited to 'kernel/mutex.c')
 -rw-r--r--  kernel/mutex.c  121
 1 file changed, 104 insertions(+), 17 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4f45d4b658ef..5d79781394a3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -10,6 +10,11 @@
  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  * David Howells for suggestions and improvements.
  *
+ *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
+ *    from the -rt tree, where it was originally implemented for rtmutexes
+ *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
+ *    and Sven Dietrich.
+ *
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
+	mutex_clear_owner(lock);
 
 	debug_mutex_init(lock, name, key);
 }
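
The owner-tracking helpers used above are not part of this file's diff (they live in the mutex private headers); a minimal sketch of what they are assumed to look like for the CONFIG_SMP && !CONFIG_DEBUG_MUTEXES build, inferred from the "struct thread_info *owner" spin loop further down:

/* Sketch only: the owner field is advisory and written without atomics. */
static inline void mutex_set_owner(struct mutex *lock)
{
	lock->owner = current_thread_info();
}

static inline void mutex_clear_owner(struct mutex *lock)
{
	lock->owner = NULL;
}

With mutex debugging enabled the owner bookkeeping is assumed to be handled by the debug code under wait_lock instead, which is why the unlock fastpath below guards its mutex_clear_owner() call with a config check.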
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 	 * 'unlocked' into 'locked' state.
 	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	mutex_set_owner(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
 	 */
+#ifndef CONFIG_DEBUG_MUTEXES
+	/*
+	 * When debugging is enabled we must not clear the owner before time,
+	 * the slow path will always be taken, and that clears the owner field
+	 * after verifying that it was indeed current.
+	 */
+	mutex_clear_owner(lock);
+#endif
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
 
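The __mutex_fastpath_lock()/__mutex_fastpath_unlock() helpers are architecture-provided and outside this diff; as a rough sketch, assuming the generic atomic-decrement flavour (count is 1 when unlocked, 0 when locked, negative when waiters may be queued):

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* 1 -> 0 is the uncontended acquire; below 0 means contention */
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);			/* fall back to the slowpath */
}

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* 0 -> 1 is the uncontended release; <= 0 means waiters may exist */
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);			/* wake up a waiter */
}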
@@ -129,21 +144,75 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
-	unsigned int old_val;
 	unsigned long flags;
 
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+	/*
+	 * Optimistic spinning.
+	 *
+	 * We try to spin for acquisition when we find that there are no
+	 * pending waiters and the lock owner is currently running on a
+	 * (different) CPU.
+	 *
+	 * The rationale is that if the lock owner is running, it is likely to
+	 * release the lock soon.
+	 *
+	 * Since this needs the lock owner, and this mutex implementation
+	 * doesn't track the owner atomically in the lock field, we need to
+	 * track it non-atomically.
+	 *
+	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+	 * to serialize everything.
+	 */
+
+	for (;;) {
+		struct thread_info *owner;
+
+		/*
+		 * If there's an owner, wait for it to either
+		 * release the lock or go to sleep.
+		 */
+		owner = ACCESS_ONCE(lock->owner);
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
+
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
+		/*
+		 * When there's no owner, we might have preempted between the
+		 * owner acquiring the lock and setting the owner field. If
+		 * we're an RT task that will live-lock because we won't let
+		 * the owner complete.
+		 */
+		if (!owner && (need_resched() || rt_task(task)))
+			break;
+
+		/*
+		 * The cpu_relax() call is a compiler barrier which forces
+		 * everything in this loop to be re-loaded. We don't need
+		 * memory barriers as we'll eventually observe the right
+		 * values at the cost of a few extra spins.
+		 */
+		cpu_relax();
+	}
+#endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	old_val = atomic_xchg(&lock->count, -1);
-	if (old_val == 1)
+	if (atomic_xchg(&lock->count, -1) == 1)
 		goto done;
 
 	lock_contended(&lock->dep_map, ip);
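
mutex_spin_on_owner() is added elsewhere in the same patch (kernel/sched.c, since it needs runqueue internals); a simplified sketch of the contract the spin loop above relies on, leaving out details such as the DEBUG_PAGEALLOC-safe access to owner->cpu and the scheduler feature switch:

/*
 * Simplified sketch. Returns nonzero while spinning is still worthwhile
 * (the owner changed, so the lock may now be free) and zero once the
 * caller should give up and block (the owner was preempted, or we need
 * to reschedule ourselves).
 */
static int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
{
	struct rq *rq = cpu_rq(owner->cpu);

	for (;;) {
		/* Owner changed: the lock was released or handed over. */
		if (lock->owner != owner)
			return 1;

		/* Owner no longer on its CPU, or we must yield: go sleep. */
		if (task_thread_info(rq->curr) != owner || need_resched())
			return 0;

		cpu_relax();
	}
}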
@@ -158,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * that when we release the lock, we properly wake up the
 		 * other waiters:
 		 */
-		old_val = atomic_xchg(&lock->count, -1);
-		if (old_val == 1)
+		if (atomic_xchg(&lock->count, -1) == 1)
 			break;
 
 		/*
@@ -173,21 +241,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
+			preempt_enable();
 			return -EINTR;
 		}
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		schedule();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
 done:
 	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-	debug_mutex_set_owner(lock, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -196,6 +265,7 @@ done:
 	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
 
 	return 0;
 }
@@ -222,7 +292,8 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
+				   subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +331,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 		wake_up_process(waiter->task);
 	}
 
-	debug_mutex_clear_owner(lock);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
@@ -298,18 +367,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_interruptible_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 int __sched mutex_lock_killable(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_killable_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
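As a usage reminder for the interruptible/killable variants: the lock is held only when 0 is returned, so callers must check the return value before touching protected state. The struct and field names in this example are made up for illustration:

/* Hypothetical caller; demo_dev, ->lock and ->count are illustrative only. */
static int demo_update(struct demo_dev *dev)
{
	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;	/* hit by a signal, lock not taken */

	dev->count++;			/* critical section */

	mutex_unlock(&dev->lock);
	return 0;
}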
@@ -352,9 +433,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1)) {
-		debug_mutex_set_owner(lock, current_thread_info());
+		mutex_set_owner(lock);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 	}
+
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -380,8 +462,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	return __mutex_fastpath_trylock(&lock->count,
-					__mutex_trylock_slowpath);
+	int ret;
+
+	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
+	if (ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_trylock);
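
mutex_trylock() keeps the spin_trylock()-style convention of returning 1 on success and 0 on contention, so an opportunistic caller looks like this (names again hypothetical):

/* Hypothetical best-effort path; demo_dev and demo_flush() are illustrative. */
static void demo_try_flush(struct demo_dev *dev)
{
	if (!mutex_trylock(&dev->lock))
		return;			/* busy; a later call will retry */

	demo_flush(dev);		/* work done under dev->lock */

	mutex_unlock(&dev->lock);
}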
