Diffstat (limited to 'kernel/mutex.c'):
 kernel/mutex.c | 122
 1 file changed, 105 insertions(+), 17 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4f45d4b658ef..507cf2b5e9f1 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -10,6 +10,11 @@
  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  * David Howells for suggestions and improvements.
  *
+ *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
+ *    from the -rt tree, where it was originally implemented for rtmutexes
+ *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
+ *    and Sven Dietrich.
+ *
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
+	mutex_clear_owner(lock);
 
 	debug_mutex_init(lock, name, key);
 }
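
The mutex_set_owner()/mutex_clear_owner() helpers used above are not defined in this file; the series adds them to kernel/mutex.h (with a debug counterpart in mutex-debug.h). A minimal sketch of the non-debug variants, assuming the new owner field is a plain struct thread_info pointer as the spinning code below expects:

	static inline void mutex_set_owner(struct mutex *lock)
	{
		/* record who holds the lock, for the optimistic spinners */
		lock->owner = current_thread_info();
	}

	static inline void mutex_clear_owner(struct mutex *lock)
	{
		/* no owner recorded: spinners fall back to blocking */
		lock->owner = NULL;
	}
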
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 	 * 'unlocked' into 'locked' state.
 	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	mutex_set_owner(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
	 */
+#ifndef CONFIG_DEBUG_MUTEXES
+	/*
+	 * When debugging is enabled we must not clear the owner before time,
+	 * the slow path will always be taken, and that clears the owner field
+	 * after verifying that it was indeed current.
+	 */
+	mutex_clear_owner(lock);
+#endif
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
 
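
The owner must be cleared before the count can go back to 1: as soon as the fastpath releases the count, another CPU may acquire the mutex and set itself as owner. For reference, a rough sketch of what the fastpath helper looks like on architectures using the asm-generic atomic-decrement scheme (the exact code is per-architecture, e.g. include/asm-generic/mutex-dec.h):

	static inline void
	__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		/* 0 -> 1 transition; a result <= 0 means there are waiters */
		if (unlikely(atomic_inc_return(count) <= 0))
			fail_fn(count);		/* wake them via the slowpath */
	}
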
@@ -129,21 +144,76 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
-	unsigned int old_val;
 	unsigned long flags;
 
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
+    !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
+	/*
+	 * Optimistic spinning.
+	 *
+	 * We try to spin for acquisition when we find that there are no
+	 * pending waiters and the lock owner is currently running on a
+	 * (different) CPU.
+	 *
+	 * The rationale is that if the lock owner is running, it is likely to
+	 * release the lock soon.
+	 *
+	 * Since this needs the lock owner, and this mutex implementation
+	 * doesn't track the owner atomically in the lock field, we need to
+	 * track it non-atomically.
+	 *
+	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+	 * to serialize everything.
+	 */
+
+	for (;;) {
+		struct thread_info *owner;
+
+		/*
+		 * If there's an owner, wait for it to either
+		 * release the lock or go to sleep.
+		 */
+		owner = ACCESS_ONCE(lock->owner);
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
+
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
+		/*
+		 * When there's no owner, we might have preempted between the
+		 * owner acquiring the lock and setting the owner field. If
+		 * we're an RT task that will live-lock because we won't let
+		 * the owner complete.
+		 */
+		if (!owner && (need_resched() || rt_task(task)))
+			break;
+
+		/*
+		 * The cpu_relax() call is a compiler barrier which forces
+		 * everything in this loop to be re-loaded. We don't need
+		 * memory barriers as we'll eventually observe the right
+		 * values at the cost of a few extra spins.
+		 */
+		cpu_relax();
+	}
+#endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	old_val = atomic_xchg(&lock->count, -1);
-	if (old_val == 1)
+	if (atomic_xchg(&lock->count, -1) == 1)
 		goto done;
 
 	lock_contended(&lock->dep_map, ip);
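
mutex_spin_on_owner() itself is added to kernel/sched.c elsewhere in this series: it returns 0 when the caller should stop spinning (the owner scheduled out, or we need to reschedule) and non-zero when the owner changed and the cmpxchg above is worth retrying. A simplified, hypothetical sketch of that idea (the real helper does a more careful per-runqueue check and guards against the owner's task_struct going away; task_curr() is used here only as a stand-in for that check):

	static int spin_on_owner_sketch(struct mutex *lock, struct thread_info *owner)
	{
		while (ACCESS_ONCE(lock->owner) == owner) {
			/* give up the CPU instead of spinning if we're due */
			if (need_resched())
				return 0;
			/*
			 * Stop spinning once the owner is no longer on a CPU;
			 * it cannot release the lock until it runs again.
			 */
			if (!task_curr(owner->task))
				return 0;
			cpu_relax();
		}
		/* owner changed (or lock released): retry the cmpxchg */
		return 1;
	}
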
@@ -158,8 +228,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * that when we release the lock, we properly wake up the
 		 * other waiters:
 		 */
-		old_val = atomic_xchg(&lock->count, -1);
-		if (old_val == 1)
+		if (atomic_xchg(&lock->count, -1) == 1)
 			break;
 
 		/*
@@ -173,21 +242,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
+			preempt_enable();
 			return -EINTR;
 		}
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		schedule();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
 done:
 	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-	debug_mutex_set_owner(lock, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
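
The switch from schedule() to __schedule() pairs with the preempt_disable() taken at the top of __mutex_lock_common(): the slowpath now sleeps with preemption already disabled, and the scheduler side of this series splits schedule() so that __schedule() is the core that expects exactly that state. Roughly (assumed shape; the actual split lives in kernel/sched.c):

	asmlinkage void __sched schedule(void)
	{
	need_resched:
		preempt_disable();
		__schedule();			/* core; runs with preemption off */
		preempt_enable_no_resched();
		if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
			goto need_resched;
	}
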
@@ -222,7 +293,8 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
+				   subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +332,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 		wake_up_process(waiter->task);
 	}
 
-	debug_mutex_clear_owner(lock);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
@@ -298,18 +368,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  */
 int __sched mutex_lock_interruptible(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_interruptible_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 int __sched mutex_lock_killable(struct mutex *lock)
 {
+	int ret;
+
 	might_sleep();
-	return __mutex_fastpath_lock_retval
+	ret = __mutex_fastpath_lock_retval
 			(&lock->count, __mutex_lock_killable_slowpath);
+	if (!ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
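
Unlike the void fastpath helpers, __mutex_fastpath_lock_retval() reports whether the lock was actually taken, which is what lets these wrappers set the owner only on success. On architectures using the asm-generic atomic-decrement flavour it looks roughly like this (see include/asm-generic/mutex-dec.h; other architectures supply their own variant):

	static inline int
	__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
	{
		/* 1 -> 0 is the uncontended acquire; anything below punts */
		if (unlikely(atomic_dec_return(count) < 0))
			return fail_fn(count);	/* slowpath; may return -EINTR */
		return 0;
	}
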
@@ -352,9 +434,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1)) {
-		debug_mutex_set_owner(lock, current_thread_info());
+		mutex_set_owner(lock);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 	}
+
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -380,8 +463,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	return __mutex_fastpath_trylock(&lock->count,
-					__mutex_trylock_slowpath);
+	int ret;
+
+	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
+	if (ret)
+		mutex_set_owner(lock);
+
+	return ret;
 }
 
 EXPORT_SYMBOL(mutex_trylock);
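
mutex_trylock() returns 1 on success and 0 if the mutex is already held, and it never sleeps, so it suits callers that must not block on the lock. A small, purely illustrative caller (struct my_dev, its lock field and do_quick_work() are made-up names):

	static int poke_device(struct my_dev *dev)
	{
		if (!mutex_trylock(&dev->lock))
			return -EBUSY;		/* someone else holds it; don't wait */

		do_quick_work(dev);		/* hypothetical work under the lock */
		mutex_unlock(&dev->lock);
		return 0;
	}
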