Diffstat (limited to 'kernel/mutex.c')
 kernel/mutex.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 691b86564dd9..d7fe50cc556f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -51,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 
 EXPORT_SYMBOL(__mutex_init);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
@@ -92,6 +93,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 }
 
 EXPORT_SYMBOL(mutex_lock);
+#endif
 
 static void fastcall noinline __sched
 __mutex_unlock_slowpath(atomic_t *lock_count);
@@ -122,7 +124,8 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+		unsigned long ip)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
@@ -132,7 +135,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -143,7 +146,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	if (old_val == 1)
 		goto done;
 
-	lock_contended(&lock->dep_map, _RET_IP_);
+	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
 		/*
@@ -166,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 					signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-			mutex_release(&lock->dep_map, 1, _RET_IP_);
+			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
@@ -197,20 +200,12 @@ done:
 	return 0;
 }
 
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
-{
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
-
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -219,7 +214,7 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -271,6 +266,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
 	__mutex_unlock_common_slowpath(lock_count, 1);
 }
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
@@ -298,13 +294,22 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
 static int fastcall noinline __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
 }
+#endif
 
 /*
  * Spinlock based trylock, we take the spinlock and check whether we
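In short, the change makes __mutex_lock_common() take the acquisition site as an explicit ip argument instead of sampling _RET_IP_ itself, so the lockdep annotations (mutex_acquire, lock_contended, mutex_release) record the real caller rather than the shared slowpath. A minimal userspace sketch of the same pattern, purely as a hypothetical illustration (lock_common() and my_lock() are invented names, not kernel APIs):

#include <stdio.h>

/* Stand-in for __mutex_lock_common(): the diagnostic record uses the ip
 * value handed down by the caller, not this function's own return address. */
static void lock_common(const char *name, unsigned long ip)
{
	printf("acquired %s, call site ip=%#lx\n", name, ip);
}

/* Wrapper in the role of mutex_lock_nested()/__mutex_lock_slowpath():
 * capture the address our caller will return to and pass it down. */
static __attribute__((noinline)) void my_lock(const char *name)
{
	lock_common(name, (unsigned long)__builtin_return_address(0));
}

int main(void)
{
	my_lock("demo_lock");	/* recorded ip points at this call site */
	return 0;
}

In the kernel, _RET_IP_ expands to (unsigned long)__builtin_return_address(0) in the same way, which is why the wrappers above the common code now pass it down explicitly.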