 include/linux/mutex.h |  9 ++++++---
 kernel/mutex.c        | 35 ++++++++++++++++++++---------------
 2 files changed, 26 insertions(+), 18 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 0d50ea3df689..6a735c72f23f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -120,14 +120,17 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
  * See kernel/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/mutex-design.txt.
  */
-extern void fastcall mutex_lock(struct mutex *lock);
-extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
+
+#define mutex_lock(lock) mutex_lock_nested(lock, 0)
+#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #else
+extern void fastcall mutex_lock(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 #endif
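With CONFIG_DEBUG_LOCK_ALLOC set, mutex_lock() and mutex_lock_interruptible() are now plain macros that forward to the _nested() variants with subclass 0, so lockdep records the caller's own acquisition point rather than an address inside kernel/mutex.c. For context, a minimal sketch of the subclass API these macros forward to; the pair-locking helpers are illustrative, only mutex_lock_nested() and SINGLE_DEPTH_NESTING are real kernel interfaces:

/*
 * Illustrative only: take two mutexes of the same lock class in a
 * fixed order.  Annotating the second acquisition with a distinct
 * subclass (SINGLE_DEPTH_NESTING == 1) tells lockdep the nesting is
 * intentional rather than a recursive deadlock.
 */
#include <linux/mutex.h>

static void lock_pair(struct mutex *a, struct mutex *b)
{
	mutex_lock(a);				/* subclass 0, via the new macro */
	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);	/* subclass 1 */
}

static void unlock_pair(struct mutex *a, struct mutex *b)
{
	mutex_unlock(b);
	mutex_unlock(a);
}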
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 691b86564dd9..d7fe50cc556f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -51,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 
 EXPORT_SYMBOL(__mutex_init);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
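The comment kept under the new #ifndef describes the fastpath/slowpath split: the fastpath is a single atomic decrement that only calls out of line on contention. A simplified sketch of that pattern, modeled loosely on the asm-generic mutex fastpath of this era rather than copied from the kernel:

/*
 * Simplified sketch of the fastpath/slowpath split: decrement the
 * count atomically and branch to the out-of-line slowpath only when
 * the result shows the mutex was already held (count went negative).
 */
static inline void fastpath_lock(atomic_t *count,
				 void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);		/* contended: sleep in the slowpath */
}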
@@ -92,6 +93,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 }
 
 EXPORT_SYMBOL(mutex_lock);
+#endif
 
 static void fastcall noinline __sched
 __mutex_unlock_slowpath(atomic_t *lock_count);
@@ -122,7 +124,8 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+		unsigned long ip)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
@@ -132,7 +135,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -143,7 +146,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	if (old_val == 1)
 		goto done;
 
-	lock_contended(&lock->dep_map, _RET_IP_);
+	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
 		/*
@@ -166,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 					signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-			mutex_release(&lock->dep_map, 1, _RET_IP_);
+			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
@@ -197,20 +200,12 @@ done:
 	return 0;
 }
 
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
-{
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
-
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -219,7 +214,7 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -271,6 +266,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
 	__mutex_unlock_common_slowpath(lock_count, 1);
 }
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
@@ -298,13 +294,22 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
 static int fastcall noinline __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
 }
+#endif
 
 /*
  * Spinlock based trylock, we take the spinlock and check whether we
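The point of threading ip through __mutex_lock_common() is that _RET_IP_ expands to __builtin_return_address(0), which only names the real caller when evaluated in the outermost API function; evaluated inside the shared slowpath it would point back into kernel/mutex.c. A hedged userspace sketch of the same effect (all function names are illustrative; noinline keeps the compiler from collapsing the frames):

/*
 * Userspace sketch of why the patch captures _RET_IP_ at the API
 * entry point and passes it down, instead of evaluating it deep in
 * the common slowpath.
 */
#include <stdio.h>

/* Deep helper: an address taken HERE names api_entry(), not the user. */
static void __attribute__((noinline)) common(void *caller_ip)
{
	void *inner_ip = __builtin_return_address(0);

	printf("ip passed down:   %p  (the real caller)\n", caller_ip);
	printf("ip taken in here: %p  (just api_entry itself)\n", inner_ip);
}

/* API entry point: capture the caller's address once, then pass it down. */
static void __attribute__((noinline)) api_entry(void)
{
	common(__builtin_return_address(0));
}

int main(void)
{
	api_entry();
	return 0;
}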