about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r-- include/linux/mutex.h |  5 +++++
-rw-r--r-- kernel/mutex.c        | 36 +++++++++++++++++++++++++++++---
2 files changed, 38 insertions(+), 3 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 601479772b98..05c590352dd7 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -125,15 +125,20 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+					unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
 #else
 extern void fastcall mutex_lock(struct mutex *lock);
 extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 #endif
 
 /*
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d7fe50cc556f..d9ec9b666250 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -166,9 +166,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-			     signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+					signal_pending(task)) ||
+				(state == TASK_KILLABLE &&
+					fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
@@ -211,6 +214,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
 int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
@@ -272,6 +283,9 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -294,6 +308,14 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+		(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
 static void fastcall noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
@@ -303,6 +325,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }
 
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);