| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-01-31 19:45:47 -0500 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-01-31 19:45:47 -0500 |
| commit | 75659ca0c10992dcb39258518368a0f6f56e935d (patch) | |
| tree | 5d014ceb2f10158061a23d0d976f9a613d85e659 /kernel/mutex.c | |
| parent | fbdde7bd274d74729954190f99afcb1e3d9bbfba (diff) | |
| parent | 2dfe485a2c8afa54cb069fcf48476f6c90ea3fdf (diff) | |
Merge branch 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc
* 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc: (22 commits)
Remove commented-out code copied from NFS
NFS: Switch from intr mount option to TASK_KILLABLE
Add wait_for_completion_killable
Add wait_event_killable
Add schedule_timeout_killable
Use mutex_lock_killable in vfs_readdir
Add mutex_lock_killable
Use lock_page_killable
Add lock_page_killable
Add fatal_signal_pending
Add TASK_WAKEKILL
exit: Use task_is_*
signal: Use task_is_*
sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
ptrace: Use task_is_*
power: Use task_is_*
wait: Use TASK_NORMAL
proc/base.c: Use task_is_*
proc/array.c: Use TASK_REPORT
perfmon: Use task_is_*
...
Fixed up conflicts in NFS/sunrpc manually..
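The centerpiece of the series is the new TASK_KILLABLE sleep state: a task sleeping in it ignores ordinary signals but is still woken by fatal ones such as SIGKILL, so a stuck resource (an unresponsive NFS server, say) no longer pins tasks in unkillable sleep. As a minimal caller-side sketch of how the mutex_lock_killable() added below is meant to be used (the lock and function here are hypothetical, not part of this series):

```c
#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(demo_lock);	/* hypothetical mutex, for illustration only */

static int demo_operation(void)
{
	int ret;

	/*
	 * Block until the mutex is acquired, but let a fatal signal
	 * abort the wait; a plain mutex_lock() here would leave a
	 * killed task stuck in uninterruptible sleep.
	 */
	ret = mutex_lock_killable(&demo_lock);
	if (ret)
		return ret;	/* -EINTR: wait aborted by a fatal signal */

	/* ... critical section ... */

	mutex_unlock(&demo_lock);
	return 0;
}
```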
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r-- kernel/mutex.c | 36 +++++++++++++++++++++++++++++++++---
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d7fe50cc556f..d9ec9b666250 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -166,9 +166,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-			     signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+					signal_pending(task)) ||
+			     (state == TASK_KILLABLE &&
+					fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					    task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
@@ -211,6 +214,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
 int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
@@ -272,6 +283,9 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -294,6 +308,14 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+		(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
 static void fastcall noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
@@ -303,6 +325,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }
 
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
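The killable variant deliberately mirrors the interruptible one: mutex_lock_killable() tries the atomic fastpath through __mutex_fastpath_lock_retval() and only drops into __mutex_lock_killable_slowpath() on contention, where __mutex_lock_common() sleeps in TASK_KILLABLE and bails out only when fatal_signal_pending() is true. The same sleep state drives the other primitives added by the series; as a hedged sketch (the wait queue and condition flag are hypothetical), wait_event_killable() would replace an interruptible wait like this:

```c
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* hypothetical wait queue */
static int demo_done;				/* hypothetical wake-up condition */

static int demo_wait(void)
{
	/*
	 * Sleep in TASK_KILLABLE until demo_done becomes true: ordinary
	 * signals are ignored, but a fatal signal wakes the task and
	 * the macro returns -ERESTARTSYS.
	 */
	if (wait_event_killable(demo_wq, demo_done))
		return -ERESTARTSYS;

	return 0;
}
```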