aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorMatthew Wilcox <matthew@wil.cx>2008-03-14 13:19:33 -0400
committerMatthew Wilcox <willy@linux.intel.com>2008-04-17 10:42:40 -0400
commitf06d96865861c3dd01520f47e2e61c899db1631f (patch)
treed2fbd08af06a96b10192f7b27dd3ea2df1bb93d8 /kernel
parent64ac24e738823161693bf791f87adc802cf529ff (diff)
Introduce down_killable()
down_killable() is the functional counterpart of mutex_lock_killable. Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/semaphore.c22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index d5a72702f261..2da2aed950f3 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -34,6 +34,7 @@
 
 static noinline void __down(struct semaphore *sem);
 static noinline int __down_interruptible(struct semaphore *sem);
+static noinline int __down_killable(struct semaphore *sem);
 static noinline void __up(struct semaphore *sem);
 
 void down(struct semaphore *sem)
@@ -61,6 +62,20 @@ int down_interruptible(struct semaphore *sem)
 }
 EXPORT_SYMBOL(down_interruptible);
 
+int down_killable(struct semaphore *sem)
+{
+	unsigned long flags;
+	int result = 0;
+
+	spin_lock_irqsave(&sem->lock, flags);
+	if (unlikely(sem->count-- <= 0))
+		result = __down_killable(sem);
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(down_killable);
+
 /**
  * down_trylock - try to acquire the semaphore, without waiting
  * @sem: the semaphore to be acquired
@@ -143,6 +158,8 @@ static inline int __sched __down_common(struct semaphore *sem, long state)
 	for (;;) {
 		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
 			goto interrupted;
+		if (state == TASK_KILLABLE && fatal_signal_pending(task))
+			goto interrupted;
 		__set_task_state(task, state);
 		spin_unlock_irq(&sem->lock);
 		schedule();
@@ -178,6 +195,11 @@ static noinline int __sched __down_interruptible(struct semaphore *sem)
 	return __down_common(sem, TASK_INTERRUPTIBLE);
 }
 
+static noinline int __sched __down_killable(struct semaphore *sem)
+{
+	return __down_common(sem, TASK_KILLABLE);
+}
+
 static noinline void __sched __up(struct semaphore *sem)
 {
 	if (unlikely(list_empty(&sem->wait_list)))