aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorMatthew Wilcox <matthew@wil.cx>2008-03-14 13:43:13 -0400
committerMatthew Wilcox <willy@linux.intel.com>2008-04-17 10:42:46 -0400
commitf1241c87a16c4fe9f4f51d6ed3589f031c505e8d (patch)
tree2e0ee0f2b864c89eda9067bda0d8a98596e022e7 /kernel
parentf06d96865861c3dd01520f47e2e61c899db1631f (diff)
Add down_timeout and change ACPI to use it
ACPI currently emulates a timeout for semaphores with calls to down_trylock and sleep. This produces horrible behaviour in terms of fairness and excessive wakeups. Now that we have a unified semaphore implementation, adding a real down_timeout is almost trivial. Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/semaphore.c42
1 file changed, 35 insertions, 7 deletions
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 2da2aed950f3..5a12a8558982 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -35,6 +35,7 @@
35static noinline void __down(struct semaphore *sem); 35static noinline void __down(struct semaphore *sem);
36static noinline int __down_interruptible(struct semaphore *sem); 36static noinline int __down_interruptible(struct semaphore *sem);
37static noinline int __down_killable(struct semaphore *sem); 37static noinline int __down_killable(struct semaphore *sem);
38static noinline int __down_timeout(struct semaphore *sem, long jiffies);
38static noinline void __up(struct semaphore *sem); 39static noinline void __up(struct semaphore *sem);
39 40
40void down(struct semaphore *sem) 41void down(struct semaphore *sem)
@@ -104,6 +105,20 @@ int down_trylock(struct semaphore *sem)
104} 105}
105EXPORT_SYMBOL(down_trylock); 106EXPORT_SYMBOL(down_trylock);
106 107
108int down_timeout(struct semaphore *sem, long jiffies)
109{
110 unsigned long flags;
111 int result = 0;
112
113 spin_lock_irqsave(&sem->lock, flags);
114 if (unlikely(sem->count-- <= 0))
115 result = __down_timeout(sem, jiffies);
116 spin_unlock_irqrestore(&sem->lock, flags);
117
118 return result;
119}
120EXPORT_SYMBOL(down_timeout);
121
107void up(struct semaphore *sem) 122void up(struct semaphore *sem)
108{ 123{
109 unsigned long flags; 124 unsigned long flags;
@@ -142,10 +157,12 @@ static noinline void __sched __up_down_common(struct semaphore *sem)
142} 157}
143 158
144/* 159/*
145 * Because this function is inlined, the 'state' parameter will be constant, 160 * Because this function is inlined, the 'state' parameter will be
146 * and thus optimised away by the compiler. 161 * constant, and thus optimised away by the compiler. Likewise the
162 * 'timeout' parameter for the cases without timeouts.
147 */ 163 */
148static inline int __sched __down_common(struct semaphore *sem, long state) 164static inline int __sched __down_common(struct semaphore *sem, long state,
165 long timeout)
149{ 166{
150 int result = 0; 167 int result = 0;
151 struct task_struct *task = current; 168 struct task_struct *task = current;
@@ -160,14 +177,20 @@ static inline int __sched __down_common(struct semaphore *sem, long state)
160 goto interrupted; 177 goto interrupted;
161 if (state == TASK_KILLABLE && fatal_signal_pending(task)) 178 if (state == TASK_KILLABLE && fatal_signal_pending(task))
162 goto interrupted; 179 goto interrupted;
180 if (timeout <= 0)
181 goto timed_out;
163 __set_task_state(task, state); 182 __set_task_state(task, state);
164 spin_unlock_irq(&sem->lock); 183 spin_unlock_irq(&sem->lock);
165 schedule(); 184 timeout = schedule_timeout(timeout);
166 spin_lock_irq(&sem->lock); 185 spin_lock_irq(&sem->lock);
167 if (waiter.up) 186 if (waiter.up)
168 goto woken; 187 goto woken;
169 } 188 }
170 189
190 timed_out:
191 list_del(&waiter.list);
192 result = -ETIME;
193 goto woken;
171 interrupted: 194 interrupted:
172 list_del(&waiter.list); 195 list_del(&waiter.list);
173 result = -EINTR; 196 result = -EINTR;
@@ -187,17 +210,22 @@ static inline int __sched __down_common(struct semaphore *sem, long state)
187 210
188static noinline void __sched __down(struct semaphore *sem) 211static noinline void __sched __down(struct semaphore *sem)
189{ 212{
190 __down_common(sem, TASK_UNINTERRUPTIBLE); 213 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
191} 214}
192 215
193static noinline int __sched __down_interruptible(struct semaphore *sem) 216static noinline int __sched __down_interruptible(struct semaphore *sem)
194{ 217{
195 return __down_common(sem, TASK_INTERRUPTIBLE); 218 return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
196} 219}
197 220
198static noinline int __sched __down_killable(struct semaphore *sem) 221static noinline int __sched __down_killable(struct semaphore *sem)
199{ 222{
200 return __down_common(sem, TASK_KILLABLE); 223 return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
224}
225
226static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
227{
228 return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
201} 229}
202 230
203static noinline void __sched __up(struct semaphore *sem) 231static noinline void __sched __up(struct semaphore *sem)