aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMatthew Wilcox <matthew@wil.cx>2008-03-14 13:43:13 -0400
committerMatthew Wilcox <willy@linux.intel.com>2008-04-17 10:42:46 -0400
commitf1241c87a16c4fe9f4f51d6ed3589f031c505e8d (patch)
tree2e0ee0f2b864c89eda9067bda0d8a98596e022e7
parentf06d96865861c3dd01520f47e2e61c899db1631f (diff)
Add down_timeout and change ACPI to use it
ACPI currently emulates a timeout for semaphores with calls to down_trylock and sleep. This produces horrible behaviour in terms of fairness and excessive wakeups. Now that we have a unified semaphore implementation, adding a real down_timeout is almost trivial. Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
-rw-r--r--drivers/acpi/osl.c89
-rw-r--r--include/linux/semaphore.h6
-rw-r--r--kernel/semaphore.c42
3 files changed, 62 insertions, 75 deletions
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index a697fb6cf050..a498a6cc68fe 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -4,6 +4,8 @@
4 * Copyright (C) 2000 Andrew Henroid 4 * Copyright (C) 2000 Andrew Henroid
5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 * Copyright (c) 2008 Intel Corporation
8 * Author: Matthew Wilcox <willy@linux.intel.com>
7 * 9 *
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 * 11 *
@@ -37,15 +39,18 @@
37#include <linux/workqueue.h> 39#include <linux/workqueue.h>
38#include <linux/nmi.h> 40#include <linux/nmi.h>
39#include <linux/acpi.h> 41#include <linux/acpi.h>
40#include <acpi/acpi.h>
41#include <asm/io.h>
42#include <acpi/acpi_bus.h>
43#include <acpi/processor.h>
44#include <asm/uaccess.h>
45
46#include <linux/efi.h> 42#include <linux/efi.h>
47#include <linux/ioport.h> 43#include <linux/ioport.h>
48#include <linux/list.h> 44#include <linux/list.h>
45#include <linux/jiffies.h>
46#include <linux/semaphore.h>
47
48#include <asm/io.h>
49#include <asm/uaccess.h>
50
51#include <acpi/acpi.h>
52#include <acpi/acpi_bus.h>
53#include <acpi/processor.h>
49 54
50#define _COMPONENT ACPI_OS_SERVICES 55#define _COMPONENT ACPI_OS_SERVICES
51ACPI_MODULE_NAME("osl"); 56ACPI_MODULE_NAME("osl");
@@ -764,7 +769,6 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
764{ 769{
765 struct semaphore *sem = NULL; 770 struct semaphore *sem = NULL;
766 771
767
768 sem = acpi_os_allocate(sizeof(struct semaphore)); 772 sem = acpi_os_allocate(sizeof(struct semaphore));
769 if (!sem) 773 if (!sem)
770 return AE_NO_MEMORY; 774 return AE_NO_MEMORY;
@@ -791,12 +795,12 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
791{ 795{
792 struct semaphore *sem = (struct semaphore *)handle; 796 struct semaphore *sem = (struct semaphore *)handle;
793 797
794
795 if (!sem) 798 if (!sem)
796 return AE_BAD_PARAMETER; 799 return AE_BAD_PARAMETER;
797 800
798 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); 801 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
799 802
803 BUG_ON(!list_empty(&sem->wait_list));
800 kfree(sem); 804 kfree(sem);
801 sem = NULL; 805 sem = NULL;
802 806
@@ -804,21 +808,15 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
804} 808}
805 809
806/* 810/*
807 * TODO: The kernel doesn't have a 'down_timeout' function -- had to
808 * improvise. The process is to sleep for one scheduler quantum
809 * until the semaphore becomes available. Downside is that this
810 * may result in starvation for timeout-based waits when there's
811 * lots of semaphore activity.
812 *
813 * TODO: Support for units > 1? 811 * TODO: Support for units > 1?
814 */ 812 */
815acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) 813acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
816{ 814{
817 acpi_status status = AE_OK; 815 acpi_status status = AE_OK;
818 struct semaphore *sem = (struct semaphore *)handle; 816 struct semaphore *sem = (struct semaphore *)handle;
817 long jiffies;
819 int ret = 0; 818 int ret = 0;
820 819
821
822 if (!sem || (units < 1)) 820 if (!sem || (units < 1))
823 return AE_BAD_PARAMETER; 821 return AE_BAD_PARAMETER;
824 822
@@ -828,58 +826,14 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
828 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 826 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
829 handle, units, timeout)); 827 handle, units, timeout));
830 828
831 /* 829 if (timeout == ACPI_WAIT_FOREVER)
832 * This can be called during resume with interrupts off. 830 jiffies = MAX_SCHEDULE_TIMEOUT;
833 * Like boot-time, we should be single threaded and will 831 else
834 * always get the lock if we try -- timeout or not. 832 jiffies = msecs_to_jiffies(timeout);
835 * If this doesn't succeed, then we will oops courtesy of 833
836 * might_sleep() in down(). 834 ret = down_timeout(sem, jiffies);
837 */ 835 if (ret)
838 if (!down_trylock(sem)) 836 status = AE_TIME;
839 return AE_OK;
840
841 switch (timeout) {
842 /*
843 * No Wait:
844 * --------
845 * A zero timeout value indicates that we shouldn't wait - just
846 * acquire the semaphore if available otherwise return AE_TIME
847 * (a.k.a. 'would block').
848 */
849 case 0:
850 if (down_trylock(sem))
851 status = AE_TIME;
852 break;
853
854 /*
855 * Wait Indefinitely:
856 * ------------------
857 */
858 case ACPI_WAIT_FOREVER:
859 down(sem);
860 break;
861
862 /*
863 * Wait w/ Timeout:
864 * ----------------
865 */
866 default:
867 // TODO: A better timeout algorithm?
868 {
869 int i = 0;
870 static const int quantum_ms = 1000 / HZ;
871
872 ret = down_trylock(sem);
873 for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
874 schedule_timeout_interruptible(1);
875 ret = down_trylock(sem);
876 }
877
878 if (ret != 0)
879 status = AE_TIME;
880 }
881 break;
882 }
883 837
884 if (ACPI_FAILURE(status)) { 838 if (ACPI_FAILURE(status)) {
885 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 839 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
@@ -902,7 +856,6 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
902{ 856{
903 struct semaphore *sem = (struct semaphore *)handle; 857 struct semaphore *sem = (struct semaphore *)handle;
904 858
905
906 if (!sem || (units < 1)) 859 if (!sem || (units < 1))
907 return AE_BAD_PARAMETER; 860 return AE_BAD_PARAMETER;
908 861
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 88f2a28cc0f1..a107aebd9148 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -75,6 +75,12 @@ extern int __must_check down_killable(struct semaphore *sem);
75extern int __must_check down_trylock(struct semaphore *sem); 75extern int __must_check down_trylock(struct semaphore *sem);
76 76
77/* 77/*
78 * As down(), except this function will return -ETIME if it fails to
79 * acquire the semaphore within the specified number of jiffies.
80 */
81extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
82
83/*
78 * Release the semaphore. Unlike mutexes, up() may be called from any 84 * Release the semaphore. Unlike mutexes, up() may be called from any
79 * context and even by tasks which have never called down(). 85 * context and even by tasks which have never called down().
80 */ 86 */
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 2da2aed950f3..5a12a8558982 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -35,6 +35,7 @@
35static noinline void __down(struct semaphore *sem); 35static noinline void __down(struct semaphore *sem);
36static noinline int __down_interruptible(struct semaphore *sem); 36static noinline int __down_interruptible(struct semaphore *sem);
37static noinline int __down_killable(struct semaphore *sem); 37static noinline int __down_killable(struct semaphore *sem);
38static noinline int __down_timeout(struct semaphore *sem, long jiffies);
38static noinline void __up(struct semaphore *sem); 39static noinline void __up(struct semaphore *sem);
39 40
40void down(struct semaphore *sem) 41void down(struct semaphore *sem)
@@ -104,6 +105,20 @@ int down_trylock(struct semaphore *sem)
104} 105}
105EXPORT_SYMBOL(down_trylock); 106EXPORT_SYMBOL(down_trylock);
106 107
108int down_timeout(struct semaphore *sem, long jiffies)
109{
110 unsigned long flags;
111 int result = 0;
112
113 spin_lock_irqsave(&sem->lock, flags);
114 if (unlikely(sem->count-- <= 0))
115 result = __down_timeout(sem, jiffies);
116 spin_unlock_irqrestore(&sem->lock, flags);
117
118 return result;
119}
120EXPORT_SYMBOL(down_timeout);
121
107void up(struct semaphore *sem) 122void up(struct semaphore *sem)
108{ 123{
109 unsigned long flags; 124 unsigned long flags;
@@ -142,10 +157,12 @@ static noinline void __sched __up_down_common(struct semaphore *sem)
142} 157}
143 158
144/* 159/*
145 * Because this function is inlined, the 'state' parameter will be constant, 160 * Because this function is inlined, the 'state' parameter will be
146 * and thus optimised away by the compiler. 161 * constant, and thus optimised away by the compiler. Likewise the
162 * 'timeout' parameter for the cases without timeouts.
147 */ 163 */
148static inline int __sched __down_common(struct semaphore *sem, long state) 164static inline int __sched __down_common(struct semaphore *sem, long state,
165 long timeout)
149{ 166{
150 int result = 0; 167 int result = 0;
151 struct task_struct *task = current; 168 struct task_struct *task = current;
@@ -160,14 +177,20 @@ static inline int __sched __down_common(struct semaphore *sem, long state)
160 goto interrupted; 177 goto interrupted;
161 if (state == TASK_KILLABLE && fatal_signal_pending(task)) 178 if (state == TASK_KILLABLE && fatal_signal_pending(task))
162 goto interrupted; 179 goto interrupted;
180 if (timeout <= 0)
181 goto timed_out;
163 __set_task_state(task, state); 182 __set_task_state(task, state);
164 spin_unlock_irq(&sem->lock); 183 spin_unlock_irq(&sem->lock);
165 schedule(); 184 timeout = schedule_timeout(timeout);
166 spin_lock_irq(&sem->lock); 185 spin_lock_irq(&sem->lock);
167 if (waiter.up) 186 if (waiter.up)
168 goto woken; 187 goto woken;
169 } 188 }
170 189
190 timed_out:
191 list_del(&waiter.list);
192 result = -ETIME;
193 goto woken;
171 interrupted: 194 interrupted:
172 list_del(&waiter.list); 195 list_del(&waiter.list);
173 result = -EINTR; 196 result = -EINTR;
@@ -187,17 +210,22 @@ static inline int __sched __down_common(struct semaphore *sem, long state)
187 210
188static noinline void __sched __down(struct semaphore *sem) 211static noinline void __sched __down(struct semaphore *sem)
189{ 212{
190 __down_common(sem, TASK_UNINTERRUPTIBLE); 213 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
191} 214}
192 215
193static noinline int __sched __down_interruptible(struct semaphore *sem) 216static noinline int __sched __down_interruptible(struct semaphore *sem)
194{ 217{
195 return __down_common(sem, TASK_INTERRUPTIBLE); 218 return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
196} 219}
197 220
198static noinline int __sched __down_killable(struct semaphore *sem) 221static noinline int __sched __down_killable(struct semaphore *sem)
199{ 222{
200 return __down_common(sem, TASK_KILLABLE); 223 return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
224}
225
226static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
227{
228 return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
201} 229}
202 230
203static noinline void __sched __up(struct semaphore *sem) 231static noinline void __sched __up(struct semaphore *sem)