author     Thomas Gleixner <tglx@linutronix.de>                   2008-02-23 18:23:57 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-23 20:12:15 -0500
commit     a0c1e9073ef7428a14309cba010633a6cd6719ea
tree       05ce792ddcde92e73d1bae4c8e20f607a2e7db40  /kernel
parent     3e4ab747efa8e78562ec6782b08bbf21a00aba1b
futex: runtime enable pi and robust functionality
Not all architectures implement futex_atomic_cmpxchg_inatomic(). The default
implementation returns -ENOSYS, which is currently not handled inside the
futex guts.
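
For reference, the default looks roughly like the asm-generic stub of that time (a sketch, not the verbatim header):

/*
 * Sketch of the default fallback described above: architectures without a
 * real atomic cmpxchg on user space futex words simply report that the
 * operation is unsupported. Until this patch the futex core never checked
 * for that return value.
 */
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
        return -ENOSYS;
}
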
Futex PI calls and robust list exits with a held futex result in an endless
loop in the futex code on architectures which have no support.
Fixing up every place where futex_atomic_cmpxchg_inatomic() is called would
add a fair number of extra if/else constructs to the already complex code. It
is also not possible to disable the robust feature before user space tries to
register robust lists.
Compile-time disabling is not a good idea either, as there are already
architectures with runtime detection of futex_atomic_cmpxchg_inatomic() support.
Detect the functionality at runtime instead by calling
cmpxchg_futex_value_locked() with a NULL pointer from the futex initialization
code. This is guaranteed to fail, but the call to
futex_atomic_cmpxchg_inatomic() happens with pagefaults disabled, so the NULL
access is reported as an error code rather than being handled as a page fault.
On architectures which use the asm-generic implementation or have runtime
CPU feature detection, a -ENOSYS return value disables the PI/robust features.
On architectures with a working implementation the call returns -EFAULT and
the PI/robust features are enabled.
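
Concretely, the probe added to the futex init code (see the kernel/futex.c hunk below) boils down to:

        u32 curval;

        /*
         * Pass a NULL user pointer on purpose: a working
         * futex_atomic_cmpxchg_inatomic() faults and reports -EFAULT,
         * while the unimplemented fallback reports -ENOSYS.
         */
        curval = cmpxchg_futex_value_locked(NULL, 0, 0);
        if (curval == -EFAULT)
                futex_cmpxchg_enabled = 1;
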
When the detection fails, the relevant syscalls return -ENOSYS and the robust
list exit code is skipped.
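
From user space the new behaviour is directly observable. A minimal check along these lines (a hypothetical test program, not part of this patch; it assumes FUTEX_LOCK_PI from <linux/futex.h> and the raw futex syscall) would be:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>

int main(void)
{
        uint32_t futex_word = 0;        /* value 0 == unlocked PI futex */
        long ret;

        /*
         * Try to acquire the PI futex. On a kernel/architecture where the
         * runtime detection failed, this now returns -1 with errno ENOSYS
         * instead of looping inside the kernel.
         */
        ret = syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);

        if (ret == -1 && errno == ENOSYS)
                printf("PI futexes are not supported here\n");
        else
                printf("FUTEX_LOCK_PI returned %ld (futex word now %u)\n",
                       ret, futex_word);
        return 0;
}
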
Fixes http://lkml.org/lkml/2008/2/11/149
Originally reported by: Lennert Buytenhek
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Lennert Buytenhek <buytenh@wantstofly.org>
Cc: Riku Voipio <riku.voipio@movial.fi>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/futex.c         | 38
-rw-r--r--   kernel/futex_compat.c  |  9
2 files changed, 43 insertions, 4 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index c21f667c63f6..06968cd79200 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -60,6 +60,8 @@
 
 #include "rtmutex_common.h"
 
+int __read_mostly futex_cmpxchg_enabled;
+
 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
 
 /*
@@ -469,6 +471,8 @@ void exit_pi_state_list(struct task_struct *curr)
         struct futex_hash_bucket *hb;
         union futex_key key;
 
+        if (!futex_cmpxchg_enabled)
+                return;
         /*
          * We are a ZOMBIE and nobody can enqueue itself on
          * pi_state_list anymore, but we have to be careful
@@ -1870,6 +1874,8 @@ asmlinkage long
 sys_set_robust_list(struct robust_list_head __user *head,
                     size_t len)
 {
+        if (!futex_cmpxchg_enabled)
+                return -ENOSYS;
         /*
          * The kernel knows only one size for now:
          */
@@ -1894,6 +1900,9 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
         struct robust_list_head __user *head;
         unsigned long ret;
 
+        if (!futex_cmpxchg_enabled)
+                return -ENOSYS;
+
         if (!pid)
                 head = current->robust_list;
         else {
@@ -1997,6 +2006,9 @@ void exit_robust_list(struct task_struct *curr)
         unsigned long futex_offset;
         int rc;
 
+        if (!futex_cmpxchg_enabled)
+                return;
+
         /*
          * Fetch the list head (which was registered earlier, via
          * sys_set_robust_list()):
@@ -2051,7 +2063,7 @@ void exit_robust_list(struct task_struct *curr)
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                 u32 __user *uaddr2, u32 val2, u32 val3)
 {
-        int ret;
+        int ret = -ENOSYS;
         int cmd = op & FUTEX_CMD_MASK;
         struct rw_semaphore *fshared = NULL;
 
@@ -2083,13 +2095,16 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
                 break;
         case FUTEX_LOCK_PI:
-                ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
+                if (futex_cmpxchg_enabled)
+                        ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
                 break;
         case FUTEX_UNLOCK_PI:
-                ret = futex_unlock_pi(uaddr, fshared);
+                if (futex_cmpxchg_enabled)
+                        ret = futex_unlock_pi(uaddr, fshared);
                 break;
         case FUTEX_TRYLOCK_PI:
-                ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
+                if (futex_cmpxchg_enabled)
+                        ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
                 break;
         default:
                 ret = -ENOSYS;
@@ -2145,8 +2160,23 @@ static struct file_system_type futex_fs_type = {
 
 static int __init init(void)
 {
+        u32 curval;
         int i;
 
+        /*
+         * This will fail and we want it. Some arch implementations do
+         * runtime detection of the futex_atomic_cmpxchg_inatomic()
+         * functionality. We want to know that before we call in any
+         * of the complex code paths. Also we want to prevent
+         * registration of robust lists in that case. NULL is
+         * guaranteed to fault and we get -EFAULT on functional
+         * implementation, the non functional ones will return
+         * -ENOSYS.
+         */
+        curval = cmpxchg_futex_value_locked(NULL, 0, 0);
+        if (curval == -EFAULT)
+                futex_cmpxchg_enabled = 1;
+
         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
                 spin_lock_init(&futex_queues[i].lock);
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7d5e4b016f39..ff90f049f8f6 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -54,6 +54,9 @@ void compat_exit_robust_list(struct task_struct *curr)
         compat_long_t futex_offset;
         int rc;
 
+        if (!futex_cmpxchg_enabled)
+                return;
+
         /*
          * Fetch the list head (which was registered earlier, via
          * sys_set_robust_list()):
@@ -115,6 +118,9 @@ asmlinkage long
 compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
                            compat_size_t len)
 {
+        if (!futex_cmpxchg_enabled)
+                return -ENOSYS;
+
         if (unlikely(len != sizeof(*head)))
                 return -EINVAL;
 
@@ -130,6 +136,9 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
         struct compat_robust_list_head __user *head;
         unsigned long ret;
 
+        if (!futex_cmpxchg_enabled)
+                return -ENOSYS;
+
         if (!pid)
                 head = current->compat_robust_list;
         else {