path: root/kernel/futex.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2014-03-31 13:59:39 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-03-31 13:59:39 -0400
commit    462bf234a82ae1ae9d7628f59bc81022591e1348 (patch)
tree      f75eea7864ae7c72c0757d5d090e38f757b5cb2d /kernel/futex.c
parent    455c6fdbd219161bd09b1165f11699d6d73de11c (diff)
parent    6f008e72cd111a119b5d8de8c5438d892aae99eb (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The biggest change is the MCS spinlock generalization changes from
  Tim Chen, Peter Zijlstra, Jason Low et al. There's also lockdep
  fixes/enhancements from Oleg Nesterov, in particular a false negative
  fix related to lockdep_set_novalidate_class() usage"

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  locking/mutex: Fix debug checks
  locking/mutexes: Add extra reschedule point
  locking/mutexes: Introduce cancelable MCS lock for adaptive spinning
  locking/mutexes: Unlock the mutex without the wait_lock
  locking/mutexes: Modify the way optimistic spinners are queued
  locking/mutexes: Return false if task need_resched() in mutex_can_spin_on_owner()
  locking: Move mcs_spinlock.h into kernel/locking/
  m68k: Skip futex_atomic_cmpxchg_inatomic() test
  futex: Allow architectures to skip futex_atomic_cmpxchg_inatomic() test
  Revert "sched/wait: Suppress Sparse 'variable shadowing' warning"
  lockdep: Change lockdep_set_novalidate_class() to use _and_name
  lockdep: Change mark_held_locks() to check hlock->check instead of lockdep_no_validate
  lockdep: Don't create the wrong dependency on hlock->check == 0
  lockdep: Make held_lock->check and "int check" argument bool
  locking/mcs: Allow architecture specific asm files to be used for contended case
  locking/mcs: Order the header files in Kbuild of each architecture in alphabetical order
  sched/wait: Suppress Sparse 'variable shadowing' warning
  hung_task/Documentation: Fix hung_task_warnings description
  locking/mcs: Allow architectures to hook in to contended paths
  locking/mcs: Micro-optimize the MCS code, add extra comments
  ...
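For readers unfamiliar with the MCS lock referenced in the message above, the following stand-alone C11 sketch illustrates the basic queued-spinlock idea: each waiter brings its own node and spins on its own flag, so waiters do not all hammer one shared cache line. This is an illustration only, not the kernel's mcs_spinlock code; every name in it (mcs_lock, mcs_node, mcs_lock_acquire, mcs_lock_release) is made up for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;   /* successor in the wait queue */
	atomic_bool locked;              /* set by our predecessor when it is our turn */
};

struct mcs_lock {
	struct mcs_node *_Atomic tail;   /* last waiter in the queue, or NULL */
};

static void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store(&node->next, NULL);
	atomic_store(&node->locked, false);

	/* Join the queue: atomically make ourselves the new tail. */
	prev = atomic_exchange(&lock->tail, node);
	if (!prev)
		return;                  /* queue was empty: lock is ours */

	/* Link behind the previous waiter and spin on our own flag only. */
	atomic_store(&prev->next, node);
	while (!atomic_load(&node->locked))
		;                        /* the kernel would cpu_relax() here */
}

static void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load(&node->next);

	if (!next) {
		/* No visible successor: try to reset the queue to empty. */
		struct mcs_node *expected = node;
		if (atomic_compare_exchange_strong(&lock->tail, &expected, NULL))
			return;
		/* A successor is enqueueing; wait for it to link itself in. */
		while (!(next = atomic_load(&node->next)))
			;
	}
	atomic_store(&next->locked, true);   /* hand the lock to the successor */
}

int main(void)
{
	struct mcs_lock lock = { NULL };
	struct mcs_node me;

	mcs_lock_acquire(&lock, &me);
	/* critical section */
	mcs_lock_release(&lock, &me);
	return 0;
}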
Diffstat (limited to 'kernel/futex.c')
-rw-r--r--  kernel/futex.c | 37
1 file changed, 24 insertions, 13 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 08ec814ad9d2..67dacaf93e56 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -157,7 +157,9 @@
  * enqueue.
  */
 
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 int __read_mostly futex_cmpxchg_enabled;
+#endif
 
 /*
  * Futex flags used to encode options to functions and preserve them across
@@ -2875,9 +2877,28 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
 }
 
-static int __init futex_init(void)
+static void __init futex_detect_cmpxchg(void)
 {
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 	u32 curval;
+
+	/*
+	 * This will fail and we want it. Some arch implementations do
+	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
+	 * functionality. We want to know that before we call in any
+	 * of the complex code paths. Also we want to prevent
+	 * registration of robust lists in that case. NULL is
+	 * guaranteed to fault and we get -EFAULT on functional
+	 * implementation, the non-functional ones will return
+	 * -ENOSYS.
+	 */
+	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+		futex_cmpxchg_enabled = 1;
+#endif
+}
+
+static int __init futex_init(void)
+{
 	unsigned int futex_shift;
 	unsigned long i;
 
@@ -2893,18 +2914,8 @@ static int __init futex_init(void)
 			       &futex_shift, NULL,
 			       futex_hashsize, futex_hashsize);
 	futex_hashsize = 1UL << futex_shift;
-	/*
-	 * This will fail and we want it. Some arch implementations do
-	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
-	 * functionality. We want to know that before we call in any
-	 * of the complex code paths. Also we want to prevent
-	 * registration of robust lists in that case. NULL is
-	 * guaranteed to fault and we get -EFAULT on functional
-	 * implementation, the non-functional ones will return
-	 * -ENOSYS.
-	 */
-	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
-		futex_cmpxchg_enabled = 1;
+
+	futex_detect_cmpxchg();
 
 	for (i = 0; i < futex_hashsize; i++) {
 		atomic_set(&futex_queues[i].waiters, 0);
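As a rough illustration of the probe performed by futex_detect_cmpxchg() in the diff above, the stand-alone C sketch below mimics the pattern with hypothetical names (arch_cmpxchg_user, have_cmpxchg, ARCH_HAS_CMPXCHG are made up for the example and are not kernel identifiers): an "arch" either implements the compare-and-exchange helper or stubs it out with -ENOSYS, and the init path probes it once with a NULL pointer, treating -EFAULT as proof that a real implementation exists.

#include <errno.h>
#include <stdio.h>

static int have_cmpxchg;            /* analogue of futex_cmpxchg_enabled */

/* Build with -DARCH_HAS_CMPXCHG to simulate an arch that implements the helper. */
static int arch_cmpxchg_user(unsigned *uval, unsigned *uaddr,
			     unsigned oldval, unsigned newval)
{
#ifdef ARCH_HAS_CMPXCHG
	if (!uaddr)
		return -EFAULT;      /* a real access to NULL would have faulted */
	*uval = *uaddr;
	if (*uaddr == oldval)
		*uaddr = newval;
	return 0;
#else
	(void)uval; (void)uaddr; (void)oldval; (void)newval;
	return -ENOSYS;              /* stubbed out: feature unavailable */
#endif
}

static void detect_cmpxchg(void)
{
	unsigned curval;

	/* Probe with NULL: only a functional implementation reports -EFAULT. */
	if (arch_cmpxchg_user(&curval, NULL, 0, 0) == -EFAULT)
		have_cmpxchg = 1;
}

int main(void)
{
	detect_cmpxchg();
	printf("cmpxchg %s\n", have_cmpxchg ? "available" : "not available");
	return 0;
}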