aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-03-31 13:59:39 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-03-31 13:59:39 -0400
commit462bf234a82ae1ae9d7628f59bc81022591e1348 (patch)
treef75eea7864ae7c72c0757d5d090e38f757b5cb2d
parent455c6fdbd219161bd09b1165f11699d6d73de11c (diff)
parent6f008e72cd111a119b5d8de8c5438d892aae99eb (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar: "The biggest change is the MCS spinlock generalization changes from Tim Chen, Peter Zijlstra, Jason Low et al. There's also lockdep fixes/enhancements from Oleg Nesterov, in particular a false negative fix related to lockdep_set_novalidate_class() usage" * 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits) locking/mutex: Fix debug checks locking/mutexes: Add extra reschedule point locking/mutexes: Introduce cancelable MCS lock for adaptive spinning locking/mutexes: Unlock the mutex without the wait_lock locking/mutexes: Modify the way optimistic spinners are queued locking/mutexes: Return false if task need_resched() in mutex_can_spin_on_owner() locking: Move mcs_spinlock.h into kernel/locking/ m68k: Skip futex_atomic_cmpxchg_inatomic() test futex: Allow architectures to skip futex_atomic_cmpxchg_inatomic() test Revert "sched/wait: Suppress Sparse 'variable shadowing' warning" lockdep: Change lockdep_set_novalidate_class() to use _and_name lockdep: Change mark_held_locks() to check hlock->check instead of lockdep_no_validate lockdep: Don't create the wrong dependency on hlock->check == 0 lockdep: Make held_lock->check and "int check" argument bool locking/mcs: Allow architecture specific asm files to be used for contended case locking/mcs: Order the header files in Kbuild of each architecture in alphabetical order sched/wait: Suppress Sparse 'variable shadowing' warning hung_task/Documentation: Fix hung_task_warnings description locking/mcs: Allow architectures to hook in to contended paths locking/mcs: Micro-optimize the MCS code, add extra comments ...
-rw-r--r--Documentation/sysctl/kernel.txt5
-rw-r--r--arch/alpha/include/asm/Kbuild7
-rw-r--r--arch/arc/include/asm/Kbuild7
-rw-r--r--arch/arm/include/asm/Kbuild5
-rw-r--r--arch/arm64/include/asm/Kbuild7
-rw-r--r--arch/avr32/include/asm/Kbuild41
-rw-r--r--arch/blackfin/include/asm/Kbuild7
-rw-r--r--arch/c6x/include/asm/Kbuild5
-rw-r--r--arch/cris/include/asm/Kbuild3
-rw-r--r--arch/frv/include/asm/Kbuild5
-rw-r--r--arch/hexagon/include/asm/Kbuild7
-rw-r--r--arch/ia64/include/asm/Kbuild5
-rw-r--r--arch/m32r/include/asm/Kbuild5
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/include/asm/Kbuild3
-rw-r--r--arch/metag/include/asm/Kbuild5
-rw-r--r--arch/microblaze/include/asm/Kbuild5
-rw-r--r--arch/mips/include/asm/Kbuild5
-rw-r--r--arch/mn10300/include/asm/Kbuild3
-rw-r--r--arch/openrisc/include/asm/Kbuild11
-rw-r--r--arch/parisc/include/asm/Kbuild32
-rw-r--r--arch/powerpc/include/asm/Kbuild5
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/Kbuild5
-rw-r--r--arch/score/include/asm/Kbuild5
-rw-r--r--arch/sh/include/asm/Kbuild9
-rw-r--r--arch/sparc/include/asm/Kbuild11
-rw-r--r--arch/tile/include/asm/Kbuild5
-rw-r--r--arch/um/include/asm/Kbuild34
-rw-r--r--arch/unicore32/include/asm/Kbuild5
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/xtensa/include/asm/Kbuild5
-rw-r--r--drivers/tty/tty_ldsem.c15
-rw-r--r--include/asm-generic/mcs_spinlock.h13
-rw-r--r--include/linux/futex.h4
-rw-r--r--include/linux/lockdep.h27
-rw-r--r--include/linux/mutex.h5
-rw-r--r--include/linux/rcupdate.h2
-rw-r--r--init/Kconfig7
-rw-r--r--kernel/futex.c37
-rw-r--r--kernel/locking/Makefile2
-rw-r--r--kernel/locking/lockdep.c17
-rw-r--r--kernel/locking/mcs_spinlock.c178
-rw-r--r--kernel/locking/mcs_spinlock.h129
-rw-r--r--kernel/locking/mutex-debug.c6
-rw-r--r--kernel/locking/mutex.c94
46 files changed, 584 insertions, 212 deletions
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index e55124e7c40c..e1d28fbf7570 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -320,10 +320,11 @@ This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
320 320
321============================================================== 321==============================================================
322 322
323hung_task_warning: 323hung_task_warnings:
324 324
325The maximum number of warnings to report. During a check interval 325The maximum number of warnings to report. During a check interval
326When this value is reached, no more the warnings will be reported. 326if a hung task is detected, this value is decreased by 1.
327When this value reaches 0, no more warnings will be reported.
327This file shows up if CONFIG_DETECT_HUNG_TASK is enabled. 328This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
328 329
329-1: report an infinite number of warnings. 330-1: report an infinite number of warnings.
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index a73a8e208a4a..7736f426ff3b 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -1,7 +1,8 @@
1 1
2generic-y += clkdev.h
3 2
3generic-y += clkdev.h
4generic-y += exec.h 4generic-y += exec.h
5generic-y += trace_clock.h
6generic-y += preempt.h
7generic-y += hash.h 5generic-y += hash.h
6generic-y += mcs_spinlock.h
7generic-y += preempt.h
8generic-y += trace_clock.h
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 0d3362991c31..e76fd79f32b0 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,15 +1,15 @@
1generic-y += auxvec.h 1generic-y += auxvec.h
2generic-y += barrier.h 2generic-y += barrier.h
3generic-y += bugs.h
4generic-y += bitsperlong.h 3generic-y += bitsperlong.h
4generic-y += bugs.h
5generic-y += clkdev.h 5generic-y += clkdev.h
6generic-y += cputime.h 6generic-y += cputime.h
7generic-y += device.h 7generic-y += device.h
8generic-y += div64.h 8generic-y += div64.h
9generic-y += emergency-restart.h 9generic-y += emergency-restart.h
10generic-y += errno.h 10generic-y += errno.h
11generic-y += fcntl.h
12generic-y += fb.h 11generic-y += fb.h
12generic-y += fcntl.h
13generic-y += ftrace.h 13generic-y += ftrace.h
14generic-y += hardirq.h 14generic-y += hardirq.h
15generic-y += hash.h 15generic-y += hash.h
@@ -22,6 +22,7 @@ generic-y += kmap_types.h
22generic-y += kvm_para.h 22generic-y += kvm_para.h
23generic-y += local.h 23generic-y += local.h
24generic-y += local64.h 24generic-y += local64.h
25generic-y += mcs_spinlock.h
25generic-y += mman.h 26generic-y += mman.h
26generic-y += msgbuf.h 27generic-y += msgbuf.h
27generic-y += param.h 28generic-y += param.h
@@ -30,6 +31,7 @@ generic-y += pci.h
30generic-y += percpu.h 31generic-y += percpu.h
31generic-y += poll.h 32generic-y += poll.h
32generic-y += posix_types.h 33generic-y += posix_types.h
34generic-y += preempt.h
33generic-y += resource.h 35generic-y += resource.h
34generic-y += scatterlist.h 36generic-y += scatterlist.h
35generic-y += sembuf.h 37generic-y += sembuf.h
@@ -48,4 +50,3 @@ generic-y += ucontext.h
48generic-y += user.h 50generic-y += user.h
49generic-y += vga.h 51generic-y += vga.h
50generic-y += xor.h 52generic-y += xor.h
51generic-y += preempt.h
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 3278afe2c3ab..23e728ecf8ab 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -7,16 +7,19 @@ generic-y += current.h
7generic-y += emergency-restart.h 7generic-y += emergency-restart.h
8generic-y += errno.h 8generic-y += errno.h
9generic-y += exec.h 9generic-y += exec.h
10generic-y += hash.h
10generic-y += ioctl.h 11generic-y += ioctl.h
11generic-y += ipcbuf.h 12generic-y += ipcbuf.h
12generic-y += irq_regs.h 13generic-y += irq_regs.h
13generic-y += kdebug.h 14generic-y += kdebug.h
14generic-y += local.h 15generic-y += local.h
15generic-y += local64.h 16generic-y += local64.h
17generic-y += mcs_spinlock.h
16generic-y += msgbuf.h 18generic-y += msgbuf.h
17generic-y += param.h 19generic-y += param.h
18generic-y += parport.h 20generic-y += parport.h
19generic-y += poll.h 21generic-y += poll.h
22generic-y += preempt.h
20generic-y += resource.h 23generic-y += resource.h
21generic-y += sections.h 24generic-y += sections.h
22generic-y += segment.h 25generic-y += segment.h
@@ -33,5 +36,3 @@ generic-y += termios.h
33generic-y += timex.h 36generic-y += timex.h
34generic-y += trace_clock.h 37generic-y += trace_clock.h
35generic-y += unaligned.h 38generic-y += unaligned.h
36generic-y += preempt.h
37generic-y += hash.h
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 71c53ecfcc3a..3bdfdda70567 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -12,6 +12,7 @@ generic-y += dma.h
12generic-y += emergency-restart.h 12generic-y += emergency-restart.h
13generic-y += errno.h 13generic-y += errno.h
14generic-y += ftrace.h 14generic-y += ftrace.h
15generic-y += hash.h
15generic-y += hw_irq.h 16generic-y += hw_irq.h
16generic-y += ioctl.h 17generic-y += ioctl.h
17generic-y += ioctls.h 18generic-y += ioctls.h
@@ -22,12 +23,14 @@ generic-y += kmap_types.h
22generic-y += kvm_para.h 23generic-y += kvm_para.h
23generic-y += local.h 24generic-y += local.h
24generic-y += local64.h 25generic-y += local64.h
26generic-y += mcs_spinlock.h
25generic-y += mman.h 27generic-y += mman.h
26generic-y += msgbuf.h 28generic-y += msgbuf.h
27generic-y += mutex.h 29generic-y += mutex.h
28generic-y += pci.h 30generic-y += pci.h
29generic-y += poll.h 31generic-y += poll.h
30generic-y += posix_types.h 32generic-y += posix_types.h
33generic-y += preempt.h
31generic-y += resource.h 34generic-y += resource.h
32generic-y += scatterlist.h 35generic-y += scatterlist.h
33generic-y += sections.h 36generic-y += sections.h
@@ -38,8 +41,8 @@ generic-y += shmbuf.h
38generic-y += sizes.h 41generic-y += sizes.h
39generic-y += socket.h 42generic-y += socket.h
40generic-y += sockios.h 43generic-y += sockios.h
41generic-y += switch_to.h
42generic-y += swab.h 44generic-y += swab.h
45generic-y += switch_to.h
43generic-y += termbits.h 46generic-y += termbits.h
44generic-y += termios.h 47generic-y += termios.h
45generic-y += topology.h 48generic-y += topology.h
@@ -49,5 +52,3 @@ generic-y += unaligned.h
49generic-y += user.h 52generic-y += user.h
50generic-y += vga.h 53generic-y += vga.h
51generic-y += xor.h 54generic-y += xor.h
52generic-y += preempt.h
53generic-y += hash.h
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index c7c64a63c29f..00a0f3ccd6eb 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -1,22 +1,23 @@
1 1
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += cputime.h 3generic-y += cputime.h
4generic-y += delay.h 4generic-y += delay.h
5generic-y += device.h 5generic-y += device.h
6generic-y += div64.h 6generic-y += div64.h
7generic-y += emergency-restart.h 7generic-y += emergency-restart.h
8generic-y += exec.h 8generic-y += exec.h
9generic-y += futex.h 9generic-y += futex.h
10generic-y += preempt.h 10generic-y += hash.h
11generic-y += irq_regs.h 11generic-y += irq_regs.h
12generic-y += param.h 12generic-y += local.h
13generic-y += local.h 13generic-y += local64.h
14generic-y += local64.h 14generic-y += mcs_spinlock.h
15generic-y += percpu.h 15generic-y += param.h
16generic-y += scatterlist.h 16generic-y += percpu.h
17generic-y += sections.h 17generic-y += preempt.h
18generic-y += topology.h 18generic-y += scatterlist.h
19generic-y += trace_clock.h 19generic-y += sections.h
20generic-y += topology.h
21generic-y += trace_clock.h
20generic-y += vga.h 22generic-y += vga.h
21generic-y += xor.h 23generic-y += xor.h
22generic-y += hash.h
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 359d36fdc247..0d93b9a79ca9 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -10,6 +10,7 @@ generic-y += emergency-restart.h
10generic-y += errno.h 10generic-y += errno.h
11generic-y += fb.h 11generic-y += fb.h
12generic-y += futex.h 12generic-y += futex.h
13generic-y += hash.h
13generic-y += hw_irq.h 14generic-y += hw_irq.h
14generic-y += ioctl.h 15generic-y += ioctl.h
15generic-y += ipcbuf.h 16generic-y += ipcbuf.h
@@ -17,14 +18,16 @@ generic-y += irq_regs.h
17generic-y += kdebug.h 18generic-y += kdebug.h
18generic-y += kmap_types.h 19generic-y += kmap_types.h
19generic-y += kvm_para.h 20generic-y += kvm_para.h
20generic-y += local64.h
21generic-y += local.h 21generic-y += local.h
22generic-y += local64.h
23generic-y += mcs_spinlock.h
22generic-y += mman.h 24generic-y += mman.h
23generic-y += msgbuf.h 25generic-y += msgbuf.h
24generic-y += mutex.h 26generic-y += mutex.h
25generic-y += param.h 27generic-y += param.h
26generic-y += percpu.h 28generic-y += percpu.h
27generic-y += pgalloc.h 29generic-y += pgalloc.h
30generic-y += preempt.h
28generic-y += resource.h 31generic-y += resource.h
29generic-y += scatterlist.h 32generic-y += scatterlist.h
30generic-y += sembuf.h 33generic-y += sembuf.h
@@ -44,5 +47,3 @@ generic-y += ucontext.h
44generic-y += unaligned.h 47generic-y += unaligned.h
45generic-y += user.h 48generic-y += user.h
46generic-y += xor.h 49generic-y += xor.h
47generic-y += preempt.h
48generic-y += hash.h
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index d73bb85ccdd3..8dbdce8421b0 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += exec.h
15generic-y += fb.h 15generic-y += fb.h
16generic-y += fcntl.h 16generic-y += fcntl.h
17generic-y += futex.h 17generic-y += futex.h
18generic-y += hash.h
18generic-y += hw_irq.h 19generic-y += hw_irq.h
19generic-y += io.h 20generic-y += io.h
20generic-y += ioctl.h 21generic-y += ioctl.h
@@ -24,6 +25,7 @@ generic-y += irq_regs.h
24generic-y += kdebug.h 25generic-y += kdebug.h
25generic-y += kmap_types.h 26generic-y += kmap_types.h
26generic-y += local.h 27generic-y += local.h
28generic-y += mcs_spinlock.h
27generic-y += mman.h 29generic-y += mman.h
28generic-y += mmu.h 30generic-y += mmu.h
29generic-y += mmu_context.h 31generic-y += mmu_context.h
@@ -34,6 +36,7 @@ generic-y += percpu.h
34generic-y += pgalloc.h 36generic-y += pgalloc.h
35generic-y += poll.h 37generic-y += poll.h
36generic-y += posix_types.h 38generic-y += posix_types.h
39generic-y += preempt.h
37generic-y += resource.h 40generic-y += resource.h
38generic-y += scatterlist.h 41generic-y += scatterlist.h
39generic-y += segment.h 42generic-y += segment.h
@@ -56,5 +59,3 @@ generic-y += ucontext.h
56generic-y += user.h 59generic-y += user.h
57generic-y += vga.h 60generic-y += vga.h
58generic-y += xor.h 61generic-y += xor.h
59generic-y += preempt.h
60generic-y += hash.h
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index f3fd8768f095..056027f38351 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -9,8 +9,9 @@ generic-y += exec.h
9generic-y += hash.h 9generic-y += hash.h
10generic-y += kvm_para.h 10generic-y += kvm_para.h
11generic-y += linkage.h 11generic-y += linkage.h
12generic-y += mcs_spinlock.h
12generic-y += module.h 13generic-y += module.h
14generic-y += preempt.h
13generic-y += trace_clock.h 15generic-y += trace_clock.h
14generic-y += vga.h 16generic-y += vga.h
15generic-y += xor.h 17generic-y += xor.h
16generic-y += preempt.h
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index bc42f14c9c2e..babb9338ebf8 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -1,6 +1,7 @@
1 1
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += exec.h 3generic-y += exec.h
4generic-y += trace_clock.h
5generic-y += preempt.h
6generic-y += hash.h 4generic-y += hash.h
5generic-y += mcs_spinlock.h
6generic-y += preempt.h
7generic-y += trace_clock.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 38ca45d3df1e..eadcc118f950 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -25,14 +25,16 @@ generic-y += ipcbuf.h
25generic-y += irq_regs.h 25generic-y += irq_regs.h
26generic-y += kdebug.h 26generic-y += kdebug.h
27generic-y += kmap_types.h 27generic-y += kmap_types.h
28generic-y += local64.h
29generic-y += local.h 28generic-y += local.h
29generic-y += local64.h
30generic-y += mcs_spinlock.h
30generic-y += mman.h 31generic-y += mman.h
31generic-y += msgbuf.h 32generic-y += msgbuf.h
32generic-y += pci.h 33generic-y += pci.h
33generic-y += percpu.h 34generic-y += percpu.h
34generic-y += poll.h 35generic-y += poll.h
35generic-y += posix_types.h 36generic-y += posix_types.h
37generic-y += preempt.h
36generic-y += resource.h 38generic-y += resource.h
37generic-y += rwsem.h 39generic-y += rwsem.h
38generic-y += scatterlist.h 40generic-y += scatterlist.h
@@ -45,8 +47,8 @@ generic-y += siginfo.h
45generic-y += sizes.h 47generic-y += sizes.h
46generic-y += socket.h 48generic-y += socket.h
47generic-y += sockios.h 49generic-y += sockios.h
48generic-y += statfs.h
49generic-y += stat.h 50generic-y += stat.h
51generic-y += statfs.h
50generic-y += termbits.h 52generic-y += termbits.h
51generic-y += termios.h 53generic-y += termios.h
52generic-y += topology.h 54generic-y += topology.h
@@ -55,4 +57,3 @@ generic-y += types.h
55generic-y += ucontext.h 57generic-y += ucontext.h
56generic-y += unaligned.h 58generic-y += unaligned.h
57generic-y += xor.h 59generic-y += xor.h
58generic-y += preempt.h
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 283a83154b5e..0da4aa2602ae 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,8 +1,9 @@
1 1
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += exec.h 3generic-y += exec.h
4generic-y += hash.h
4generic-y += kvm_para.h 5generic-y += kvm_para.h
5generic-y += trace_clock.h 6generic-y += mcs_spinlock.h
6generic-y += preempt.h 7generic-y += preempt.h
8generic-y += trace_clock.h
7generic-y += vtime.h 9generic-y += vtime.h
8generic-y += hash.h
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index 932435ac4e5c..5825a35b2c56 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -1,7 +1,8 @@
1 1
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += exec.h 3generic-y += exec.h
4generic-y += hash.h
5generic-y += mcs_spinlock.h
4generic-y += module.h 6generic-y += module.h
5generic-y += trace_clock.h
6generic-y += preempt.h 7generic-y += preempt.h
7generic-y += hash.h 8generic-y += trace_clock.h
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index dbdd2231c75d..b2e322939256 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -17,6 +17,7 @@ config M68K
17 select FPU if MMU 17 select FPU if MMU
18 select ARCH_WANT_IPC_PARSE_VERSION 18 select ARCH_WANT_IPC_PARSE_VERSION
19 select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE 19 select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
20 select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
20 select HAVE_MOD_ARCH_SPECIFIC 21 select HAVE_MOD_ARCH_SPECIFIC
21 select MODULES_USE_ELF_REL 22 select MODULES_USE_ELF_REL
22 select MODULES_USE_ELF_RELA 23 select MODULES_USE_ELF_RELA
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 6fb9e813a910..c67c94a2d672 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -14,8 +14,9 @@ generic-y += irq_regs.h
14generic-y += kdebug.h 14generic-y += kdebug.h
15generic-y += kmap_types.h 15generic-y += kmap_types.h
16generic-y += kvm_para.h 16generic-y += kvm_para.h
17generic-y += local64.h
18generic-y += local.h 17generic-y += local.h
18generic-y += local64.h
19generic-y += mcs_spinlock.h
19generic-y += mman.h 20generic-y += mman.h
20generic-y += mutex.h 21generic-y += mutex.h
21generic-y += percpu.h 22generic-y += percpu.h
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index b716d807c2ec..c29ead89a317 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += fb.h
13generic-y += fcntl.h 13generic-y += fcntl.h
14generic-y += futex.h 14generic-y += futex.h
15generic-y += hardirq.h 15generic-y += hardirq.h
16generic-y += hash.h
16generic-y += hw_irq.h 17generic-y += hw_irq.h
17generic-y += ioctl.h 18generic-y += ioctl.h
18generic-y += ioctls.h 19generic-y += ioctls.h
@@ -23,6 +24,7 @@ generic-y += kmap_types.h
23generic-y += kvm_para.h 24generic-y += kvm_para.h
24generic-y += local.h 25generic-y += local.h
25generic-y += local64.h 26generic-y += local64.h
27generic-y += mcs_spinlock.h
26generic-y += msgbuf.h 28generic-y += msgbuf.h
27generic-y += mutex.h 29generic-y += mutex.h
28generic-y += param.h 30generic-y += param.h
@@ -30,6 +32,7 @@ generic-y += pci.h
30generic-y += percpu.h 32generic-y += percpu.h
31generic-y += poll.h 33generic-y += poll.h
32generic-y += posix_types.h 34generic-y += posix_types.h
35generic-y += preempt.h
33generic-y += scatterlist.h 36generic-y += scatterlist.h
34generic-y += sections.h 37generic-y += sections.h
35generic-y += sembuf.h 38generic-y += sembuf.h
@@ -52,5 +55,3 @@ generic-y += unaligned.h
52generic-y += user.h 55generic-y += user.h
53generic-y += vga.h 56generic-y += vga.h
54generic-y += xor.h 57generic-y += xor.h
55generic-y += preempt.h
56generic-y += hash.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 2b98bc73642a..1f590ab8f323 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -3,6 +3,7 @@ generic-y += barrier.h
3generic-y += clkdev.h 3generic-y += clkdev.h
4generic-y += exec.h 4generic-y += exec.h
5generic-y += hash.h 5generic-y += hash.h
6generic-y += trace_clock.h 6generic-y += mcs_spinlock.h
7generic-y += syscalls.h
8generic-y += preempt.h 7generic-y += preempt.h
8generic-y += syscalls.h
9generic-y += trace_clock.h
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 2d7f65052c1f..05439187891d 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -2,16 +2,17 @@
2generic-y += cputime.h 2generic-y += cputime.h
3generic-y += current.h 3generic-y += current.h
4generic-y += emergency-restart.h 4generic-y += emergency-restart.h
5generic-y += hash.h
5generic-y += local64.h 6generic-y += local64.h
7generic-y += mcs_spinlock.h
6generic-y += mutex.h 8generic-y += mutex.h
7generic-y += parport.h 9generic-y += parport.h
8generic-y += percpu.h 10generic-y += percpu.h
11generic-y += preempt.h
9generic-y += scatterlist.h 12generic-y += scatterlist.h
10generic-y += sections.h 13generic-y += sections.h
11generic-y += segment.h 14generic-y += segment.h
12generic-y += serial.h 15generic-y += serial.h
13generic-y += trace_clock.h 16generic-y += trace_clock.h
14generic-y += preempt.h
15generic-y += ucontext.h 17generic-y += ucontext.h
16generic-y += xor.h 18generic-y += xor.h
17generic-y += hash.h
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 992e989ab785..cbc6b9bf45da 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -3,5 +3,6 @@ generic-y += barrier.h
3generic-y += clkdev.h 3generic-y += clkdev.h
4generic-y += exec.h 4generic-y += exec.h
5generic-y += hash.h 5generic-y += hash.h
6generic-y += trace_clock.h 6generic-y += mcs_spinlock.h
7generic-y += preempt.h 7generic-y += preempt.h
8generic-y += trace_clock.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 2e40f1ca8667..480af0d9c2f5 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -10,8 +10,8 @@ generic-y += bugs.h
10generic-y += cacheflush.h 10generic-y += cacheflush.h
11generic-y += checksum.h 11generic-y += checksum.h
12generic-y += clkdev.h 12generic-y += clkdev.h
13generic-y += cmpxchg.h
14generic-y += cmpxchg-local.h 13generic-y += cmpxchg-local.h
14generic-y += cmpxchg.h
15generic-y += cputime.h 15generic-y += cputime.h
16generic-y += current.h 16generic-y += current.h
17generic-y += device.h 17generic-y += device.h
@@ -25,6 +25,7 @@ generic-y += fcntl.h
25generic-y += ftrace.h 25generic-y += ftrace.h
26generic-y += futex.h 26generic-y += futex.h
27generic-y += hardirq.h 27generic-y += hardirq.h
28generic-y += hash.h
28generic-y += hw_irq.h 29generic-y += hw_irq.h
29generic-y += ioctl.h 30generic-y += ioctl.h
30generic-y += ioctls.h 31generic-y += ioctls.h
@@ -34,6 +35,7 @@ generic-y += kdebug.h
34generic-y += kmap_types.h 35generic-y += kmap_types.h
35generic-y += kvm_para.h 36generic-y += kvm_para.h
36generic-y += local.h 37generic-y += local.h
38generic-y += mcs_spinlock.h
37generic-y += mman.h 39generic-y += mman.h
38generic-y += module.h 40generic-y += module.h
39generic-y += msgbuf.h 41generic-y += msgbuf.h
@@ -41,6 +43,7 @@ generic-y += pci.h
41generic-y += percpu.h 43generic-y += percpu.h
42generic-y += poll.h 44generic-y += poll.h
43generic-y += posix_types.h 45generic-y += posix_types.h
46generic-y += preempt.h
44generic-y += resource.h 47generic-y += resource.h
45generic-y += scatterlist.h 48generic-y += scatterlist.h
46generic-y += sections.h 49generic-y += sections.h
@@ -53,11 +56,11 @@ generic-y += siginfo.h
53generic-y += signal.h 56generic-y += signal.h
54generic-y += socket.h 57generic-y += socket.h
55generic-y += sockios.h 58generic-y += sockios.h
56generic-y += statfs.h
57generic-y += stat.h 59generic-y += stat.h
60generic-y += statfs.h
58generic-y += string.h 61generic-y += string.h
59generic-y += switch_to.h
60generic-y += swab.h 62generic-y += swab.h
63generic-y += switch_to.h
61generic-y += termbits.h 64generic-y += termbits.h
62generic-y += termios.h 65generic-y += termios.h
63generic-y += topology.h 66generic-y += topology.h
@@ -68,5 +71,3 @@ generic-y += user.h
68generic-y += vga.h 71generic-y += vga.h
69generic-y += word-at-a-time.h 72generic-y += word-at-a-time.h
70generic-y += xor.h 73generic-y += xor.h
71generic-y += preempt.h
72generic-y += hash.h
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 752c981bc3c7..ecf25e6678ad 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,9 +1,29 @@
1 1
2generic-y += auxvec.h
2generic-y += barrier.h 3generic-y += barrier.h
3generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \ 4generic-y += clkdev.h
4 segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \ 5generic-y += cputime.h
5 div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \ 6generic-y += device.h
6 poll.h xor.h clkdev.h exec.h 7generic-y += div64.h
7generic-y += trace_clock.h 8generic-y += emergency-restart.h
8generic-y += preempt.h 9generic-y += exec.h
9generic-y += hash.h 10generic-y += hash.h
11generic-y += hw_irq.h
12generic-y += irq_regs.h
13generic-y += kdebug.h
14generic-y += kvm_para.h
15generic-y += local.h
16generic-y += local64.h
17generic-y += mcs_spinlock.h
18generic-y += mutex.h
19generic-y += param.h
20generic-y += percpu.h
21generic-y += poll.h
22generic-y += preempt.h
23generic-y += segment.h
24generic-y += topology.h
25generic-y += trace_clock.h
26generic-y += user.h
27generic-y += vga.h
28generic-y += word-at-a-time.h
29generic-y += xor.h
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 6c0a955a1b06..3fb1bc432f4f 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,7 +1,8 @@
1 1
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += hash.h
4generic-y += mcs_spinlock.h
5generic-y += preempt.h
3generic-y += rwsem.h 6generic-y += rwsem.h
4generic-y += trace_clock.h 7generic-y += trace_clock.h
5generic-y += preempt.h
6generic-y += vtime.h 8generic-y += vtime.h
7generic-y += hash.h
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 65a07750f4f9..bb74b21f007a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -117,6 +117,7 @@ config S390
117 select HAVE_FUNCTION_GRAPH_TRACER 117 select HAVE_FUNCTION_GRAPH_TRACER
118 select HAVE_FUNCTION_TRACER 118 select HAVE_FUNCTION_TRACER
119 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 119 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
120 select HAVE_FUTEX_CMPXCHG if FUTEX
120 select HAVE_KERNEL_BZIP2 121 select HAVE_KERNEL_BZIP2
121 select HAVE_KERNEL_GZIP 122 select HAVE_KERNEL_GZIP
122 select HAVE_KERNEL_LZ4 123 select HAVE_KERNEL_LZ4
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 8386a4a1f19a..57892a8a9055 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,7 @@
1 1
2 2
3generic-y += clkdev.h 3generic-y += clkdev.h
4generic-y += trace_clock.h
5generic-y += preempt.h
6generic-y += hash.h 4generic-y += hash.h
5generic-y += mcs_spinlock.h
6generic-y += preempt.h
7generic-y += trace_clock.h
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 146b9d5e89f8..4630cf217b5b 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -1,10 +1,11 @@
1 1
2header-y += 2header-y +=
3 3
4
4generic-y += barrier.h 5generic-y += barrier.h
5generic-y += clkdev.h 6generic-y += clkdev.h
6generic-y += hash.h 7generic-y += hash.h
8generic-y += mcs_spinlock.h
9generic-y += preempt.h
7generic-y += trace_clock.h 10generic-y += trace_clock.h
8generic-y += xor.h 11generic-y += xor.h
9generic-y += preempt.h
10
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 0cd7198a4524..c19e47dacb31 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -8,18 +8,21 @@ generic-y += emergency-restart.h
8generic-y += errno.h 8generic-y += errno.h
9generic-y += exec.h 9generic-y += exec.h
10generic-y += fcntl.h 10generic-y += fcntl.h
11generic-y += hash.h
11generic-y += ioctl.h 12generic-y += ioctl.h
12generic-y += ipcbuf.h 13generic-y += ipcbuf.h
13generic-y += irq_regs.h 14generic-y += irq_regs.h
14generic-y += kvm_para.h 15generic-y += kvm_para.h
15generic-y += local.h 16generic-y += local.h
16generic-y += local64.h 17generic-y += local64.h
18generic-y += mcs_spinlock.h
19generic-y += mman.h
20generic-y += msgbuf.h
17generic-y += param.h 21generic-y += param.h
18generic-y += parport.h 22generic-y += parport.h
19generic-y += percpu.h 23generic-y += percpu.h
20generic-y += poll.h 24generic-y += poll.h
21generic-y += mman.h 25generic-y += preempt.h
22generic-y += msgbuf.h
23generic-y += resource.h 26generic-y += resource.h
24generic-y += scatterlist.h 27generic-y += scatterlist.h
25generic-y += sembuf.h 28generic-y += sembuf.h
@@ -34,5 +37,3 @@ generic-y += termios.h
34generic-y += trace_clock.h 37generic-y += trace_clock.h
35generic-y += ucontext.h 38generic-y += ucontext.h
36generic-y += xor.h 39generic-y += xor.h
37generic-y += preempt.h
38generic-y += hash.h
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 4b60a0c325ec..a45821818003 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -6,15 +6,16 @@ generic-y += cputime.h
6generic-y += div64.h 6generic-y += div64.h
7generic-y += emergency-restart.h 7generic-y += emergency-restart.h
8generic-y += exec.h 8generic-y += exec.h
9generic-y += linkage.h 9generic-y += hash.h
10generic-y += local64.h
11generic-y += mutex.h
12generic-y += irq_regs.h 10generic-y += irq_regs.h
11generic-y += linkage.h
13generic-y += local.h 12generic-y += local.h
13generic-y += local64.h
14generic-y += mcs_spinlock.h
14generic-y += module.h 15generic-y += module.h
16generic-y += mutex.h
17generic-y += preempt.h
15generic-y += serial.h 18generic-y += serial.h
16generic-y += trace_clock.h 19generic-y += trace_clock.h
17generic-y += types.h 20generic-y += types.h
18generic-y += word-at-a-time.h 21generic-y += word-at-a-time.h
19generic-y += preempt.h
20generic-y += hash.h
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 3793c75e45d9..0aa5675e7025 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -11,6 +11,7 @@ generic-y += errno.h
11generic-y += exec.h 11generic-y += exec.h
12generic-y += fb.h 12generic-y += fb.h
13generic-y += fcntl.h 13generic-y += fcntl.h
14generic-y += hash.h
14generic-y += hw_irq.h 15generic-y += hw_irq.h
15generic-y += ioctl.h 16generic-y += ioctl.h
16generic-y += ioctls.h 17generic-y += ioctls.h
@@ -18,12 +19,14 @@ generic-y += ipcbuf.h
18generic-y += irq_regs.h 19generic-y += irq_regs.h
19generic-y += local.h 20generic-y += local.h
20generic-y += local64.h 21generic-y += local64.h
22generic-y += mcs_spinlock.h
21generic-y += msgbuf.h 23generic-y += msgbuf.h
22generic-y += mutex.h 24generic-y += mutex.h
23generic-y += param.h 25generic-y += param.h
24generic-y += parport.h 26generic-y += parport.h
25generic-y += poll.h 27generic-y += poll.h
26generic-y += posix_types.h 28generic-y += posix_types.h
29generic-y += preempt.h
27generic-y += resource.h 30generic-y += resource.h
28generic-y += scatterlist.h 31generic-y += scatterlist.h
29generic-y += sembuf.h 32generic-y += sembuf.h
@@ -38,5 +41,3 @@ generic-y += termios.h
38generic-y += trace_clock.h 41generic-y += trace_clock.h
39generic-y += types.h 42generic-y += types.h
40generic-y += xor.h 43generic-y += xor.h
41generic-y += preempt.h
42generic-y += hash.h
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 88a330dcdede..a5e4b6068213 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,8 +1,28 @@
1generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
2generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
3generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
4generic-y += switch_to.h clkdev.h
5generic-y += trace_clock.h
6generic-y += preempt.h
7generic-y += hash.h
8generic-y += barrier.h 1generic-y += barrier.h
2generic-y += bug.h
3generic-y += clkdev.h
4generic-y += cputime.h
5generic-y += current.h
6generic-y += delay.h
7generic-y += device.h
8generic-y += emergency-restart.h
9generic-y += exec.h
10generic-y += ftrace.h
11generic-y += futex.h
12generic-y += hardirq.h
13generic-y += hash.h
14generic-y += hw_irq.h
15generic-y += io.h
16generic-y += irq_regs.h
17generic-y += kdebug.h
18generic-y += mcs_spinlock.h
19generic-y += mutex.h
20generic-y += param.h
21generic-y += pci.h
22generic-y += percpu.h
23generic-y += preempt.h
24generic-y += sections.h
25generic-y += switch_to.h
26generic-y += topology.h
27generic-y += trace_clock.h
28generic-y += xor.h
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 3ef4f9d9bf5d..1e5fb872a4aa 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += fcntl.h
16generic-y += ftrace.h 16generic-y += ftrace.h
17generic-y += futex.h 17generic-y += futex.h
18generic-y += hardirq.h 18generic-y += hardirq.h
19generic-y += hash.h
19generic-y += hw_irq.h 20generic-y += hw_irq.h
20generic-y += ioctl.h 21generic-y += ioctl.h
21generic-y += ioctls.h 22generic-y += ioctls.h
@@ -24,6 +25,7 @@ generic-y += irq_regs.h
24generic-y += kdebug.h 25generic-y += kdebug.h
25generic-y += kmap_types.h 26generic-y += kmap_types.h
26generic-y += local.h 27generic-y += local.h
28generic-y += mcs_spinlock.h
27generic-y += mman.h 29generic-y += mman.h
28generic-y += module.h 30generic-y += module.h
29generic-y += msgbuf.h 31generic-y += msgbuf.h
@@ -32,6 +34,7 @@ generic-y += parport.h
32generic-y += percpu.h 34generic-y += percpu.h
33generic-y += poll.h 35generic-y += poll.h
34generic-y += posix_types.h 36generic-y += posix_types.h
37generic-y += preempt.h
35generic-y += resource.h 38generic-y += resource.h
36generic-y += scatterlist.h 39generic-y += scatterlist.h
37generic-y += sections.h 40generic-y += sections.h
@@ -60,5 +63,3 @@ generic-y += unaligned.h
60generic-y += user.h 63generic-y += user.h
61generic-y += vga.h 64generic-y += vga.h
62generic-y += xor.h 65generic-y += xor.h
63generic-y += preempt.h
64generic-y += hash.h
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 7f669853317a..a8fee078b92f 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -5,3 +5,4 @@ genhdr-y += unistd_64.h
5genhdr-y += unistd_x32.h 5genhdr-y += unistd_x32.h
6 6
7generic-y += clkdev.h 7generic-y += clkdev.h
8generic-y += mcs_spinlock.h
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 0a337e4a8370..c3d20ba6eb86 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += errno.h
9generic-y += exec.h 9generic-y += exec.h
10generic-y += fcntl.h 10generic-y += fcntl.h
11generic-y += hardirq.h 11generic-y += hardirq.h
12generic-y += hash.h
12generic-y += ioctl.h 13generic-y += ioctl.h
13generic-y += irq_regs.h 14generic-y += irq_regs.h
14generic-y += kdebug.h 15generic-y += kdebug.h
@@ -17,7 +18,9 @@ generic-y += kvm_para.h
17generic-y += linkage.h 18generic-y += linkage.h
18generic-y += local.h 19generic-y += local.h
19generic-y += local64.h 20generic-y += local64.h
21generic-y += mcs_spinlock.h
20generic-y += percpu.h 22generic-y += percpu.h
23generic-y += preempt.h
21generic-y += resource.h 24generic-y += resource.h
22generic-y += scatterlist.h 25generic-y += scatterlist.h
23generic-y += sections.h 26generic-y += sections.h
@@ -27,5 +30,3 @@ generic-y += termios.h
27generic-y += topology.h 30generic-y += topology.h
28generic-y += trace_clock.h 31generic-y += trace_clock.h
29generic-y += xor.h 32generic-y += xor.h
30generic-y += preempt.h
31generic-y += hash.h
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index d8a55e87877f..0ffb0cbe2823 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -39,17 +39,10 @@
39 lock_acquire(&(l)->dep_map, s, t, r, c, n, i) 39 lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
40# define __rel(l, n, i) \ 40# define __rel(l, n, i) \
41 lock_release(&(l)->dep_map, n, i) 41 lock_release(&(l)->dep_map, n, i)
42# ifdef CONFIG_PROVE_LOCKING 42#define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i)
43# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 2, NULL, i) 43#define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i)
44# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 2, n, i) 44#define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i)
45# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 2, NULL, i) 45#define lockdep_release(l, n, i) __rel(l, n, i)
46# define lockdep_release(l, n, i) __rel(l, n, i)
47# else
48# define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i)
49# define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i)
50# define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i)
51# define lockdep_release(l, n, i) __rel(l, n, i)
52# endif
53#else 46#else
54# define lockdep_acquire(l, s, t, i) do { } while (0) 47# define lockdep_acquire(l, s, t, i) do { } while (0)
55# define lockdep_acquire_nest(l, s, t, n, i) do { } while (0) 48# define lockdep_acquire_nest(l, s, t, n, i) do { } while (0)
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h
new file mode 100644
index 000000000000..10cd4ffc6ba2
--- /dev/null
+++ b/include/asm-generic/mcs_spinlock.h
@@ -0,0 +1,13 @@
1#ifndef __ASM_MCS_SPINLOCK_H
2#define __ASM_MCS_SPINLOCK_H
3
4/*
5 * Architectures can define their own:
6 *
7 * arch_mcs_spin_lock_contended(l)
8 * arch_mcs_spin_unlock_contended(l)
9 *
10 * See kernel/locking/mcs_spinlock.c.
11 */
12
13#endif /* __ASM_MCS_SPINLOCK_H */
diff --git a/include/linux/futex.h b/include/linux/futex.h
index b0d95cac826e..6435f46d6e13 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -55,7 +55,11 @@ union futex_key {
55#ifdef CONFIG_FUTEX 55#ifdef CONFIG_FUTEX
56extern void exit_robust_list(struct task_struct *curr); 56extern void exit_robust_list(struct task_struct *curr);
57extern void exit_pi_state_list(struct task_struct *curr); 57extern void exit_pi_state_list(struct task_struct *curr);
58#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
59#define futex_cmpxchg_enabled 1
60#else
58extern int futex_cmpxchg_enabled; 61extern int futex_cmpxchg_enabled;
62#endif
59#else 63#else
60static inline void exit_robust_list(struct task_struct *curr) 64static inline void exit_robust_list(struct task_struct *curr)
61{ 65{
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 92b1bfc5da60..060e5137fd80 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -252,9 +252,9 @@ struct held_lock {
252 unsigned int trylock:1; /* 16 bits */ 252 unsigned int trylock:1; /* 16 bits */
253 253
254 unsigned int read:2; /* see lock_acquire() comment */ 254 unsigned int read:2; /* see lock_acquire() comment */
255 unsigned int check:2; /* see lock_acquire() comment */ 255 unsigned int check:1; /* see lock_acquire() comment */
256 unsigned int hardirqs_off:1; 256 unsigned int hardirqs_off:1;
257 unsigned int references:11; /* 32 bits */ 257 unsigned int references:12; /* 32 bits */
258}; 258};
259 259
260/* 260/*
@@ -303,7 +303,7 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
303 (lock)->dep_map.key, sub) 303 (lock)->dep_map.key, sub)
304 304
305#define lockdep_set_novalidate_class(lock) \ 305#define lockdep_set_novalidate_class(lock) \
306 lockdep_set_class(lock, &__lockdep_no_validate__) 306 lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
307/* 307/*
308 * Compare locking classes 308 * Compare locking classes
309 */ 309 */
@@ -326,9 +326,8 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
326 * 326 *
327 * Values for check: 327 * Values for check:
328 * 328 *
329 * 0: disabled 329 * 0: simple checks (freeing, held-at-exit-time, etc.)
330 * 1: simple checks (freeing, held-at-exit-time, etc.) 330 * 1: full validation
331 * 2: full validation
332 */ 331 */
333extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, 332extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
334 int trylock, int read, int check, 333 int trylock, int read, int check,
@@ -479,15 +478,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
479 * on the per lock-class debug mode: 478 * on the per lock-class debug mode:
480 */ 479 */
481 480
482#ifdef CONFIG_PROVE_LOCKING 481#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
483 #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) 482#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
484 #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 2, n, i) 483#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
485 #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 2, n, i)
486#else
487 #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
488 #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
489 #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
490#endif
491 484
492#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) 485#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
493#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) 486#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
@@ -518,13 +511,13 @@ static inline void print_irqtrace_events(struct task_struct *curr)
518# define might_lock(lock) \ 511# define might_lock(lock) \
519do { \ 512do { \
520 typecheck(struct lockdep_map *, &(lock)->dep_map); \ 513 typecheck(struct lockdep_map *, &(lock)->dep_map); \
521 lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ 514 lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
522 lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ 515 lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
523} while (0) 516} while (0)
524# define might_lock_read(lock) \ 517# define might_lock_read(lock) \
525do { \ 518do { \
526 typecheck(struct lockdep_map *, &(lock)->dep_map); \ 519 typecheck(struct lockdep_map *, &(lock)->dep_map); \
527 lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \ 520 lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
528 lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ 521 lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
529} while (0) 522} while (0)
530#else 523#else
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index d3181936c138..11692dea18aa 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -46,6 +46,7 @@
46 * - detects multi-task circular deadlocks and prints out all affected 46 * - detects multi-task circular deadlocks and prints out all affected
47 * locks and tasks (and only those tasks) 47 * locks and tasks (and only those tasks)
48 */ 48 */
49struct optimistic_spin_queue;
49struct mutex { 50struct mutex {
50 /* 1: unlocked, 0: locked, negative: locked, possible waiters */ 51 /* 1: unlocked, 0: locked, negative: locked, possible waiters */
51 atomic_t count; 52 atomic_t count;
@@ -55,7 +56,7 @@ struct mutex {
55 struct task_struct *owner; 56 struct task_struct *owner;
56#endif 57#endif
57#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
58 void *spin_mlock; /* Spinner MCS lock */ 59 struct optimistic_spin_queue *osq; /* Spinner MCS lock */
59#endif 60#endif
60#ifdef CONFIG_DEBUG_MUTEXES 61#ifdef CONFIG_DEBUG_MUTEXES
61 const char *name; 62 const char *name;
@@ -179,4 +180,4 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
179# define arch_mutex_cpu_relax() cpu_relax() 180# define arch_mutex_cpu_relax() cpu_relax()
180#endif 181#endif
181 182
182#endif 183#endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 72bf3a01a4ee..adff3c99dcaa 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -314,7 +314,7 @@ static inline bool rcu_lockdep_current_cpu_online(void)
314 314
315static inline void rcu_lock_acquire(struct lockdep_map *map) 315static inline void rcu_lock_acquire(struct lockdep_map *map)
316{ 316{
317 lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); 317 lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
318} 318}
319 319
320static inline void rcu_lock_release(struct lockdep_map *map) 320static inline void rcu_lock_release(struct lockdep_map *map)
diff --git a/init/Kconfig b/init/Kconfig
index 009a797dd242..d56cb03c1b49 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1387,6 +1387,13 @@ config FUTEX
1387 support for "fast userspace mutexes". The resulting kernel may not 1387 support for "fast userspace mutexes". The resulting kernel may not
1388 run glibc-based applications correctly. 1388 run glibc-based applications correctly.
1389 1389
1390config HAVE_FUTEX_CMPXCHG
1391 bool
1392 help
1393 Architectures should select this if futex_atomic_cmpxchg_inatomic()
1394 is implemented and always working. This removes a couple of runtime
1395 checks.
1396
1390config EPOLL 1397config EPOLL
1391 bool "Enable eventpoll support" if EXPERT 1398 bool "Enable eventpoll support" if EXPERT
1392 default y 1399 default y
diff --git a/kernel/futex.c b/kernel/futex.c
index 08ec814ad9d2..67dacaf93e56 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -157,7 +157,9 @@
157 * enqueue. 157 * enqueue.
158 */ 158 */
159 159
160#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
160int __read_mostly futex_cmpxchg_enabled; 161int __read_mostly futex_cmpxchg_enabled;
162#endif
161 163
162/* 164/*
163 * Futex flags used to encode options to functions and preserve them across 165 * Futex flags used to encode options to functions and preserve them across
@@ -2875,9 +2877,28 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2875 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); 2877 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2876} 2878}
2877 2879
2878static int __init futex_init(void) 2880static void __init futex_detect_cmpxchg(void)
2879{ 2881{
2882#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
2880 u32 curval; 2883 u32 curval;
2884
2885 /*
2886 * This will fail and we want it. Some arch implementations do
2887 * runtime detection of the futex_atomic_cmpxchg_inatomic()
2888 * functionality. We want to know that before we call in any
2889 * of the complex code paths. Also we want to prevent
2890 * registration of robust lists in that case. NULL is
2891 * guaranteed to fault and we get -EFAULT on functional
2892 * implementation, the non-functional ones will return
2893 * -ENOSYS.
2894 */
2895 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
2896 futex_cmpxchg_enabled = 1;
2897#endif
2898}
2899
2900static int __init futex_init(void)
2901{
2881 unsigned int futex_shift; 2902 unsigned int futex_shift;
2882 unsigned long i; 2903 unsigned long i;
2883 2904
@@ -2893,18 +2914,8 @@ static int __init futex_init(void)
2893 &futex_shift, NULL, 2914 &futex_shift, NULL,
2894 futex_hashsize, futex_hashsize); 2915 futex_hashsize, futex_hashsize);
2895 futex_hashsize = 1UL << futex_shift; 2916 futex_hashsize = 1UL << futex_shift;
2896 /* 2917
2897 * This will fail and we want it. Some arch implementations do 2918 futex_detect_cmpxchg();
2898 * runtime detection of the futex_atomic_cmpxchg_inatomic()
2899 * functionality. We want to know that before we call in any
2900 * of the complex code paths. Also we want to prevent
2901 * registration of robust lists in that case. NULL is
2902 * guaranteed to fault and we get -EFAULT on functional
2903 * implementation, the non-functional ones will return
2904 * -ENOSYS.
2905 */
2906 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
2907 futex_cmpxchg_enabled = 1;
2908 2919
2909 for (i = 0; i < futex_hashsize; i++) { 2920 for (i = 0; i < futex_hashsize; i++) {
2910 atomic_set(&futex_queues[i].waiters, 0); 2921 atomic_set(&futex_queues[i].waiters, 0);
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index baab8e5e7f66..2a9ee96ecf00 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -1,5 +1,5 @@
1 1
2obj-y += mutex.o semaphore.o rwsem.o lglock.o 2obj-y += mutex.o semaphore.o rwsem.o lglock.o mcs_spinlock.o
3 3
4ifdef CONFIG_FUNCTION_TRACER 4ifdef CONFIG_FUNCTION_TRACER
5CFLAGS_REMOVE_lockdep.o = -pg 5CFLAGS_REMOVE_lockdep.o = -pg
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index eb8a54783fa0..bf0c6b0dd9c5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1936,12 +1936,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
1936 1936
1937 for (;;) { 1937 for (;;) {
1938 int distance = curr->lockdep_depth - depth + 1; 1938 int distance = curr->lockdep_depth - depth + 1;
1939 hlock = curr->held_locks + depth-1; 1939 hlock = curr->held_locks + depth - 1;
1940 /* 1940 /*
1941 * Only non-recursive-read entries get new dependencies 1941 * Only non-recursive-read entries get new dependencies
1942 * added: 1942 * added:
1943 */ 1943 */
1944 if (hlock->read != 2) { 1944 if (hlock->read != 2 && hlock->check) {
1945 if (!check_prev_add(curr, hlock, next, 1945 if (!check_prev_add(curr, hlock, next,
1946 distance, trylock_loop)) 1946 distance, trylock_loop))
1947 return 0; 1947 return 0;
@@ -2098,7 +2098,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2098 * (If lookup_chain_cache() returns with 1 it acquires 2098 * (If lookup_chain_cache() returns with 1 it acquires
2099 * graph_lock for us) 2099 * graph_lock for us)
2100 */ 2100 */
2101 if (!hlock->trylock && (hlock->check == 2) && 2101 if (!hlock->trylock && hlock->check &&
2102 lookup_chain_cache(curr, hlock, chain_key)) { 2102 lookup_chain_cache(curr, hlock, chain_key)) {
2103 /* 2103 /*
2104 * Check whether last held lock: 2104 * Check whether last held lock:
@@ -2517,7 +2517,7 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
2517 2517
2518 BUG_ON(usage_bit >= LOCK_USAGE_STATES); 2518 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2519 2519
2520 if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys) 2520 if (!hlock->check)
2521 continue; 2521 continue;
2522 2522
2523 if (!mark_lock(curr, hlock, usage_bit)) 2523 if (!mark_lock(curr, hlock, usage_bit))
@@ -3055,9 +3055,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3055 int class_idx; 3055 int class_idx;
3056 u64 chain_key; 3056 u64 chain_key;
3057 3057
3058 if (!prove_locking)
3059 check = 1;
3060
3061 if (unlikely(!debug_locks)) 3058 if (unlikely(!debug_locks))
3062 return 0; 3059 return 0;
3063 3060
@@ -3069,8 +3066,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3069 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3066 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3070 return 0; 3067 return 0;
3071 3068
3072 if (lock->key == &__lockdep_no_validate__) 3069 if (!prove_locking || lock->key == &__lockdep_no_validate__)
3073 check = 1; 3070 check = 0;
3074 3071
3075 if (subclass < NR_LOCKDEP_CACHING_CLASSES) 3072 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3076 class = lock->class_cache[subclass]; 3073 class = lock->class_cache[subclass];
@@ -3138,7 +3135,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3138 hlock->holdtime_stamp = lockstat_clock(); 3135 hlock->holdtime_stamp = lockstat_clock();
3139#endif 3136#endif
3140 3137
3141 if (check == 2 && !mark_irqflags(curr, hlock)) 3138 if (check && !mark_irqflags(curr, hlock))
3142 return 0; 3139 return 0;
3143 3140
3144 /* mark it as used: */ 3141 /* mark it as used: */
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
new file mode 100644
index 000000000000..838dc9e00669
--- /dev/null
+++ b/kernel/locking/mcs_spinlock.c
@@ -0,0 +1,178 @@
+
+#include <linux/percpu.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include "mcs_spinlock.h"
+
+#ifdef CONFIG_SMP
+
+/*
+ * An MCS-like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ *
+ * Using a single mcs node per CPU is safe because sleeping locks should not be
+ * called from interrupt context and we have preemption disabled while
+ * spinning.
+ */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+
+/*
+ * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
+ * Can return NULL in case we were the last queued and we updated @lock instead.
+ */
+static inline struct optimistic_spin_queue *
+osq_wait_next(struct optimistic_spin_queue **lock,
+	      struct optimistic_spin_queue *node,
+	      struct optimistic_spin_queue *prev)
+{
+	struct optimistic_spin_queue *next = NULL;
+
+	for (;;) {
+		if (*lock == node && cmpxchg(lock, node, prev) == node) {
+			/*
+			 * We were the last queued, we moved @lock back. @prev
+			 * will now observe @lock and will complete its
+			 * unlock()/unqueue().
+			 */
+			break;
+		}
+
+		/*
+		 * We must xchg() the @node->next value, because if we were to
+		 * leave it in, a concurrent unlock()/unqueue() from
+		 * @node->next might complete Step-A and think its @prev is
+		 * still valid.
+		 *
+		 * If the concurrent unlock()/unqueue() wins the race, we'll
+		 * wait for either @lock to point to us, through its Step-B, or
+		 * wait for a new @node->next from its Step-C.
+		 */
+		if (node->next) {
+			next = xchg(&node->next, NULL);
+			if (next)
+				break;
+		}
+
+		arch_mutex_cpu_relax();
+	}
+
+	return next;
+}
+
+bool osq_lock(struct optimistic_spin_queue **lock)
+{
+	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
+	struct optimistic_spin_queue *prev, *next;
+
+	node->locked = 0;
+	node->next = NULL;
+
+	node->prev = prev = xchg(lock, node);
+	if (likely(prev == NULL))
+		return true;
+
+	ACCESS_ONCE(prev->next) = node;
+
+	/*
+	 * Normally @prev is untouchable after the above store; because at that
+	 * moment unlock can proceed and wipe the node element from stack.
+	 *
+	 * However, since our nodes are static per-cpu storage, we're
+	 * guaranteed their existence -- this allows us to apply
+	 * cmpxchg in an attempt to undo our queueing.
+	 */
+
+	while (!smp_load_acquire(&node->locked)) {
+		/*
+		 * If we need to reschedule, bail out so we can block.
+		 */
+		if (need_resched())
+			goto unqueue;
+
+		arch_mutex_cpu_relax();
+	}
+	return true;
+
+unqueue:
+	/*
+	 * Step - A -- stabilize @prev
+	 *
+	 * Undo our @prev->next assignment; this will make @prev's
+	 * unlock()/unqueue() wait for a next pointer since @lock points to us
+	 * (or later).
+	 */
+
+	for (;;) {
+		if (prev->next == node &&
+		    cmpxchg(&prev->next, node, NULL) == node)
+			break;
+
+		/*
+		 * We can only fail the cmpxchg() racing against an unlock(),
+		 * in which case we should observe @node->locked becoming
+		 * true.
+		 */
+		if (smp_load_acquire(&node->locked))
+			return true;
+
+		arch_mutex_cpu_relax();
+
+		/*
+		 * Or we race against a concurrent unqueue()'s step-B, in which
+		 * case its step-C will write us a new @node->prev pointer.
+		 */
+		prev = ACCESS_ONCE(node->prev);
+	}
+
+	/*
+	 * Step - B -- stabilize @next
+	 *
+	 * Similar to unlock(), wait for @node->next or move @lock from @node
+	 * back to @prev.
+	 */
+
+	next = osq_wait_next(lock, node, prev);
+	if (!next)
+		return false;
+
+	/*
+	 * Step - C -- unlink
+	 *
+	 * @prev is stable because it's still waiting for a new @prev->next
+	 * pointer, @next is stable because our @node->next pointer is NULL and
+	 * it will wait in Step-A.
+	 */
+
+	ACCESS_ONCE(next->prev) = prev;
+	ACCESS_ONCE(prev->next) = next;
+
+	return false;
+}
+
+void osq_unlock(struct optimistic_spin_queue **lock)
+{
+	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
+	struct optimistic_spin_queue *next;
+
+	/*
+	 * Fast path for the uncontended case.
+	 */
+	if (likely(cmpxchg(lock, node, NULL) == node))
+		return;
+
+	/*
+	 * Second most likely case.
+	 */
+	next = xchg(&node->next, NULL);
+	if (next) {
+		ACCESS_ONCE(next->locked) = 1;
+		return;
+	}
+
+	next = osq_wait_next(lock, node, NULL);
+	if (next)
+		ACCESS_ONCE(next->locked) = 1;
+}
+
+#endif
+
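
For context, osq_lock() returns false when the spinner had to unqueue itself (need_resched() was set), telling the caller to fall back to its sleeping slowpath. Below is a minimal usage sketch, not part of the patch: my_sleeping_lock, my_spin_then_block() and the acquisition test are hypothetical; only osq_lock()/osq_unlock() come from the file above.

	struct my_sleeping_lock {
		atomic_t			 count;	/* 1: unlocked, 0: locked */
		struct optimistic_spin_queue	*osq;	/* OSQ tail, NULL initially */
	};

	static int my_spin_then_block(struct my_sleeping_lock *lock)
	{
		if (!osq_lock(&lock->osq))	/* false: need_resched(), don't spin */
			goto slowpath;

		while (!need_resched()) {
			if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
				osq_unlock(&lock->osq);	/* acquired while spinning */
				return 0;
			}
			arch_mutex_cpu_relax();
		}
		osq_unlock(&lock->osq);
	slowpath:
		return -EAGAIN;			/* caller queues up and sleeps */
	}

The real conversion of the mutex slowpath to this scheme is in the kernel/locking/mutex.c hunks further down.
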
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
new file mode 100644
index 000000000000..a2dbac4aca6b
--- /dev/null
+++ b/kernel/locking/mcs_spinlock.h
@@ -0,0 +1,129 @@
+/*
+ * MCS lock defines
+ *
+ * This file contains the main data structure and API definitions of MCS lock.
+ *
+ * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
+ * with the desirable properties of being fair, and with each cpu trying
+ * to acquire the lock spinning on a local variable.
+ * It avoids the expensive cache bouncing that common test-and-set spin-lock
+ * implementations incur.
+ */
+#ifndef __LINUX_MCS_SPINLOCK_H
+#define __LINUX_MCS_SPINLOCK_H
+
+#include <asm/mcs_spinlock.h>
+
+struct mcs_spinlock {
+	struct mcs_spinlock *next;
+	int locked; /* 1 if lock acquired */
+};
+
+#ifndef arch_mcs_spin_lock_contended
+/*
+ * Using smp_load_acquire() provides a memory barrier that ensures
+ * subsequent operations happen after the lock is acquired.
+ */
+#define arch_mcs_spin_lock_contended(l)					\
+do {									\
+	while (!(smp_load_acquire(l)))					\
+		arch_mutex_cpu_relax();					\
+} while (0)
+#endif
+
+#ifndef arch_mcs_spin_unlock_contended
+/*
+ * smp_store_release() provides a memory barrier to ensure all
+ * operations in the critical section have been completed before
+ * unlocking.
+ */
+#define arch_mcs_spin_unlock_contended(l)				\
+	smp_store_release((l), 1)
+#endif
+
+/*
+ * Note: the smp_load_acquire/smp_store_release pair is not
+ * sufficient to form a full memory barrier across
+ * cpus for many architectures (except x86) for mcs_unlock and mcs_lock.
+ * For applications that need a full barrier across multiple cpus
+ * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be
+ * used after mcs_lock.
+ */
+
+/*
+ * In order to acquire the lock, the caller should declare a local node and
+ * pass a reference to the node to this function in addition to the lock.
+ * If the lock has already been acquired, then this will proceed to spin
+ * on this node->locked until the previous lock holder sets the node->locked
+ * in mcs_spin_unlock().
+ *
+ * We don't inline mcs_spin_lock() so that perf can correctly account for the
+ * time spent in this lock function.
+ */
+static inline
+void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+	struct mcs_spinlock *prev;
+
+	/* Init node */
+	node->locked = 0;
+	node->next   = NULL;
+
+	prev = xchg(lock, node);
+	if (likely(prev == NULL)) {
+		/*
+		 * Lock acquired, don't need to set node->locked to 1. Threads
+		 * only spin on their own node->locked value for lock
+		 * acquisition. However, since this thread can immediately
+		 * acquire the lock and does not proceed to spin on its own
+		 * node->locked, this value won't be used. If a debug mode is
+		 * needed to audit lock status, then set node->locked value
+		 * here.
+		 */
+		return;
+	}
+	ACCESS_ONCE(prev->next) = node;
+
+	/* Wait until the lock holder passes the lock down. */
+	arch_mcs_spin_lock_contended(&node->locked);
+}
+
+/*
+ * Releases the lock. The caller should pass in the corresponding node that
+ * was used to acquire the lock.
+ */
+static inline
+void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+	struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+
+	if (likely(!next)) {
+		/*
+		 * Release the lock by setting it to NULL
+		 */
+		if (likely(cmpxchg(lock, node, NULL) == node))
+			return;
+		/* Wait until the next pointer is set */
+		while (!(next = ACCESS_ONCE(node->next)))
+			arch_mutex_cpu_relax();
+	}
+
+	/* Pass lock to next waiter. */
+	arch_mcs_spin_unlock_contended(&next->locked);
+}
+
+/*
+ * Cancellable version of the MCS lock above.
+ *
+ * Intended for adaptive spinning of sleeping locks:
+ * mutex_lock()/rwsem_down_{read,write}() etc.
+ */
+
+struct optimistic_spin_queue {
+	struct optimistic_spin_queue *next, *prev;
+	int locked; /* 1 if lock acquired */
+};
+
+extern bool osq_lock(struct optimistic_spin_queue **lock);
+extern void osq_unlock(struct optimistic_spin_queue **lock);
+
+#endif /* __LINUX_MCS_SPINLOCK_H */
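
As a usage illustration (a sketch, not from the patch; my_tail and my_critical_op() are hypothetical names), each contender passes a node it owns -- typically on its own stack -- so every waiter spins only on its own cache line:

	static struct mcs_spinlock *my_tail;	/* NULL == unlocked */

	static void my_critical_op(void)
	{
		struct mcs_spinlock node;	/* this caller's queue node */

		mcs_spin_lock(&my_tail, &node);
		/* critical section: exactly one CPU at a time */
		mcs_spin_unlock(&my_tail, &node);
	}

Note that the node must stay valid until mcs_spin_unlock() returns, which is one reason the cancellable OSQ variant above uses static per-CPU nodes instead of stack storage.
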
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index faf6f5b53e77..e1191c996c59 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -83,6 +83,12 @@ void debug_mutex_unlock(struct mutex *lock)
 
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	mutex_clear_owner(lock);
+
+	/*
+	 * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
+	 * mutexes so that we can do it here after we've verified state.
+	 */
+	atomic_set(&lock->count, 1);
 }
 
 void debug_mutex_init(struct mutex *lock, const char *name,
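
The net effect for debug mutexes, paraphrased (the real release order is visible in the kernel/locking/mutex.c hunks below): with __mutex_slowpath_needs_to_unlock() forced to 0, the common unlock slowpath no longer sets the count itself, and the actual release happens here, while wait_lock is still held and after the debug checks have run:

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);	/* verifies state, then sets count to 1 */
	/* ...wake the first waiter, drop wait_lock... */
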
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4dd6e4c219de..14fe72cc8ce7 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
+#include "mcs_spinlock.h"
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -33,6 +34,13 @@
 #ifdef CONFIG_DEBUG_MUTEXES
 # include "mutex-debug.h"
 # include <asm-generic/mutex-null.h>
+/*
+ * Must be 0 for the debug case so we do not do the unlock outside of the
+ * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
+ * case.
+ */
+# undef __mutex_slowpath_needs_to_unlock
+# define __mutex_slowpath_needs_to_unlock()	0
 #else
 # include "mutex.h"
 # include <asm/mutex.h>
@@ -52,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	INIT_LIST_HEAD(&lock->wait_list);
 	mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	lock->spin_mlock = NULL;
+	lock->osq = NULL;
 #endif
 
 	debug_mutex_init(lock, name, key);
@@ -111,54 +119,7 @@ EXPORT_SYMBOL(mutex_lock);
  * more or less simultaneously, the spinners need to acquire an MCS lock
  * first before spinning on the owner field.
  *
- * We don't inline mspin_lock() so that perf can correctly account for the
- * time spent in this lock function.
  */
-struct mspin_node {
-	struct mspin_node *next;
-	int		   locked;	/* 1 if lock acquired */
-};
-#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))
-
-static noinline
-void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
-{
-	struct mspin_node *prev;
-
-	/* Init node */
-	node->locked = 0;
-	node->next   = NULL;
-
-	prev = xchg(lock, node);
-	if (likely(prev == NULL)) {
-		/* Lock acquired */
-		node->locked = 1;
-		return;
-	}
-	ACCESS_ONCE(prev->next) = node;
-	smp_wmb();
-	/* Wait until the lock holder passes the lock down */
-	while (!ACCESS_ONCE(node->locked))
-		arch_mutex_cpu_relax();
-}
-
-static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
-{
-	struct mspin_node *next = ACCESS_ONCE(node->next);
-
-	if (likely(!next)) {
-		/*
-		 * Release the lock by setting it to NULL
-		 */
-		if (cmpxchg(lock, node, NULL) == node)
-			return;
-		/* Wait until the next pointer is set */
-		while (!(next = ACCESS_ONCE(node->next)))
-			arch_mutex_cpu_relax();
-	}
-	ACCESS_ONCE(next->locked) = 1;
-	smp_wmb();
-}
 
 /*
  * Mutex spinning code migrated from kernel/sched/core.c
@@ -212,6 +173,9 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	struct task_struct *owner;
 	int retval = 1;
 
+	if (need_resched())
+		return 0;
+
 	rcu_read_lock();
 	owner = ACCESS_ONCE(lock->owner);
 	if (owner)
@@ -446,9 +410,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	if (!mutex_can_spin_on_owner(lock))
 		goto slowpath;
 
+	if (!osq_lock(&lock->osq))
+		goto slowpath;
+
 	for (;;) {
 		struct task_struct *owner;
-		struct mspin_node  node;
 
 		if (use_ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
@@ -463,19 +429,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			 * performed the optimistic spinning cannot be done.
 			 */
 			if (ACCESS_ONCE(ww->ctx))
-				goto slowpath;
+				break;
 		}
 
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
-		mspin_lock(MLOCK(lock), &node);
 		owner = ACCESS_ONCE(lock->owner);
-		if (owner && !mutex_spin_on_owner(lock, owner)) {
-			mspin_unlock(MLOCK(lock), &node);
-			goto slowpath;
-		}
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
 
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
@@ -488,11 +451,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		}
 
 			mutex_set_owner(lock);
-			mspin_unlock(MLOCK(lock), &node);
+			osq_unlock(&lock->osq);
 			preempt_enable();
 			return 0;
 		}
-		mspin_unlock(MLOCK(lock), &node);
 
 		/*
 		 * When there's no owner, we might have preempted between the
@@ -501,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * the owner complete.
 		 */
 		if (!owner && (need_resched() || rt_task(task)))
-			goto slowpath;
+			break;
 
 		/*
 		 * The cpu_relax() call is a compiler barrier which forces
@@ -511,7 +473,15 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 */
 		arch_mutex_cpu_relax();
 	}
+	osq_unlock(&lock->osq);
 slowpath:
+	/*
+	 * If we fell out of the spin path because of need_resched(),
+	 * reschedule now, before we try-lock the mutex. This avoids getting
+	 * scheduled out right after we obtained the mutex.
+	 */
+	if (need_resched())
+		schedule_preempt_disabled();
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
@@ -717,10 +687,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	spin_lock_mutex(&lock->wait_lock, flags);
-	mutex_release(&lock->dep_map, nested, _RET_IP_);
-	debug_mutex_unlock(lock);
-
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
 	 * case, others need to leave it locked. In the latter case we have to
@@ -729,6 +695,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
+	spin_lock_mutex(&lock->wait_lock, flags);
+	mutex_release(&lock->dep_map, nested, _RET_IP_);
+	debug_mutex_unlock(lock);
+
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =