author:    Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 13:59:39 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-03-31 13:59:39 -0400
commit:    462bf234a82ae1ae9d7628f59bc81022591e1348
tree:      f75eea7864ae7c72c0757d5d090e38f757b5cb2d
parent:    455c6fdbd219161bd09b1165f11699d6d73de11c
parent:    6f008e72cd111a119b5d8de8c5438d892aae99eb
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
"The biggest change is the MCS spinlock generalization changes from Tim
Chen, Peter Zijlstra, Jason Low et al. There's also lockdep
fixes/enhancements from Oleg Nesterov, in particular a false negative
fix related to lockdep_set_novalidate_class() usage"
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
locking/mutex: Fix debug checks
locking/mutexes: Add extra reschedule point
locking/mutexes: Introduce cancelable MCS lock for adaptive spinning
locking/mutexes: Unlock the mutex without the wait_lock
locking/mutexes: Modify the way optimistic spinners are queued
locking/mutexes: Return false if task need_resched() in mutex_can_spin_on_owner()
locking: Move mcs_spinlock.h into kernel/locking/
m68k: Skip futex_atomic_cmpxchg_inatomic() test
futex: Allow architectures to skip futex_atomic_cmpxchg_inatomic() test
Revert "sched/wait: Suppress Sparse 'variable shadowing' warning"
lockdep: Change lockdep_set_novalidate_class() to use _and_name
lockdep: Change mark_held_locks() to check hlock->check instead of lockdep_no_validate
lockdep: Don't create the wrong dependency on hlock->check == 0
lockdep: Make held_lock->check and "int check" argument bool
locking/mcs: Allow architecture specific asm files to be used for contended case
locking/mcs: Order the header files in Kbuild of each architecture in alphabetical order
sched/wait: Suppress Sparse 'variable shadowing' warning
hung_task/Documentation: Fix hung_task_warnings description
locking/mcs: Allow architectures to hook in to contended paths
locking/mcs: Micro-optimize the MCS code, add extra comments
...
46 files changed, 584 insertions, 212 deletions
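For readers unfamiliar with MCS locks, below is a minimal userspace sketch of the queued-lock idea this series generalizes. It is illustrative only: it uses GCC atomic builtins rather than the kernel's primitives, and none of these names exist in the tree. The key property is that each waiter spins on a flag in its own queue node, so contention does not bounce a single shared cache line between CPUs.

```c
#include <stddef.h>

struct mcs_node {
	struct mcs_node *next;
	int locked;			/* set to 1 by the previous holder */
};

static void mcs_lock(struct mcs_node **lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	node->next = NULL;
	node->locked = 0;
	/* atomically make ourselves the new tail of the queue */
	prev = __atomic_exchange_n(lock, node, __ATOMIC_ACQ_REL);
	if (!prev)
		return;			/* queue was empty: lock acquired */
	prev->next = node;		/* link behind the previous waiter */
	while (!__atomic_load_n(&node->locked, __ATOMIC_ACQUIRE))
		;			/* spin on our own node only */
}

static void mcs_unlock(struct mcs_node **lock, struct mcs_node *node)
{
	struct mcs_node *next = __atomic_load_n(&node->next, __ATOMIC_ACQUIRE);

	if (!next) {
		struct mcs_node *expected = node;

		/* no visible successor: try to empty the queue outright */
		if (__atomic_compare_exchange_n(lock, &expected, NULL, 0,
						__ATOMIC_RELEASE,
						__ATOMIC_RELAXED))
			return;
		/* a successor is mid-enqueue; wait for it to link in */
		while (!(next = __atomic_load_n(&node->next, __ATOMIC_ACQUIRE)))
			;
	}
	__atomic_store_n(&next->locked, 1, __ATOMIC_RELEASE);	/* hand off */
}
```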
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index e55124e7c40c..e1d28fbf7570 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt | |||
@@ -320,10 +320,11 @@ This file shows up if CONFIG_DETECT_HUNG_TASK is enabled. | |||
320 | 320 | ||
321 | ============================================================== | 321 | ============================================================== |
322 | 322 | ||
323 | hung_task_warning: | 323 | hung_task_warnings: |
324 | 324 | ||
325 | The maximum number of warnings to report. During a check interval | 325 | The maximum number of warnings to report. During a check interval |
326 | When this value is reached, no more the warnings will be reported. | 326 | if a hung task is detected, this value is decreased by 1. |
327 | When this value reaches 0, no more warnings will be reported. | ||
327 | This file shows up if CONFIG_DETECT_HUNG_TASK is enabled. | 328 | This file shows up if CONFIG_DETECT_HUNG_TASK is enabled. |
328 | 329 | ||
329 | -1: report an infinite number of warnings. | 330 | -1: report an infinite number of warnings. |
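To make the corrected semantics concrete, a hedged sketch in plain C (this mirrors the documented behaviour, not kernel/hung_task.c itself; the variable name only echoes the sysctl):

```c
#include <stdio.h>

static int hung_task_warnings = 10;	/* /proc/sys/kernel/hung_task_warnings */

static void maybe_warn_hung_task(const char *comm)
{
	if (hung_task_warnings == 0)
		return;			/* warning budget exhausted: stay silent */
	if (hung_task_warnings > 0)
		hung_task_warnings--;	/* -1 never decrements: warn forever */
	fprintf(stderr, "INFO: task %s blocked for too long.\n", comm);
}
```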
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index a73a8e208a4a..7736f426ff3b 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild | |||
@@ -1,7 +1,8 @@ | |||
1 | 1 | ||
2 | generic-y += clkdev.h | ||
3 | 2 | ||
3 | generic-y += clkdev.h | ||
4 | generic-y += exec.h | 4 | generic-y += exec.h |
5 | generic-y += trace_clock.h | ||
6 | generic-y += preempt.h | ||
7 | generic-y += hash.h | 5 | generic-y += hash.h |
6 | generic-y += mcs_spinlock.h | ||
7 | generic-y += preempt.h | ||
8 | generic-y += trace_clock.h | ||
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 0d3362991c31..e76fd79f32b0 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild | |||
@@ -1,15 +1,15 @@ | |||
1 | generic-y += auxvec.h | 1 | generic-y += auxvec.h |
2 | generic-y += barrier.h | 2 | generic-y += barrier.h |
3 | generic-y += bugs.h | ||
4 | generic-y += bitsperlong.h | 3 | generic-y += bitsperlong.h |
4 | generic-y += bugs.h | ||
5 | generic-y += clkdev.h | 5 | generic-y += clkdev.h |
6 | generic-y += cputime.h | 6 | generic-y += cputime.h |
7 | generic-y += device.h | 7 | generic-y += device.h |
8 | generic-y += div64.h | 8 | generic-y += div64.h |
9 | generic-y += emergency-restart.h | 9 | generic-y += emergency-restart.h |
10 | generic-y += errno.h | 10 | generic-y += errno.h |
11 | generic-y += fcntl.h | ||
12 | generic-y += fb.h | 11 | generic-y += fb.h |
12 | generic-y += fcntl.h | ||
13 | generic-y += ftrace.h | 13 | generic-y += ftrace.h |
14 | generic-y += hardirq.h | 14 | generic-y += hardirq.h |
15 | generic-y += hash.h | 15 | generic-y += hash.h |
@@ -22,6 +22,7 @@ generic-y += kmap_types.h | |||
22 | generic-y += kvm_para.h | 22 | generic-y += kvm_para.h |
23 | generic-y += local.h | 23 | generic-y += local.h |
24 | generic-y += local64.h | 24 | generic-y += local64.h |
25 | generic-y += mcs_spinlock.h | ||
25 | generic-y += mman.h | 26 | generic-y += mman.h |
26 | generic-y += msgbuf.h | 27 | generic-y += msgbuf.h |
27 | generic-y += param.h | 28 | generic-y += param.h |
@@ -30,6 +31,7 @@ generic-y += pci.h | |||
30 | generic-y += percpu.h | 31 | generic-y += percpu.h |
31 | generic-y += poll.h | 32 | generic-y += poll.h |
32 | generic-y += posix_types.h | 33 | generic-y += posix_types.h |
34 | generic-y += preempt.h | ||
33 | generic-y += resource.h | 35 | generic-y += resource.h |
34 | generic-y += scatterlist.h | 36 | generic-y += scatterlist.h |
35 | generic-y += sembuf.h | 37 | generic-y += sembuf.h |
@@ -48,4 +50,3 @@ generic-y += ucontext.h | |||
48 | generic-y += user.h | 50 | generic-y += user.h |
49 | generic-y += vga.h | 51 | generic-y += vga.h |
50 | generic-y += xor.h | 52 | generic-y += xor.h |
51 | generic-y += preempt.h | ||
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 3278afe2c3ab..23e728ecf8ab 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild | |||
@@ -7,16 +7,19 @@ generic-y += current.h | |||
7 | generic-y += emergency-restart.h | 7 | generic-y += emergency-restart.h |
8 | generic-y += errno.h | 8 | generic-y += errno.h |
9 | generic-y += exec.h | 9 | generic-y += exec.h |
10 | generic-y += hash.h | ||
10 | generic-y += ioctl.h | 11 | generic-y += ioctl.h |
11 | generic-y += ipcbuf.h | 12 | generic-y += ipcbuf.h |
12 | generic-y += irq_regs.h | 13 | generic-y += irq_regs.h |
13 | generic-y += kdebug.h | 14 | generic-y += kdebug.h |
14 | generic-y += local.h | 15 | generic-y += local.h |
15 | generic-y += local64.h | 16 | generic-y += local64.h |
17 | generic-y += mcs_spinlock.h | ||
16 | generic-y += msgbuf.h | 18 | generic-y += msgbuf.h |
17 | generic-y += param.h | 19 | generic-y += param.h |
18 | generic-y += parport.h | 20 | generic-y += parport.h |
19 | generic-y += poll.h | 21 | generic-y += poll.h |
22 | generic-y += preempt.h | ||
20 | generic-y += resource.h | 23 | generic-y += resource.h |
21 | generic-y += sections.h | 24 | generic-y += sections.h |
22 | generic-y += segment.h | 25 | generic-y += segment.h |
@@ -33,5 +36,3 @@ generic-y += termios.h | |||
33 | generic-y += timex.h | 36 | generic-y += timex.h |
34 | generic-y += trace_clock.h | 37 | generic-y += trace_clock.h |
35 | generic-y += unaligned.h | 38 | generic-y += unaligned.h |
36 | generic-y += preempt.h | ||
37 | generic-y += hash.h | ||
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 71c53ecfcc3a..3bdfdda70567 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild | |||
@@ -12,6 +12,7 @@ generic-y += dma.h | |||
12 | generic-y += emergency-restart.h | 12 | generic-y += emergency-restart.h |
13 | generic-y += errno.h | 13 | generic-y += errno.h |
14 | generic-y += ftrace.h | 14 | generic-y += ftrace.h |
15 | generic-y += hash.h | ||
15 | generic-y += hw_irq.h | 16 | generic-y += hw_irq.h |
16 | generic-y += ioctl.h | 17 | generic-y += ioctl.h |
17 | generic-y += ioctls.h | 18 | generic-y += ioctls.h |
@@ -22,12 +23,14 @@ generic-y += kmap_types.h | |||
22 | generic-y += kvm_para.h | 23 | generic-y += kvm_para.h |
23 | generic-y += local.h | 24 | generic-y += local.h |
24 | generic-y += local64.h | 25 | generic-y += local64.h |
26 | generic-y += mcs_spinlock.h | ||
25 | generic-y += mman.h | 27 | generic-y += mman.h |
26 | generic-y += msgbuf.h | 28 | generic-y += msgbuf.h |
27 | generic-y += mutex.h | 29 | generic-y += mutex.h |
28 | generic-y += pci.h | 30 | generic-y += pci.h |
29 | generic-y += poll.h | 31 | generic-y += poll.h |
30 | generic-y += posix_types.h | 32 | generic-y += posix_types.h |
33 | generic-y += preempt.h | ||
31 | generic-y += resource.h | 34 | generic-y += resource.h |
32 | generic-y += scatterlist.h | 35 | generic-y += scatterlist.h |
33 | generic-y += sections.h | 36 | generic-y += sections.h |
@@ -38,8 +41,8 @@ generic-y += shmbuf.h | |||
38 | generic-y += sizes.h | 41 | generic-y += sizes.h |
39 | generic-y += socket.h | 42 | generic-y += socket.h |
40 | generic-y += sockios.h | 43 | generic-y += sockios.h |
41 | generic-y += switch_to.h | ||
42 | generic-y += swab.h | 44 | generic-y += swab.h |
45 | generic-y += switch_to.h | ||
43 | generic-y += termbits.h | 46 | generic-y += termbits.h |
44 | generic-y += termios.h | 47 | generic-y += termios.h |
45 | generic-y += topology.h | 48 | generic-y += topology.h |
@@ -49,5 +52,3 @@ generic-y += unaligned.h | |||
49 | generic-y += user.h | 52 | generic-y += user.h |
50 | generic-y += vga.h | 53 | generic-y += vga.h |
51 | generic-y += xor.h | 54 | generic-y += xor.h |
52 | generic-y += preempt.h | ||
53 | generic-y += hash.h | ||
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index c7c64a63c29f..00a0f3ccd6eb 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild | |||
@@ -1,22 +1,23 @@ | |||
1 | 1 | ||
2 | generic-y += clkdev.h | 2 | generic-y += clkdev.h |
3 | generic-y += cputime.h | 3 | generic-y += cputime.h |
4 | generic-y += delay.h | 4 | generic-y += delay.h |
5 | generic-y += device.h | 5 | generic-y += device.h |
6 | generic-y += div64.h | 6 | generic-y += div64.h |
7 | generic-y += emergency-restart.h | 7 | generic-y += emergency-restart.h |
8 | generic-y += exec.h | 8 | generic-y += exec.h |
9 | generic-y += futex.h | 9 | generic-y += futex.h |
10 | generic-y += preempt.h | 10 | generic-y += hash.h |
11 | generic-y += irq_regs.h | 11 | generic-y += irq_regs.h |
12 | generic-y += param.h | 12 | generic-y += local.h |
13 | generic-y += local.h | 13 | generic-y += local64.h |
14 | generic-y += local64.h | 14 | generic-y += mcs_spinlock.h |
15 | generic-y += percpu.h | 15 | generic-y += param.h |
16 | generic-y += scatterlist.h | 16 | generic-y += percpu.h |
17 | generic-y += sections.h | 17 | generic-y += preempt.h |
18 | generic-y += topology.h | 18 | generic-y += scatterlist.h |
19 | generic-y += trace_clock.h | 19 | generic-y += sections.h |
20 | generic-y += topology.h | ||
21 | generic-y += trace_clock.h | ||
20 | generic-y += vga.h | 22 | generic-y += vga.h |
21 | generic-y += xor.h | 23 | generic-y += xor.h |
22 | generic-y += hash.h | ||
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 359d36fdc247..0d93b9a79ca9 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild | |||
@@ -10,6 +10,7 @@ generic-y += emergency-restart.h | |||
10 | generic-y += errno.h | 10 | generic-y += errno.h |
11 | generic-y += fb.h | 11 | generic-y += fb.h |
12 | generic-y += futex.h | 12 | generic-y += futex.h |
13 | generic-y += hash.h | ||
13 | generic-y += hw_irq.h | 14 | generic-y += hw_irq.h |
14 | generic-y += ioctl.h | 15 | generic-y += ioctl.h |
15 | generic-y += ipcbuf.h | 16 | generic-y += ipcbuf.h |
@@ -17,14 +18,16 @@ generic-y += irq_regs.h | |||
17 | generic-y += kdebug.h | 18 | generic-y += kdebug.h |
18 | generic-y += kmap_types.h | 19 | generic-y += kmap_types.h |
19 | generic-y += kvm_para.h | 20 | generic-y += kvm_para.h |
20 | generic-y += local64.h | ||
21 | generic-y += local.h | 21 | generic-y += local.h |
22 | generic-y += local64.h | ||
23 | generic-y += mcs_spinlock.h | ||
22 | generic-y += mman.h | 24 | generic-y += mman.h |
23 | generic-y += msgbuf.h | 25 | generic-y += msgbuf.h |
24 | generic-y += mutex.h | 26 | generic-y += mutex.h |
25 | generic-y += param.h | 27 | generic-y += param.h |
26 | generic-y += percpu.h | 28 | generic-y += percpu.h |
27 | generic-y += pgalloc.h | 29 | generic-y += pgalloc.h |
30 | generic-y += preempt.h | ||
28 | generic-y += resource.h | 31 | generic-y += resource.h |
29 | generic-y += scatterlist.h | 32 | generic-y += scatterlist.h |
30 | generic-y += sembuf.h | 33 | generic-y += sembuf.h |
@@ -44,5 +47,3 @@ generic-y += ucontext.h | |||
44 | generic-y += unaligned.h | 47 | generic-y += unaligned.h |
45 | generic-y += user.h | 48 | generic-y += user.h |
46 | generic-y += xor.h | 49 | generic-y += xor.h |
47 | generic-y += preempt.h | ||
48 | generic-y += hash.h | ||
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index d73bb85ccdd3..8dbdce8421b0 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild | |||
@@ -15,6 +15,7 @@ generic-y += exec.h | |||
15 | generic-y += fb.h | 15 | generic-y += fb.h |
16 | generic-y += fcntl.h | 16 | generic-y += fcntl.h |
17 | generic-y += futex.h | 17 | generic-y += futex.h |
18 | generic-y += hash.h | ||
18 | generic-y += hw_irq.h | 19 | generic-y += hw_irq.h |
19 | generic-y += io.h | 20 | generic-y += io.h |
20 | generic-y += ioctl.h | 21 | generic-y += ioctl.h |
@@ -24,6 +25,7 @@ generic-y += irq_regs.h | |||
24 | generic-y += kdebug.h | 25 | generic-y += kdebug.h |
25 | generic-y += kmap_types.h | 26 | generic-y += kmap_types.h |
26 | generic-y += local.h | 27 | generic-y += local.h |
28 | generic-y += mcs_spinlock.h | ||
27 | generic-y += mman.h | 29 | generic-y += mman.h |
28 | generic-y += mmu.h | 30 | generic-y += mmu.h |
29 | generic-y += mmu_context.h | 31 | generic-y += mmu_context.h |
@@ -34,6 +36,7 @@ generic-y += percpu.h | |||
34 | generic-y += pgalloc.h | 36 | generic-y += pgalloc.h |
35 | generic-y += poll.h | 37 | generic-y += poll.h |
36 | generic-y += posix_types.h | 38 | generic-y += posix_types.h |
39 | generic-y += preempt.h | ||
37 | generic-y += resource.h | 40 | generic-y += resource.h |
38 | generic-y += scatterlist.h | 41 | generic-y += scatterlist.h |
39 | generic-y += segment.h | 42 | generic-y += segment.h |
@@ -56,5 +59,3 @@ generic-y += ucontext.h | |||
56 | generic-y += user.h | 59 | generic-y += user.h |
57 | generic-y += vga.h | 60 | generic-y += vga.h |
58 | generic-y += xor.h | 61 | generic-y += xor.h |
59 | generic-y += preempt.h | ||
60 | generic-y += hash.h | ||
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index f3fd8768f095..056027f38351 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild | |||
@@ -9,8 +9,9 @@ generic-y += exec.h | |||
9 | generic-y += hash.h | 9 | generic-y += hash.h |
10 | generic-y += kvm_para.h | 10 | generic-y += kvm_para.h |
11 | generic-y += linkage.h | 11 | generic-y += linkage.h |
12 | generic-y += mcs_spinlock.h | ||
12 | generic-y += module.h | 13 | generic-y += module.h |
14 | generic-y += preempt.h | ||
13 | generic-y += trace_clock.h | 15 | generic-y += trace_clock.h |
14 | generic-y += vga.h | 16 | generic-y += vga.h |
15 | generic-y += xor.h | 17 | generic-y += xor.h |
16 | generic-y += preempt.h | ||
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index bc42f14c9c2e..babb9338ebf8 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild | |||
@@ -1,6 +1,7 @@ | |||
1 | 1 | ||
2 | generic-y += clkdev.h | 2 | generic-y += clkdev.h |
3 | generic-y += exec.h | 3 | generic-y += exec.h |
4 | generic-y += trace_clock.h | ||
5 | generic-y += preempt.h | ||
6 | generic-y += hash.h | 4 | generic-y += hash.h |
5 | generic-y += mcs_spinlock.h | ||
6 | generic-y += preempt.h | ||
7 | generic-y += trace_clock.h | ||
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 38ca45d3df1e..eadcc118f950 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild | |||
@@ -25,14 +25,16 @@ generic-y += ipcbuf.h | |||
25 | generic-y += irq_regs.h | 25 | generic-y += irq_regs.h |
26 | generic-y += kdebug.h | 26 | generic-y += kdebug.h |
27 | generic-y += kmap_types.h | 27 | generic-y += kmap_types.h |
28 | generic-y += local64.h | ||
29 | generic-y += local.h | 28 | generic-y += local.h |
29 | generic-y += local64.h | ||
30 | generic-y += mcs_spinlock.h | ||
30 | generic-y += mman.h | 31 | generic-y += mman.h |
31 | generic-y += msgbuf.h | 32 | generic-y += msgbuf.h |
32 | generic-y += pci.h | 33 | generic-y += pci.h |
33 | generic-y += percpu.h | 34 | generic-y += percpu.h |
34 | generic-y += poll.h | 35 | generic-y += poll.h |
35 | generic-y += posix_types.h | 36 | generic-y += posix_types.h |
37 | generic-y += preempt.h | ||
36 | generic-y += resource.h | 38 | generic-y += resource.h |
37 | generic-y += rwsem.h | 39 | generic-y += rwsem.h |
38 | generic-y += scatterlist.h | 40 | generic-y += scatterlist.h |
@@ -45,8 +47,8 @@ generic-y += siginfo.h | |||
45 | generic-y += sizes.h | 47 | generic-y += sizes.h |
46 | generic-y += socket.h | 48 | generic-y += socket.h |
47 | generic-y += sockios.h | 49 | generic-y += sockios.h |
48 | generic-y += statfs.h | ||
49 | generic-y += stat.h | 50 | generic-y += stat.h |
51 | generic-y += statfs.h | ||
50 | generic-y += termbits.h | 52 | generic-y += termbits.h |
51 | generic-y += termios.h | 53 | generic-y += termios.h |
52 | generic-y += topology.h | 54 | generic-y += topology.h |
@@ -55,4 +57,3 @@ generic-y += types.h | |||
55 | generic-y += ucontext.h | 57 | generic-y += ucontext.h |
56 | generic-y += unaligned.h | 58 | generic-y += unaligned.h |
57 | generic-y += xor.h | 59 | generic-y += xor.h |
58 | generic-y += preempt.h | ||
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 283a83154b5e..0da4aa2602ae 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild | |||
@@ -1,8 +1,9 @@ | |||
1 | 1 | ||
2 | generic-y += clkdev.h | 2 | generic-y += clkdev.h |
3 | generic-y += exec.h | 3 | generic-y += exec.h |
4 | generic-y += hash.h | ||
4 | generic-y += kvm_para.h | 5 | generic-y += kvm_para.h |
5 | generic-y += trace_clock.h | 6 | generic-y += mcs_spinlock.h |
6 | generic-y += preempt.h | 7 | generic-y += preempt.h |
8 | generic-y += trace_clock.h | ||
7 | generic-y += vtime.h | 9 | generic-y += vtime.h |
8 | generic-y += hash.h | ||
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index 932435ac4e5c..5825a35b2c56 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild | |||
@@ -1,7 +1,8 @@ | |||
1 | 1 | ||
2 | generic-y += clkdev.h | 2 | generic-y += clkdev.h |
3 | generic-y += exec.h | 3 | generic-y += exec.h |
4 | generic-y += hash.h | ||
5 | generic-y += mcs_spinlock.h | ||
4 | generic-y += module.h | 6 | generic-y += module.h |
5 | generic-y += trace_clock.h | ||
6 | generic-y += preempt.h | 7 | generic-y += preempt.h |
7 | generic-y += hash.h | 8 | generic-y += trace_clock.h |
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index dbdd2231c75d..b2e322939256 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig | |||
@@ -17,6 +17,7 @@ config M68K | |||
17 | select FPU if MMU | 17 | select FPU if MMU |
18 | select ARCH_WANT_IPC_PARSE_VERSION | 18 | select ARCH_WANT_IPC_PARSE_VERSION |
19 | select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE | 19 | select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE |
20 | select HAVE_FUTEX_CMPXCHG if MMU && FUTEX | ||
20 | select HAVE_MOD_ARCH_SPECIFIC | 21 | select HAVE_MOD_ARCH_SPECIFIC |
21 | select MODULES_USE_ELF_REL | 22 | select MODULES_USE_ELF_REL |
22 | select MODULES_USE_ELF_RELA | 23 | select MODULES_USE_ELF_RELA |
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 6fb9e813a910..c67c94a2d672 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild | |||
@@ -14,8 +14,9 @@ generic-y += irq_regs.h | |||
14 | generic-y += kdebug.h | 14 | generic-y += kdebug.h |
15 | generic-y += kmap_types.h | 15 | generic-y += kmap_types.h |
16 | generic-y += kvm_para.h | 16 | generic-y += kvm_para.h |
17 | generic-y += local64.h | ||
18 | generic-y += local.h | 17 | generic-y += local.h |
18 | generic-y += local64.h | ||
19 | generic-y += mcs_spinlock.h | ||
19 | generic-y += mman.h | 20 | generic-y += mman.h |
20 | generic-y += mutex.h | 21 | generic-y += mutex.h |
21 | generic-y += percpu.h | 22 | generic-y += percpu.h |
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index b716d807c2ec..c29ead89a317 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild | |||
@@ -13,6 +13,7 @@ generic-y += fb.h | |||
13 | generic-y += fcntl.h | 13 | generic-y += fcntl.h |
14 | generic-y += futex.h | 14 | generic-y += futex.h |
15 | generic-y += hardirq.h | 15 | generic-y += hardirq.h |
16 | generic-y += hash.h | ||
16 | generic-y += hw_irq.h | 17 | generic-y += hw_irq.h |
17 | generic-y += ioctl.h | 18 | generic-y += ioctl.h |
18 | generic-y += ioctls.h | 19 | generic-y += ioctls.h |
@@ -23,6 +24,7 @@ generic-y += kmap_types.h | |||
23 | generic-y += kvm_para.h | 24 | generic-y += kvm_para.h |
24 | generic-y += local.h | 25 | generic-y += local.h |
25 | generic-y += local64.h | 26 | generic-y += local64.h |
27 | generic-y += mcs_spinlock.h | ||
26 | generic-y += msgbuf.h | 28 | generic-y += msgbuf.h |
27 | generic-y += mutex.h | 29 | generic-y += mutex.h |
28 | generic-y += param.h | 30 | generic-y += param.h |
@@ -30,6 +32,7 @@ generic-y += pci.h | |||
30 | generic-y += percpu.h | 32 | generic-y += percpu.h |
31 | generic-y += poll.h | 33 | generic-y += poll.h |
32 | generic-y += posix_types.h | 34 | generic-y += posix_types.h |
35 | generic-y += preempt.h | ||
33 | generic-y += scatterlist.h | 36 | generic-y += scatterlist.h |
34 | generic-y += sections.h | 37 | generic-y += sections.h |
35 | generic-y += sembuf.h | 38 | generic-y += sembuf.h |
@@ -52,5 +55,3 @@ generic-y += unaligned.h | |||
52 | generic-y += user.h | 55 | generic-y += user.h |
53 | generic-y += vga.h | 56 | generic-y += vga.h |
54 | generic-y += xor.h | 57 | generic-y += xor.h |
55 | generic-y += preempt.h | ||
56 | generic-y += hash.h | ||
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 2b98bc73642a..1f590ab8f323 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild | |||
@@ -3,6 +3,7 @@ generic-y += barrier.h | |||
3 | generic-y += clkdev.h | 3 | generic-y += clkdev.h |
4 | generic-y += exec.h | 4 | generic-y += exec.h |
5 | generic-y += hash.h | 5 | generic-y += hash.h |
6 | generic-y += trace_clock.h | 6 | generic-y += mcs_spinlock.h |
7 | generic-y += syscalls.h | ||
8 | generic-y += preempt.h | 7 | generic-y += preempt.h |
8 | generic-y += syscalls.h | ||
9 | generic-y += trace_clock.h | ||
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 2d7f65052c1f..05439187891d 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild | |||
@@ -2,16 +2,17 @@ | |||
2 | generic-y += cputime.h | 2 | generic-y += cputime.h |
3 | generic-y += current.h | 3 | generic-y += current.h |
4 | generic-y += emergency-restart.h | 4 | generic-y += emergency-restart.h |
5 | generic-y += hash.h | ||
5 | generic-y += local64.h | 6 | generic-y += local64.h |
7 | generic-y += mcs_spinlock.h | ||
6 | generic-y += mutex.h | 8 | generic-y += mutex.h |
7 | generic-y += parport.h | 9 | generic-y += parport.h |
8 | generic-y += percpu.h | 10 | generic-y += percpu.h |
11 | generic-y += preempt.h | ||
9 | generic-y += scatterlist.h | 12 | generic-y += scatterlist.h |
10 | generic-y += sections.h | 13 | generic-y += sections.h |
11 | generic-y += segment.h | 14 | generic-y += segment.h |
12 | generic-y += serial.h | 15 | generic-y += serial.h |
13 | generic-y += trace_clock.h | 16 | generic-y += trace_clock.h |
14 | generic-y += preempt.h | ||
15 | generic-y += ucontext.h | 17 | generic-y += ucontext.h |
16 | generic-y += xor.h | 18 | generic-y += xor.h |
17 | generic-y += hash.h | ||
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 992e989ab785..cbc6b9bf45da 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild | |||
@@ -3,5 +3,6 @@ generic-y += barrier.h | |||
3 | generic-y += clkdev.h | 3 | generic-y += clkdev.h |
4 | generic-y += exec.h | 4 | generic-y += exec.h |
5 | generic-y += hash.h | 5 | generic-y += hash.h |
6 | generic-y += trace_clock.h | 6 | generic-y += mcs_spinlock.h |
7 | generic-y += preempt.h | 7 | generic-y += preempt.h |
8 | generic-y += trace_clock.h | ||
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 2e40f1ca8667..480af0d9c2f5 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild | |||
@@ -10,8 +10,8 @@ generic-y += bugs.h | |||
10 | generic-y += cacheflush.h | 10 | generic-y += cacheflush.h |
11 | generic-y += checksum.h | 11 | generic-y += checksum.h |
12 | generic-y += clkdev.h | 12 | generic-y += clkdev.h |
13 | generic-y += cmpxchg.h | ||
14 | generic-y += cmpxchg-local.h | 13 | generic-y += cmpxchg-local.h |
14 | generic-y += cmpxchg.h | ||
15 | generic-y += cputime.h | 15 | generic-y += cputime.h |
16 | generic-y += current.h | 16 | generic-y += current.h |
17 | generic-y += device.h | 17 | generic-y += device.h |
@@ -25,6 +25,7 @@ generic-y += fcntl.h | |||
25 | generic-y += ftrace.h | 25 | generic-y += ftrace.h |
26 | generic-y += futex.h | 26 | generic-y += futex.h |
27 | generic-y += hardirq.h | 27 | generic-y += hardirq.h |
28 | generic-y += hash.h | ||
28 | generic-y += hw_irq.h | 29 | generic-y += hw_irq.h |
29 | generic-y += ioctl.h | 30 | generic-y += ioctl.h |
30 | generic-y += ioctls.h | 31 | generic-y += ioctls.h |
@@ -34,6 +35,7 @@ generic-y += kdebug.h | |||
34 | generic-y += kmap_types.h | 35 | generic-y += kmap_types.h |
35 | generic-y += kvm_para.h | 36 | generic-y += kvm_para.h |
36 | generic-y += local.h | 37 | generic-y += local.h |
38 | generic-y += mcs_spinlock.h | ||
37 | generic-y += mman.h | 39 | generic-y += mman.h |
38 | generic-y += module.h | 40 | generic-y += module.h |
39 | generic-y += msgbuf.h | 41 | generic-y += msgbuf.h |
@@ -41,6 +43,7 @@ generic-y += pci.h | |||
41 | generic-y += percpu.h | 43 | generic-y += percpu.h |
42 | generic-y += poll.h | 44 | generic-y += poll.h |
43 | generic-y += posix_types.h | 45 | generic-y += posix_types.h |
46 | generic-y += preempt.h | ||
44 | generic-y += resource.h | 47 | generic-y += resource.h |
45 | generic-y += scatterlist.h | 48 | generic-y += scatterlist.h |
46 | generic-y += sections.h | 49 | generic-y += sections.h |
@@ -53,11 +56,11 @@ generic-y += siginfo.h | |||
53 | generic-y += signal.h | 56 | generic-y += signal.h |
54 | generic-y += socket.h | 57 | generic-y += socket.h |
55 | generic-y += sockios.h | 58 | generic-y += sockios.h |
56 | generic-y += statfs.h | ||
57 | generic-y += stat.h | 59 | generic-y += stat.h |
60 | generic-y += statfs.h | ||
58 | generic-y += string.h | 61 | generic-y += string.h |
59 | generic-y += switch_to.h | ||
60 | generic-y += swab.h | 62 | generic-y += swab.h |
63 | generic-y += switch_to.h | ||
61 | generic-y += termbits.h | 64 | generic-y += termbits.h |
62 | generic-y += termios.h | 65 | generic-y += termios.h |
63 | generic-y += topology.h | 66 | generic-y += topology.h |
@@ -68,5 +71,3 @@ generic-y += user.h | |||
68 | generic-y += vga.h | 71 | generic-y += vga.h |
69 | generic-y += word-at-a-time.h | 72 | generic-y += word-at-a-time.h |
70 | generic-y += xor.h | 73 | generic-y += xor.h |
71 | generic-y += preempt.h | ||
72 | generic-y += hash.h | ||
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 752c981bc3c7..ecf25e6678ad 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild | |||
@@ -1,9 +1,29 @@ | |||
1 | 1 | ||
2 | generic-y += auxvec.h | ||
2 | generic-y += barrier.h | 3 | generic-y += barrier.h |
3 | generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \ | 4 | generic-y += clkdev.h |
4 | segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \ | 5 | generic-y += cputime.h |
5 | div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \ | 6 | generic-y += device.h |
6 | poll.h xor.h clkdev.h exec.h | 7 | generic-y += div64.h |
7 | generic-y += trace_clock.h | 8 | generic-y += emergency-restart.h |
8 | generic-y += preempt.h | 9 | generic-y += exec.h |
9 | generic-y += hash.h | 10 | generic-y += hash.h |
11 | generic-y += hw_irq.h | ||
12 | generic-y += irq_regs.h | ||
13 | generic-y += kdebug.h | ||
14 | generic-y += kvm_para.h | ||
15 | generic-y += local.h | ||
16 | generic-y += local64.h | ||
17 | generic-y += mcs_spinlock.h | ||
18 | generic-y += mutex.h | ||
19 | generic-y += param.h | ||
20 | generic-y += percpu.h | ||
21 | generic-y += poll.h | ||
22 | generic-y += preempt.h | ||
23 | generic-y += segment.h | ||
24 | generic-y += topology.h | ||
25 | generic-y += trace_clock.h | ||
26 | generic-y += user.h | ||
27 | generic-y += vga.h | ||
28 | generic-y += word-at-a-time.h | ||
29 | generic-y += xor.h | ||
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 6c0a955a1b06..3fb1bc432f4f 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild | |||
@@ -1,7 +1,8 @@ | |||
1 | 1 | ||
2 | generic-y += clkdev.h | 2 | generic-y += clkdev.h |
3 | generic-y += hash.h | ||
4 | generic-y += mcs_spinlock.h | ||
5 | generic-y += preempt.h | ||
3 | generic-y += rwsem.h | 6 | generic-y += rwsem.h |
4 | generic-y += trace_clock.h | 7 | generic-y += trace_clock.h |
5 | generic-y += preempt.h | ||
6 | generic-y += vtime.h | 8 | generic-y += vtime.h |
7 | generic-y += hash.h | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 65a07750f4f9..bb74b21f007a 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -117,6 +117,7 @@ config S390 | |||
117 | select HAVE_FUNCTION_GRAPH_TRACER | 117 | select HAVE_FUNCTION_GRAPH_TRACER |
118 | select HAVE_FUNCTION_TRACER | 118 | select HAVE_FUNCTION_TRACER |
119 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 119 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
120 | select HAVE_FUTEX_CMPXCHG if FUTEX | ||
120 | select HAVE_KERNEL_BZIP2 | 121 | select HAVE_KERNEL_BZIP2 |
121 | select HAVE_KERNEL_GZIP | 122 | select HAVE_KERNEL_GZIP |
122 | select HAVE_KERNEL_LZ4 | 123 | select HAVE_KERNEL_LZ4 |
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 8386a4a1f19a..57892a8a9055 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild | |||
@@ -1,6 +1,7 @@ | |||
1 | 1 | ||
2 | 2 | ||
3 | generic-y += clkdev.h | 3 | generic-y += clkdev.h |
4 | generic-y += trace_clock.h | ||
5 | generic-y += preempt.h | ||
6 | generic-y += hash.h | 4 | generic-y += hash.h |
5 | generic-y += mcs_spinlock.h | ||
6 | generic-y += preempt.h | ||
7 | generic-y += trace_clock.h | ||
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 146b9d5e89f8..4630cf217b5b 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild | |||
@@ -1,10 +1,11 @@ | |||
1 | 1 | ||
2 | header-y += | 2 | header-y += |
3 | 3 | ||
4 | |||
4 | generic-y += barrier.h | 5 | generic-y += barrier.h |
5 | generic-y += clkdev.h | 6 | generic-y += clkdev.h |
6 | generic-y += hash.h | 7 | generic-y += hash.h |
8 | generic-y += mcs_spinlock.h | ||
9 | generic-y += preempt.h | ||
7 | generic-y += trace_clock.h | 10 | generic-y += trace_clock.h |
8 | generic-y += xor.h | 11 | generic-y += xor.h |
9 | generic-y += preempt.h | ||
10 | |||
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 0cd7198a4524..c19e47dacb31 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild | |||
@@ -8,18 +8,21 @@ generic-y += emergency-restart.h | |||
8 | generic-y += errno.h | 8 | generic-y += errno.h |
9 | generic-y += exec.h | 9 | generic-y += exec.h |
10 | generic-y += fcntl.h | 10 | generic-y += fcntl.h |
11 | generic-y += hash.h | ||
11 | generic-y += ioctl.h | 12 | generic-y += ioctl.h |
12 | generic-y += ipcbuf.h | 13 | generic-y += ipcbuf.h |
13 | generic-y += irq_regs.h | 14 | generic-y += irq_regs.h |
14 | generic-y += kvm_para.h | 15 | generic-y += kvm_para.h |
15 | generic-y += local.h | 16 | generic-y += local.h |
16 | generic-y += local64.h | 17 | generic-y += local64.h |
18 | generic-y += mcs_spinlock.h | ||
19 | generic-y += mman.h | ||
20 | generic-y += msgbuf.h | ||
17 | generic-y += param.h | 21 | generic-y += param.h |
18 | generic-y += parport.h | 22 | generic-y += parport.h |
19 | generic-y += percpu.h | 23 | generic-y += percpu.h |
20 | generic-y += poll.h | 24 | generic-y += poll.h |
21 | generic-y += mman.h | 25 | generic-y += preempt.h |
22 | generic-y += msgbuf.h | ||
23 | generic-y += resource.h | 26 | generic-y += resource.h |
24 | generic-y += scatterlist.h | 27 | generic-y += scatterlist.h |
25 | generic-y += sembuf.h | 28 | generic-y += sembuf.h |
@@ -34,5 +37,3 @@ generic-y += termios.h | |||
34 | generic-y += trace_clock.h | 37 | generic-y += trace_clock.h |
35 | generic-y += ucontext.h | 38 | generic-y += ucontext.h |
36 | generic-y += xor.h | 39 | generic-y += xor.h |
37 | generic-y += preempt.h | ||
38 | generic-y += hash.h | ||
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 4b60a0c325ec..a45821818003 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild | |||
@@ -6,15 +6,16 @@ generic-y += cputime.h | |||
6 | generic-y += div64.h | 6 | generic-y += div64.h |
7 | generic-y += emergency-restart.h | 7 | generic-y += emergency-restart.h |
8 | generic-y += exec.h | 8 | generic-y += exec.h |
9 | generic-y += linkage.h | 9 | generic-y += hash.h |
10 | generic-y += local64.h | ||
11 | generic-y += mutex.h | ||
12 | generic-y += irq_regs.h | 10 | generic-y += irq_regs.h |
11 | generic-y += linkage.h | ||
13 | generic-y += local.h | 12 | generic-y += local.h |
13 | generic-y += local64.h | ||
14 | generic-y += mcs_spinlock.h | ||
14 | generic-y += module.h | 15 | generic-y += module.h |
16 | generic-y += mutex.h | ||
17 | generic-y += preempt.h | ||
15 | generic-y += serial.h | 18 | generic-y += serial.h |
16 | generic-y += trace_clock.h | 19 | generic-y += trace_clock.h |
17 | generic-y += types.h | 20 | generic-y += types.h |
18 | generic-y += word-at-a-time.h | 21 | generic-y += word-at-a-time.h |
19 | generic-y += preempt.h | ||
20 | generic-y += hash.h | ||
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index 3793c75e45d9..0aa5675e7025 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild | |||
@@ -11,6 +11,7 @@ generic-y += errno.h | |||
11 | generic-y += exec.h | 11 | generic-y += exec.h |
12 | generic-y += fb.h | 12 | generic-y += fb.h |
13 | generic-y += fcntl.h | 13 | generic-y += fcntl.h |
14 | generic-y += hash.h | ||
14 | generic-y += hw_irq.h | 15 | generic-y += hw_irq.h |
15 | generic-y += ioctl.h | 16 | generic-y += ioctl.h |
16 | generic-y += ioctls.h | 17 | generic-y += ioctls.h |
@@ -18,12 +19,14 @@ generic-y += ipcbuf.h | |||
18 | generic-y += irq_regs.h | 19 | generic-y += irq_regs.h |
19 | generic-y += local.h | 20 | generic-y += local.h |
20 | generic-y += local64.h | 21 | generic-y += local64.h |
22 | generic-y += mcs_spinlock.h | ||
21 | generic-y += msgbuf.h | 23 | generic-y += msgbuf.h |
22 | generic-y += mutex.h | 24 | generic-y += mutex.h |
23 | generic-y += param.h | 25 | generic-y += param.h |
24 | generic-y += parport.h | 26 | generic-y += parport.h |
25 | generic-y += poll.h | 27 | generic-y += poll.h |
26 | generic-y += posix_types.h | 28 | generic-y += posix_types.h |
29 | generic-y += preempt.h | ||
27 | generic-y += resource.h | 30 | generic-y += resource.h |
28 | generic-y += scatterlist.h | 31 | generic-y += scatterlist.h |
29 | generic-y += sembuf.h | 32 | generic-y += sembuf.h |
@@ -38,5 +41,3 @@ generic-y += termios.h | |||
38 | generic-y += trace_clock.h | 41 | generic-y += trace_clock.h |
39 | generic-y += types.h | 42 | generic-y += types.h |
40 | generic-y += xor.h | 43 | generic-y += xor.h |
41 | generic-y += preempt.h | ||
42 | generic-y += hash.h | ||
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 88a330dcdede..a5e4b6068213 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild | |||
@@ -1,8 +1,28 @@ | |||
1 | generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h | ||
2 | generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h | ||
3 | generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h | ||
4 | generic-y += switch_to.h clkdev.h | ||
5 | generic-y += trace_clock.h | ||
6 | generic-y += preempt.h | ||
7 | generic-y += hash.h | ||
8 | generic-y += barrier.h | 1 | generic-y += barrier.h |
2 | generic-y += bug.h | ||
3 | generic-y += clkdev.h | ||
4 | generic-y += cputime.h | ||
5 | generic-y += current.h | ||
6 | generic-y += delay.h | ||
7 | generic-y += device.h | ||
8 | generic-y += emergency-restart.h | ||
9 | generic-y += exec.h | ||
10 | generic-y += ftrace.h | ||
11 | generic-y += futex.h | ||
12 | generic-y += hardirq.h | ||
13 | generic-y += hash.h | ||
14 | generic-y += hw_irq.h | ||
15 | generic-y += io.h | ||
16 | generic-y += irq_regs.h | ||
17 | generic-y += kdebug.h | ||
18 | generic-y += mcs_spinlock.h | ||
19 | generic-y += mutex.h | ||
20 | generic-y += param.h | ||
21 | generic-y += pci.h | ||
22 | generic-y += percpu.h | ||
23 | generic-y += preempt.h | ||
24 | generic-y += sections.h | ||
25 | generic-y += switch_to.h | ||
26 | generic-y += topology.h | ||
27 | generic-y += trace_clock.h | ||
28 | generic-y += xor.h | ||
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 3ef4f9d9bf5d..1e5fb872a4aa 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild | |||
@@ -16,6 +16,7 @@ generic-y += fcntl.h | |||
16 | generic-y += ftrace.h | 16 | generic-y += ftrace.h |
17 | generic-y += futex.h | 17 | generic-y += futex.h |
18 | generic-y += hardirq.h | 18 | generic-y += hardirq.h |
19 | generic-y += hash.h | ||
19 | generic-y += hw_irq.h | 20 | generic-y += hw_irq.h |
20 | generic-y += ioctl.h | 21 | generic-y += ioctl.h |
21 | generic-y += ioctls.h | 22 | generic-y += ioctls.h |
@@ -24,6 +25,7 @@ generic-y += irq_regs.h | |||
24 | generic-y += kdebug.h | 25 | generic-y += kdebug.h |
25 | generic-y += kmap_types.h | 26 | generic-y += kmap_types.h |
26 | generic-y += local.h | 27 | generic-y += local.h |
28 | generic-y += mcs_spinlock.h | ||
27 | generic-y += mman.h | 29 | generic-y += mman.h |
28 | generic-y += module.h | 30 | generic-y += module.h |
29 | generic-y += msgbuf.h | 31 | generic-y += msgbuf.h |
@@ -32,6 +34,7 @@ generic-y += parport.h | |||
32 | generic-y += percpu.h | 34 | generic-y += percpu.h |
33 | generic-y += poll.h | 35 | generic-y += poll.h |
34 | generic-y += posix_types.h | 36 | generic-y += posix_types.h |
37 | generic-y += preempt.h | ||
35 | generic-y += resource.h | 38 | generic-y += resource.h |
36 | generic-y += scatterlist.h | 39 | generic-y += scatterlist.h |
37 | generic-y += sections.h | 40 | generic-y += sections.h |
@@ -60,5 +63,3 @@ generic-y += unaligned.h | |||
60 | generic-y += user.h | 63 | generic-y += user.h |
61 | generic-y += vga.h | 64 | generic-y += vga.h |
62 | generic-y += xor.h | 65 | generic-y += xor.h |
63 | generic-y += preempt.h | ||
64 | generic-y += hash.h | ||
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 7f669853317a..a8fee078b92f 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild | |||
@@ -5,3 +5,4 @@ genhdr-y += unistd_64.h | |||
5 | genhdr-y += unistd_x32.h | 5 | genhdr-y += unistd_x32.h |
6 | 6 | ||
7 | generic-y += clkdev.h | 7 | generic-y += clkdev.h |
8 | generic-y += mcs_spinlock.h | ||
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 0a337e4a8370..c3d20ba6eb86 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild | |||
@@ -9,6 +9,7 @@ generic-y += errno.h | |||
9 | generic-y += exec.h | 9 | generic-y += exec.h |
10 | generic-y += fcntl.h | 10 | generic-y += fcntl.h |
11 | generic-y += hardirq.h | 11 | generic-y += hardirq.h |
12 | generic-y += hash.h | ||
12 | generic-y += ioctl.h | 13 | generic-y += ioctl.h |
13 | generic-y += irq_regs.h | 14 | generic-y += irq_regs.h |
14 | generic-y += kdebug.h | 15 | generic-y += kdebug.h |
@@ -17,7 +18,9 @@ generic-y += kvm_para.h | |||
17 | generic-y += linkage.h | 18 | generic-y += linkage.h |
18 | generic-y += local.h | 19 | generic-y += local.h |
19 | generic-y += local64.h | 20 | generic-y += local64.h |
21 | generic-y += mcs_spinlock.h | ||
20 | generic-y += percpu.h | 22 | generic-y += percpu.h |
23 | generic-y += preempt.h | ||
21 | generic-y += resource.h | 24 | generic-y += resource.h |
22 | generic-y += scatterlist.h | 25 | generic-y += scatterlist.h |
23 | generic-y += sections.h | 26 | generic-y += sections.h |
@@ -27,5 +30,3 @@ generic-y += termios.h | |||
27 | generic-y += topology.h | 30 | generic-y += topology.h |
28 | generic-y += trace_clock.h | 31 | generic-y += trace_clock.h |
29 | generic-y += xor.h | 32 | generic-y += xor.h |
30 | generic-y += preempt.h | ||
31 | generic-y += hash.h | ||
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index d8a55e87877f..0ffb0cbe2823 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c | |||
@@ -39,17 +39,10 @@ | |||
39 | lock_acquire(&(l)->dep_map, s, t, r, c, n, i) | 39 | lock_acquire(&(l)->dep_map, s, t, r, c, n, i) |
40 | # define __rel(l, n, i) \ | 40 | # define __rel(l, n, i) \ |
41 | lock_release(&(l)->dep_map, n, i) | 41 | lock_release(&(l)->dep_map, n, i) |
42 | # ifdef CONFIG_PROVE_LOCKING | 42 | #define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i) |
43 | # define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 2, NULL, i) | 43 | #define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i) |
44 | # define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 2, n, i) | 44 | #define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i) |
45 | # define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 2, NULL, i) | 45 | #define lockdep_release(l, n, i) __rel(l, n, i) |
46 | # define lockdep_release(l, n, i) __rel(l, n, i) | ||
47 | # else | ||
48 | # define lockdep_acquire(l, s, t, i) __acq(l, s, t, 0, 1, NULL, i) | ||
49 | # define lockdep_acquire_nest(l, s, t, n, i) __acq(l, s, t, 0, 1, n, i) | ||
50 | # define lockdep_acquire_read(l, s, t, i) __acq(l, s, t, 1, 1, NULL, i) | ||
51 | # define lockdep_release(l, n, i) __rel(l, n, i) | ||
52 | # endif | ||
53 | #else | 46 | #else |
54 | # define lockdep_acquire(l, s, t, i) do { } while (0) | 47 | # define lockdep_acquire(l, s, t, i) do { } while (0) |
55 | # define lockdep_acquire_nest(l, s, t, n, i) do { } while (0) | 48 | # define lockdep_acquire_nest(l, s, t, n, i) do { } while (0) |
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h new file mode 100644 index 000000000000..10cd4ffc6ba2 --- /dev/null +++ b/include/asm-generic/mcs_spinlock.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_MCS_SPINLOCK_H | ||
2 | #define __ASM_MCS_SPINLOCK_H | ||
3 | |||
4 | /* | ||
5 | * Architectures can define their own: | ||
6 | * | ||
7 | * arch_mcs_spin_lock_contended(l) | ||
8 | * arch_mcs_spin_unlock_contended(l) | ||
9 | * | ||
10 | * See kernel/locking/mcs_spinlock.c. | ||
11 | */ | ||
12 | |||
13 | #endif /* __ASM_MCS_SPINLOCK_H */ | ||
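The new header is intentionally empty: it only provides the override point. The generic contended-path fallbacks live in kernel/locking/mcs_spinlock.h and look roughly like the following (paraphrased from the series, where `l` points at the waiter's own `locked` word):

```c
#ifndef arch_mcs_spin_lock_contended
/* Spin with acquire semantics on our own node until the owner hands off. */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		arch_mutex_cpu_relax();					\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/* Pass the lock to the next waiter with release semantics. */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif
```

An architecture that wants a different contended path (for example, one based on a wait-for-event instruction) supplies its own asm/mcs_spinlock.h instead of the generic-y stub added to each Kbuild above.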
diff --git a/include/linux/futex.h b/include/linux/futex.h index b0d95cac826e..6435f46d6e13 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h | |||
@@ -55,7 +55,11 @@ union futex_key { | |||
55 | #ifdef CONFIG_FUTEX | 55 | #ifdef CONFIG_FUTEX |
56 | extern void exit_robust_list(struct task_struct *curr); | 56 | extern void exit_robust_list(struct task_struct *curr); |
57 | extern void exit_pi_state_list(struct task_struct *curr); | 57 | extern void exit_pi_state_list(struct task_struct *curr); |
58 | #ifdef CONFIG_HAVE_FUTEX_CMPXCHG | ||
59 | #define futex_cmpxchg_enabled 1 | ||
60 | #else | ||
58 | extern int futex_cmpxchg_enabled; | 61 | extern int futex_cmpxchg_enabled; |
62 | #endif | ||
59 | #else | 63 | #else |
60 | static inline void exit_robust_list(struct task_struct *curr) | 64 | static inline void exit_robust_list(struct task_struct *curr) |
61 | { | 65 | { |
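The reason for the #ifdef: when an architecture selects HAVE_FUTEX_CMPXCHG, `futex_cmpxchg_enabled` becomes a compile-time constant, so the runtime guards sprinkled through kernel/futex.c fold away. A hedged sketch of the pattern (the function name here is hypothetical):

```c
static long set_robust_list_sketch(void)
{
	if (!futex_cmpxchg_enabled)	/* becomes "if (0)" under           */
		return -ENOSYS;		/* CONFIG_HAVE_FUTEX_CMPXCHG, so    */
					/* the compiler drops the branch    */
	/* ... register the robust list ... */
	return 0;
}
```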
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 92b1bfc5da60..060e5137fd80 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -252,9 +252,9 @@ struct held_lock { | |||
252 | unsigned int trylock:1; /* 16 bits */ | 252 | unsigned int trylock:1; /* 16 bits */ |
253 | 253 | ||
254 | unsigned int read:2; /* see lock_acquire() comment */ | 254 | unsigned int read:2; /* see lock_acquire() comment */ |
255 | unsigned int check:2; /* see lock_acquire() comment */ | 255 | unsigned int check:1; /* see lock_acquire() comment */ |
256 | unsigned int hardirqs_off:1; | 256 | unsigned int hardirqs_off:1; |
257 | unsigned int references:11; /* 32 bits */ | 257 | unsigned int references:12; /* 32 bits */ |
258 | }; | 258 | }; |
259 | 259 | ||
260 | /* | 260 | /* |
@@ -303,7 +303,7 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
303 | (lock)->dep_map.key, sub) | 303 | (lock)->dep_map.key, sub) |
304 | 304 | ||
305 | #define lockdep_set_novalidate_class(lock) \ | 305 | #define lockdep_set_novalidate_class(lock) \ |
306 | lockdep_set_class(lock, &__lockdep_no_validate__) | 306 | lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) |
307 | /* | 307 | /* |
308 | * Compare locking classes | 308 | * Compare locking classes |
309 | */ | 309 | */ |
@@ -326,9 +326,8 @@ static inline int lockdep_match_key(struct lockdep_map *lock, | |||
326 | * | 326 | * |
327 | * Values for check: | 327 | * Values for check: |
328 | * | 328 | * |
329 | * 0: disabled | 329 | * 0: simple checks (freeing, held-at-exit-time, etc.) |
330 | * 1: simple checks (freeing, held-at-exit-time, etc.) | 330 | * 1: full validation |
331 | * 2: full validation | ||
332 | */ | 331 | */ |
333 | extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 332 | extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
334 | int trylock, int read, int check, | 333 | int trylock, int read, int check, |
@@ -479,15 +478,9 @@ static inline void print_irqtrace_events(struct task_struct *curr) | |||
479 | * on the per lock-class debug mode: | 478 | * on the per lock-class debug mode: |
480 | */ | 479 | */ |
481 | 480 | ||
482 | #ifdef CONFIG_PROVE_LOCKING | 481 | #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
483 | #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) | 482 | #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) |
484 | #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 2, n, i) | 483 | #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) |
485 | #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 2, n, i) | ||
486 | #else | ||
487 | #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) | ||
488 | #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) | ||
489 | #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) | ||
490 | #endif | ||
491 | 484 | ||
492 | #define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) | 485 | #define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
493 | #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) | 486 | #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
@@ -518,13 +511,13 @@ static inline void print_irqtrace_events(struct task_struct *curr) | |||
518 | # define might_lock(lock) \ | 511 | # define might_lock(lock) \ |
519 | do { \ | 512 | do { \ |
520 | typecheck(struct lockdep_map *, &(lock)->dep_map); \ | 513 | typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
521 | lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ | 514 | lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \ |
522 | lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ | 515 | lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
523 | } while (0) | 516 | } while (0) |
524 | # define might_lock_read(lock) \ | 517 | # define might_lock_read(lock) \ |
525 | do { \ | 518 | do { \ |
526 | typecheck(struct lockdep_map *, &(lock)->dep_map); \ | 519 | typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
527 | lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \ | 520 | lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ |
528 | lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ | 521 | lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
529 | } while (0) | 522 | } while (0) |
530 | #else | 523 | #else |
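For context on the false-negative fix mentioned in the merge message: the novalidate class marks locks whose ordering is too dynamic for lockdep to model (the driver core's per-device mutex is the canonical user). A hedged usage sketch with hypothetical names:

```c
struct widget {
	struct mutex lock;	/* taken along parent/child chains lockdep can't model */
};

static void widget_init(struct widget *w)
{
	mutex_init(&w->lock);
	/*
	 * With the change above this expands to
	 * lockdep_set_class_and_name(&w->lock, &__lockdep_no_validate__,
	 * "&w->lock"), so reports show a named class; and because such
	 * locks now carry held_lock->check == 0, lockdep skips them
	 * consistently instead of feeding wrong dependencies into the
	 * graph.
	 */
	lockdep_set_novalidate_class(&w->lock);
}
```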
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index d3181936c138..11692dea18aa 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -46,6 +46,7 @@ | |||
46 | * - detects multi-task circular deadlocks and prints out all affected | 46 | * - detects multi-task circular deadlocks and prints out all affected |
47 | * locks and tasks (and only those tasks) | 47 | * locks and tasks (and only those tasks) |
48 | */ | 48 | */ |
49 | struct optimistic_spin_queue; | ||
49 | struct mutex { | 50 | struct mutex { |
50 | /* 1: unlocked, 0: locked, negative: locked, possible waiters */ | 51 | /* 1: unlocked, 0: locked, negative: locked, possible waiters */ |
51 | atomic_t count; | 52 | atomic_t count; |
@@ -55,7 +56,7 @@ struct mutex { | |||
55 | struct task_struct *owner; | 56 | struct task_struct *owner; |
56 | #endif | 57 | #endif |
57 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER | 58 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
58 | void *spin_mlock; /* Spinner MCS lock */ | 59 | struct optimistic_spin_queue *osq; /* Spinner MCS lock */ |
59 | #endif | 60 | #endif |
60 | #ifdef CONFIG_DEBUG_MUTEXES | 61 | #ifdef CONFIG_DEBUG_MUTEXES |
61 | const char *name; | 62 | const char *name; |
@@ -179,4 +180,4 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); | |||
179 | # define arch_mutex_cpu_relax() cpu_relax() | 180 | # define arch_mutex_cpu_relax() cpu_relax() |
180 | #endif | 181 | #endif |
181 | 182 | ||
182 | #endif | 183 | #endif /* __LINUX_MUTEX_H */ |
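The opaque `void *` becomes a typed pointer. Roughly, the cancelable-MCS node the series introduces is doubly linked (a sketch of the shape, not a verbatim copy):

```c
struct optimistic_spin_queue {
	struct optimistic_spin_queue *next, *prev;
	int locked;		/* 1 if lock acquired */
};
```

The `prev` link is the point: unlike a classic MCS waiter, a mutex spinner that observes need_resched() can unlink itself from the middle of the queue and reschedule rather than spin indefinitely, which is what "cancelable MCS lock for adaptive spinning" in the shortlog refers to.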
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 72bf3a01a4ee..adff3c99dcaa 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -314,7 +314,7 @@ static inline bool rcu_lockdep_current_cpu_online(void) | |||
314 | 314 | ||
315 | static inline void rcu_lock_acquire(struct lockdep_map *map) | 315 | static inline void rcu_lock_acquire(struct lockdep_map *map) |
316 | { | 316 | { |
317 | lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); | 317 | lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); |
318 | } | 318 | } |
319 | 319 | ||
320 | static inline void rcu_lock_release(struct lockdep_map *map) | 320 | static inline void rcu_lock_release(struct lockdep_map *map) |
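This hunk is a mechanical renumbering, not a behaviour change: under the old encoding check == 1 meant "simple checks", which the collapsed boolean encoding spells check == 0:

```c
/* old encoding: 0 = off, 1 = simple checks, 2 = full validation */
lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);	/* before */
/* new encoding: 0 = simple checks, 1 = full validation */
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);	/* after: same behaviour */
```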
diff --git a/init/Kconfig b/init/Kconfig index 009a797dd242..d56cb03c1b49 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1387,6 +1387,13 @@ config FUTEX | |||
1387 | support for "fast userspace mutexes". The resulting kernel may not | 1387 | support for "fast userspace mutexes". The resulting kernel may not |
1388 | run glibc-based applications correctly. | 1388 | run glibc-based applications correctly. |
1389 | 1389 | ||
1390 | config HAVE_FUTEX_CMPXCHG | ||
1391 | bool | ||
1392 | help | ||
1393 | Architectures should select this if futex_atomic_cmpxchg_inatomic() | ||
1394 | is implemented and always working. This removes a couple of runtime | ||
1395 | checks. | ||
1396 | |||
1390 | config EPOLL | 1397 | config EPOLL |
1391 | bool "Enable eventpoll support" if EXPERT | 1398 | bool "Enable eventpoll support" if EXPERT |
1392 | default y | 1399 | default y |
diff --git a/kernel/futex.c b/kernel/futex.c index 08ec814ad9d2..67dacaf93e56 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -157,7 +157,9 @@ | |||
157 | * enqueue. | 157 | * enqueue. |
158 | */ | 158 | */ |
159 | 159 | ||
160 | #ifndef CONFIG_HAVE_FUTEX_CMPXCHG | ||
160 | int __read_mostly futex_cmpxchg_enabled; | 161 | int __read_mostly futex_cmpxchg_enabled; |
162 | #endif | ||
161 | 163 | ||
162 | /* | 164 | /* |
163 | * Futex flags used to encode options to functions and preserve them across | 165 | * Futex flags used to encode options to functions and preserve them across |
@@ -2875,9 +2877,28 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, | |||
2875 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); | 2877 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
2876 | } | 2878 | } |
2877 | 2879 | ||
2878 | static int __init futex_init(void) | 2880 | static void __init futex_detect_cmpxchg(void) |
2879 | { | 2881 | { |
2882 | #ifndef CONFIG_HAVE_FUTEX_CMPXCHG | ||
2880 | u32 curval; | 2883 | u32 curval; |
2884 | |||
2885 | /* | ||
2886 | * This will fail and we want it. Some arch implementations do | ||
2887 | * runtime detection of the futex_atomic_cmpxchg_inatomic() | ||
2888 | * functionality. We want to know that before we call in any | ||
2889 | * of the complex code paths. Also we want to prevent | ||
2890 | * registration of robust lists in that case. NULL is | ||
2891 | * guaranteed to fault and we get -EFAULT on functional | ||
2892 | * implementation, the non-functional ones will return | ||
2893 | * -ENOSYS. | ||
2894 | */ | ||
2895 | if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) | ||
2896 | futex_cmpxchg_enabled = 1; | ||
2897 | #endif | ||
2898 | } | ||
2899 | |||
2900 | static int __init futex_init(void) | ||
2901 | { | ||
2881 | unsigned int futex_shift; | 2902 | unsigned int futex_shift; |
2882 | unsigned long i; | 2903 | unsigned long i; |
2883 | 2904 | ||
@@ -2893,18 +2914,8 @@ static int __init futex_init(void) | |||
2893 | &futex_shift, NULL, | 2914 | &futex_shift, NULL, |
2894 | futex_hashsize, futex_hashsize); | 2915 | futex_hashsize, futex_hashsize); |
2895 | futex_hashsize = 1UL << futex_shift; | 2916 | futex_hashsize = 1UL << futex_shift; |
2896 | /* | 2917 | |
2897 | * This will fail and we want it. Some arch implementations do | 2918 | futex_detect_cmpxchg(); |
2898 | * runtime detection of the futex_atomic_cmpxchg_inatomic() | ||
2899 | * functionality. We want to know that before we call in any | ||
2900 | * of the complex code paths. Also we want to prevent | ||
2901 | * registration of robust lists in that case. NULL is | ||
2902 | * guaranteed to fault and we get -EFAULT on functional | ||
2903 | * implementation, the non-functional ones will return | ||
2904 | * -ENOSYS. | ||
2905 | */ | ||
2906 | if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) | ||
2907 | futex_cmpxchg_enabled = 1; | ||
2908 | 2919 | ||
2909 | for (i = 0; i < futex_hashsize; i++) { | 2920 | for (i = 0; i < futex_hashsize; i++) { |
2910 | atomic_set(&futex_queues[i].waiters, 0); | 2921 | atomic_set(&futex_queues[i].waiters, 0); |
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index baab8e5e7f66..2a9ee96ecf00 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | 1 | ||
2 | obj-y += mutex.o semaphore.o rwsem.o lglock.o | 2 | obj-y += mutex.o semaphore.o rwsem.o lglock.o mcs_spinlock.o |
3 | 3 | ||
4 | ifdef CONFIG_FUNCTION_TRACER | 4 | ifdef CONFIG_FUNCTION_TRACER |
5 | CFLAGS_REMOVE_lockdep.o = -pg | 5 | CFLAGS_REMOVE_lockdep.o = -pg |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index eb8a54783fa0..bf0c6b0dd9c5 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -1936,12 +1936,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
1936 | 1936 | ||
1937 | for (;;) { | 1937 | for (;;) { |
1938 | int distance = curr->lockdep_depth - depth + 1; | 1938 | int distance = curr->lockdep_depth - depth + 1; |
1939 | hlock = curr->held_locks + depth-1; | 1939 | hlock = curr->held_locks + depth - 1; |
1940 | /* | 1940 | /* |
1941 | * Only non-recursive-read entries get new dependencies | 1941 | * Only non-recursive-read entries get new dependencies |
1942 | * added: | 1942 | * added: |
1943 | */ | 1943 | */ |
1944 | if (hlock->read != 2) { | 1944 | if (hlock->read != 2 && hlock->check) { |
1945 | if (!check_prev_add(curr, hlock, next, | 1945 | if (!check_prev_add(curr, hlock, next, |
1946 | distance, trylock_loop)) | 1946 | distance, trylock_loop)) |
1947 | return 0; | 1947 | return 0; |
@@ -2098,7 +2098,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, | |||
2098 | * (If lookup_chain_cache() returns with 1 it acquires | 2098 | * (If lookup_chain_cache() returns with 1 it acquires |
2099 | * graph_lock for us) | 2099 | * graph_lock for us) |
2100 | */ | 2100 | */ |
2101 | if (!hlock->trylock && (hlock->check == 2) && | 2101 | if (!hlock->trylock && hlock->check && |
2102 | lookup_chain_cache(curr, hlock, chain_key)) { | 2102 | lookup_chain_cache(curr, hlock, chain_key)) { |
2103 | /* | 2103 | /* |
2104 | * Check whether last held lock: | 2104 | * Check whether last held lock: |
@@ -2517,7 +2517,7 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark) | |||
2517 | 2517 | ||
2518 | BUG_ON(usage_bit >= LOCK_USAGE_STATES); | 2518 | BUG_ON(usage_bit >= LOCK_USAGE_STATES); |
2519 | 2519 | ||
2520 | if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys) | 2520 | if (!hlock->check) |
2521 | continue; | 2521 | continue; |
2522 | 2522 | ||
2523 | if (!mark_lock(curr, hlock, usage_bit)) | 2523 | if (!mark_lock(curr, hlock, usage_bit)) |
@@ -3055,9 +3055,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3055 | int class_idx; | 3055 | int class_idx; |
3056 | u64 chain_key; | 3056 | u64 chain_key; |
3057 | 3057 | ||
3058 | if (!prove_locking) | ||
3059 | check = 1; | ||
3060 | |||
3061 | if (unlikely(!debug_locks)) | 3058 | if (unlikely(!debug_locks)) |
3062 | return 0; | 3059 | return 0; |
3063 | 3060 | ||
@@ -3069,8 +3066,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3069 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) | 3066 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
3070 | return 0; | 3067 | return 0; |
3071 | 3068 | ||
3072 | if (lock->key == &__lockdep_no_validate__) | 3069 | if (!prove_locking || lock->key == &__lockdep_no_validate__) |
3073 | check = 1; | 3070 | check = 0; |
3074 | 3071 | ||
3075 | if (subclass < NR_LOCKDEP_CACHING_CLASSES) | 3072 | if (subclass < NR_LOCKDEP_CACHING_CLASSES) |
3076 | class = lock->class_cache[subclass]; | 3073 | class = lock->class_cache[subclass]; |
@@ -3138,7 +3135,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3138 | hlock->holdtime_stamp = lockstat_clock(); | 3135 | hlock->holdtime_stamp = lockstat_clock(); |
3139 | #endif | 3136 | #endif |
3140 | 3137 | ||
3141 | if (check == 2 && !mark_irqflags(curr, hlock)) | 3138 | if (check && !mark_irqflags(curr, hlock)) |
3142 | return 0; | 3139 | return 0; |
3143 | 3140 | ||
3144 | /* mark it as used: */ | 3141 | /* mark it as used: */ |
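
The net effect of the lockdep changes is that held_lock->check degrades from a three-valued int to a plain bool: validation is either on or off, and lockdep_set_novalidate_class() plus !prove_locking now funnel into the same check = 0 path instead of the old magic value 1. A sketch of how a lock class opts out of validation (illustrative usage, assuming an ordinary mutex):

	struct mutex m;

	mutex_init(&m);
	/* every held_lock for this lock will now carry check == 0 */
	lockdep_set_novalidate_class(&m);
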
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c new file mode 100644 index 000000000000..838dc9e00669 --- /dev/null +++ b/kernel/locking/mcs_spinlock.c | |||
@@ -0,0 +1,178 @@ | |||
1 | |||
2 | #include <linux/percpu.h> | ||
3 | #include <linux/mutex.h> | ||
4 | #include <linux/sched.h> | ||
5 | #include "mcs_spinlock.h" | ||
6 | |||
7 | #ifdef CONFIG_SMP | ||
8 | |||
9 | /* | ||
10 | * An MCS-like lock especially tailored for optimistic spinning of sleeping | ||
11 | * lock implementations (mutex, rwsem, etc). | ||
12 | * | ||
13 | * Using a single mcs node per CPU is safe because sleeping locks should not be | ||
14 | * called from interrupt context and we have preemption disabled while | ||
15 | * spinning. | ||
16 | */ | ||
17 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node); | ||
18 | |||
19 | /* | ||
20 | * Get a stable @node->next pointer, either for unlock() or unqueue() purposes. | ||
21 | * Can return NULL in case we were the last queued and we updated @lock instead. | ||
22 | */ | ||
23 | static inline struct optimistic_spin_queue * | ||
24 | osq_wait_next(struct optimistic_spin_queue **lock, | ||
25 | struct optimistic_spin_queue *node, | ||
26 | struct optimistic_spin_queue *prev) | ||
27 | { | ||
28 | struct optimistic_spin_queue *next = NULL; | ||
29 | |||
30 | for (;;) { | ||
31 | if (*lock == node && cmpxchg(lock, node, prev) == node) { | ||
32 | /* | ||
33 | * We were the last queued, we moved @lock back. @prev | ||
34 | * will now observe @lock and will complete its | ||
35 | * unlock()/unqueue(). | ||
36 | */ | ||
37 | break; | ||
38 | } | ||
39 | |||
40 | /* | ||
41 | * We must xchg() the @node->next value, because if we were to | ||
42 | * leave it in, a concurrent unlock()/unqueue() from | ||
43 | * @node->next might complete Step-A and think its @prev is | ||
44 | * still valid. | ||
45 | * | ||
46 | * If the concurrent unlock()/unqueue() wins the race, we'll | ||
47 | * wait for either @lock to point to us, through its Step-B, or | ||
48 | * wait for a new @node->next from its Step-C. | ||
49 | */ | ||
50 | if (node->next) { | ||
51 | next = xchg(&node->next, NULL); | ||
52 | if (next) | ||
53 | break; | ||
54 | } | ||
55 | |||
56 | arch_mutex_cpu_relax(); | ||
57 | } | ||
58 | |||
59 | return next; | ||
60 | } | ||
61 | |||
62 | bool osq_lock(struct optimistic_spin_queue **lock) | ||
63 | { | ||
64 | struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); | ||
65 | struct optimistic_spin_queue *prev, *next; | ||
66 | |||
67 | node->locked = 0; | ||
68 | node->next = NULL; | ||
69 | |||
70 | node->prev = prev = xchg(lock, node); | ||
71 | if (likely(prev == NULL)) | ||
72 | return true; | ||
73 | |||
74 | ACCESS_ONCE(prev->next) = node; | ||
75 | |||
76 | /* | ||
77 | * Normally @prev is untouchable after the above store, because at that | ||
78 | * moment unlock() can proceed and wipe the node element from the stack. | ||
79 | * | ||
80 | * However, since our nodes are static per-cpu storage, we're | ||
81 | * guaranteed their existence -- this allows us to apply | ||
82 | * cmpxchg in an attempt to undo our queueing. | ||
83 | */ | ||
84 | |||
85 | while (!smp_load_acquire(&node->locked)) { | ||
86 | /* | ||
87 | * If we need to reschedule, bail out so we can block. | ||
88 | */ | ||
89 | if (need_resched()) | ||
90 | goto unqueue; | ||
91 | |||
92 | arch_mutex_cpu_relax(); | ||
93 | } | ||
94 | return true; | ||
95 | |||
96 | unqueue: | ||
97 | /* | ||
98 | * Step - A -- stabilize @prev | ||
99 | * | ||
100 | * Undo our @prev->next assignment; this will make @prev's | ||
101 | * unlock()/unqueue() wait for a next pointer since @lock points to us | ||
102 | * (or later). | ||
103 | */ | ||
104 | |||
105 | for (;;) { | ||
106 | if (prev->next == node && | ||
107 | cmpxchg(&prev->next, node, NULL) == node) | ||
108 | break; | ||
109 | |||
110 | /* | ||
111 | * We can only fail the cmpxchg() racing against an unlock(), | ||
112 | * in which case we should observe @node->locked becoming | ||
113 | * true. | ||
114 | */ | ||
115 | if (smp_load_acquire(&node->locked)) | ||
116 | return true; | ||
117 | |||
118 | arch_mutex_cpu_relax(); | ||
119 | |||
120 | /* | ||
121 | * Or we race against a concurrent unqueue()'s Step-B, in which | ||
122 | * case its Step-C will write us a new @node->prev pointer. | ||
123 | */ | ||
124 | prev = ACCESS_ONCE(node->prev); | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Step - B -- stabilize @next | ||
129 | * | ||
130 | * Similar to unlock(), wait for @node->next or move @lock from @node | ||
131 | * back to @prev. | ||
132 | */ | ||
133 | |||
134 | next = osq_wait_next(lock, node, prev); | ||
135 | if (!next) | ||
136 | return false; | ||
137 | |||
138 | /* | ||
139 | * Step - C -- unlink | ||
140 | * | ||
141 | * @prev is stable because it's still waiting for a new @prev->next | ||
142 | * pointer, @next is stable because our @node->next pointer is NULL and | ||
143 | * it will wait in Step-A. | ||
144 | */ | ||
145 | |||
146 | ACCESS_ONCE(next->prev) = prev; | ||
147 | ACCESS_ONCE(prev->next) = next; | ||
148 | |||
149 | return false; | ||
150 | } | ||
151 | |||
152 | void osq_unlock(struct optimistic_spin_queue **lock) | ||
153 | { | ||
154 | struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); | ||
155 | struct optimistic_spin_queue *next; | ||
156 | |||
157 | /* | ||
158 | * Fast path for the uncontended case. | ||
159 | */ | ||
160 | if (likely(cmpxchg(lock, node, NULL) == node)) | ||
161 | return; | ||
162 | |||
163 | /* | ||
164 | * Second most likely case. | ||
165 | */ | ||
166 | next = xchg(&node->next, NULL); | ||
167 | if (next) { | ||
168 | ACCESS_ONCE(next->locked) = 1; | ||
169 | return; | ||
170 | } | ||
171 | |||
172 | next = osq_wait_next(lock, node, NULL); | ||
173 | if (next) | ||
174 | ACCESS_ONCE(next->locked) = 1; | ||
175 | } | ||
176 | |||
177 | #endif | ||
178 | |||
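
osq_lock()/osq_unlock() differ from a plain MCS lock in one way that matters for mutexes: a queued spinner can give up. The per-CPU node means callers pass only the tail pointer. A sketch of the intended calling pattern (it mirrors the mutex.c usage further down; the surrounding names are illustrative):

	struct optimistic_spin_queue *osq = NULL;	/* tail; NULL when no spinners */

	if (osq_lock(&osq)) {
		/* we are the one active optimistic spinner: spin on the owner here */
		osq_unlock(&osq);
	} else {
		/* need_resched() fired while queued: fall back to blocking */
	}
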
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h new file mode 100644 index 000000000000..a2dbac4aca6b --- /dev/null +++ b/kernel/locking/mcs_spinlock.h | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * MCS lock defines | ||
3 | * | ||
4 | * This file contains the main data structure and API definitions of the MCS lock. | ||
5 | * | ||
6 | * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock | ||
7 | * with the desirable properties of being fair, and with each CPU trying | ||
8 | * to acquire the lock spinning on a local variable. | ||
9 | * It avoids expensive cache-line bouncing that common test-and-set spin-lock | ||
10 | * implementations incur. | ||
11 | */ | ||
12 | #ifndef __LINUX_MCS_SPINLOCK_H | ||
13 | #define __LINUX_MCS_SPINLOCK_H | ||
14 | |||
15 | #include <asm/mcs_spinlock.h> | ||
16 | |||
17 | struct mcs_spinlock { | ||
18 | struct mcs_spinlock *next; | ||
19 | int locked; /* 1 if lock acquired */ | ||
20 | }; | ||
21 | |||
22 | #ifndef arch_mcs_spin_lock_contended | ||
23 | /* | ||
24 | * Using smp_load_acquire() provides a memory barrier that ensures | ||
25 | * subsequent operations happen after the lock is acquired. | ||
26 | */ | ||
27 | #define arch_mcs_spin_lock_contended(l) \ | ||
28 | do { \ | ||
29 | while (!(smp_load_acquire(l))) \ | ||
30 | arch_mutex_cpu_relax(); \ | ||
31 | } while (0) | ||
32 | #endif | ||
33 | |||
34 | #ifndef arch_mcs_spin_unlock_contended | ||
35 | /* | ||
36 | * smp_store_release() provides a memory barrier to ensure all | ||
37 | * operations in the critical section have been completed before | ||
38 | * unlocking. | ||
39 | */ | ||
40 | #define arch_mcs_spin_unlock_contended(l) \ | ||
41 | smp_store_release((l), 1) | ||
42 | #endif | ||
43 | |||
44 | /* | ||
45 | * Note: the smp_load_acquire/smp_store_release pair is not | ||
46 | * sufficient to form a full memory barrier across | ||
47 | * CPUs on many architectures (except x86) for mcs_unlock and mcs_lock. | ||
48 | * For applications that need a full barrier across multiple CPUs | ||
49 | * with an mcs_unlock()/mcs_lock() pair, smp_mb__after_unlock_lock() should | ||
50 | * be used after mcs_lock(). | ||
51 | */ | ||
52 | |||
53 | /* | ||
54 | * In order to acquire the lock, the caller should declare a local node and | ||
55 | * pass a reference to the node to this function in addition to the lock. | ||
56 | * If the lock has already been acquired, then this will proceed to spin | ||
57 | * on node->locked until the previous lock holder sets it | ||
58 | * in mcs_spin_unlock(). | ||
59 | * | ||
60 | * We don't inline mcs_spin_lock() so that perf can correctly account for the | ||
61 | * time spent in this lock function. | ||
62 | */ | ||
63 | static inline | ||
64 | void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) | ||
65 | { | ||
66 | struct mcs_spinlock *prev; | ||
67 | |||
68 | /* Init node */ | ||
69 | node->locked = 0; | ||
70 | node->next = NULL; | ||
71 | |||
72 | prev = xchg(lock, node); | ||
73 | if (likely(prev == NULL)) { | ||
74 | /* | ||
75 | * Lock acquired; there is no need to set node->locked to 1. Each thread | ||
76 | * spins only on its own node->locked value for lock acquisition. | ||
77 | * However, since this thread can immediately acquire the lock | ||
78 | * and does not proceed to spin on its own node->locked, this | ||
79 | * value won't be used. If a debug mode is needed to | ||
80 | * audit lock status, then set node->locked here. | ||
81 | */ | ||
82 | return; | ||
83 | } | ||
84 | ACCESS_ONCE(prev->next) = node; | ||
85 | |||
86 | /* Wait until the lock holder passes the lock down. */ | ||
87 | arch_mcs_spin_lock_contended(&node->locked); | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Releases the lock. The caller should pass in the corresponding node that | ||
92 | * was used to acquire the lock. | ||
93 | */ | ||
94 | static inline | ||
95 | void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) | ||
96 | { | ||
97 | struct mcs_spinlock *next = ACCESS_ONCE(node->next); | ||
98 | |||
99 | if (likely(!next)) { | ||
100 | /* | ||
101 | * Release the lock by setting it to NULL | ||
102 | */ | ||
103 | if (likely(cmpxchg(lock, node, NULL) == node)) | ||
104 | return; | ||
105 | /* Wait until the next pointer is set */ | ||
106 | while (!(next = ACCESS_ONCE(node->next))) | ||
107 | arch_mutex_cpu_relax(); | ||
108 | } | ||
109 | |||
110 | /* Pass lock to next waiter. */ | ||
111 | arch_mcs_spin_unlock_contended(&next->locked); | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * Cancellable version of the MCS lock above. | ||
116 | * | ||
117 | * Intended for adaptive spinning of sleeping locks: | ||
118 | * mutex_lock()/rwsem_down_{read,write}() etc. | ||
119 | */ | ||
120 | |||
121 | struct optimistic_spin_queue { | ||
122 | struct optimistic_spin_queue *next, *prev; | ||
123 | int locked; /* 1 if lock acquired */ | ||
124 | }; | ||
125 | |||
126 | extern bool osq_lock(struct optimistic_spin_queue **lock); | ||
127 | extern void osq_unlock(struct optimistic_spin_queue **lock); | ||
128 | |||
129 | #endif /* __LINUX_MCS_SPINLOCK_H */ | ||
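
A sketch of basic usage (illustrative, not from this diff): the node lives on the caller's stack and must stay valid until the matching unlock, and callers needing full-barrier semantics across an unlock/lock pair add smp_mb__after_unlock_lock() per the note above:

	static struct mcs_spinlock *tail;	/* shared tail pointer, NULL when free */

	static void do_critical_work(void)
	{
		struct mcs_spinlock node;	/* each waiter spins on its own node.locked */

		mcs_spin_lock(&tail, &node);
		/* ... critical section ... */
		mcs_spin_unlock(&tail, &node);
	}
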
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c index faf6f5b53e77..e1191c996c59 100644 --- a/kernel/locking/mutex-debug.c +++ b/kernel/locking/mutex-debug.c | |||
@@ -83,6 +83,12 @@ void debug_mutex_unlock(struct mutex *lock) | |||
83 | 83 | ||
84 | DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); | 84 | DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); |
85 | mutex_clear_owner(lock); | 85 | mutex_clear_owner(lock); |
86 | |||
87 | /* | ||
88 | * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug | ||
89 | * mutexes so that we can do the unlock here, after we've verified state. | ||
90 | */ | ||
91 | atomic_set(&lock->count, 1); | ||
86 | } | 92 | } |
87 | 93 | ||
88 | void debug_mutex_init(struct mutex *lock, const char *name, | 94 | void debug_mutex_init(struct mutex *lock, const char *name, |
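
This atomic_set() is the debug half of a two-part change: mutex.c below forces __mutex_slowpath_needs_to_unlock() to 0 under CONFIG_DEBUG_MUTEXES, so the count is released here, under wait_lock and after the sanity checks, rather than early in the common slowpath. Condensed, the debug unlock slowpath now behaves like this sketch (simplified; error paths and waiter wakeup elided):

	/* CONFIG_DEBUG_MUTEXES: __mutex_slowpath_needs_to_unlock() == 0,
	 * so the early atomic_set() in the common slowpath is skipped. */
	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);	/* verifies state, then sets count to 1 */
	/* ... wake the first waiter, if any ... */
	spin_unlock_mutex(&lock->wait_lock, flags);
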
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 4dd6e4c219de..14fe72cc8ce7 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/debug_locks.h> | 27 | #include <linux/debug_locks.h> |
28 | #include "mcs_spinlock.h" | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * In the DEBUG case we are using the "NULL fastpath" for mutexes, | 31 | * In the DEBUG case we are using the "NULL fastpath" for mutexes, |
@@ -33,6 +34,13 @@ | |||
33 | #ifdef CONFIG_DEBUG_MUTEXES | 34 | #ifdef CONFIG_DEBUG_MUTEXES |
34 | # include "mutex-debug.h" | 35 | # include "mutex-debug.h" |
35 | # include <asm-generic/mutex-null.h> | 36 | # include <asm-generic/mutex-null.h> |
37 | /* | ||
38 | * Must be 0 for the debug case so we do not do the unlock outside of the | ||
39 | * wait_lock region. debug_mutex_unlock() will do the actual unlock in this | ||
40 | * case. | ||
41 | */ | ||
42 | # undef __mutex_slowpath_needs_to_unlock | ||
43 | # define __mutex_slowpath_needs_to_unlock() 0 | ||
36 | #else | 44 | #else |
37 | # include "mutex.h" | 45 | # include "mutex.h" |
38 | # include <asm/mutex.h> | 46 | # include <asm/mutex.h> |
@@ -52,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) | |||
52 | INIT_LIST_HEAD(&lock->wait_list); | 60 | INIT_LIST_HEAD(&lock->wait_list); |
53 | mutex_clear_owner(lock); | 61 | mutex_clear_owner(lock); |
54 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER | 62 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
55 | lock->spin_mlock = NULL; | 63 | lock->osq = NULL; |
56 | #endif | 64 | #endif |
57 | 65 | ||
58 | debug_mutex_init(lock, name, key); | 66 | debug_mutex_init(lock, name, key); |
@@ -111,54 +119,7 @@ EXPORT_SYMBOL(mutex_lock); | |||
111 | * more or less simultaneously, the spinners need to acquire a MCS lock | 119 | * more or less simultaneously, the spinners need to acquire a MCS lock |
112 | * first before spinning on the owner field. | 120 | * first before spinning on the owner field. |
113 | * | 121 | * |
114 | * We don't inline mspin_lock() so that perf can correctly account for the | ||
115 | * time spent in this lock function. | ||
116 | */ | 122 | */ |
117 | struct mspin_node { | ||
118 | struct mspin_node *next ; | ||
119 | int locked; /* 1 if lock acquired */ | ||
120 | }; | ||
121 | #define MLOCK(mutex) ((struct mspin_node **)&((mutex)->spin_mlock)) | ||
122 | |||
123 | static noinline | ||
124 | void mspin_lock(struct mspin_node **lock, struct mspin_node *node) | ||
125 | { | ||
126 | struct mspin_node *prev; | ||
127 | |||
128 | /* Init node */ | ||
129 | node->locked = 0; | ||
130 | node->next = NULL; | ||
131 | |||
132 | prev = xchg(lock, node); | ||
133 | if (likely(prev == NULL)) { | ||
134 | /* Lock acquired */ | ||
135 | node->locked = 1; | ||
136 | return; | ||
137 | } | ||
138 | ACCESS_ONCE(prev->next) = node; | ||
139 | smp_wmb(); | ||
140 | /* Wait until the lock holder passes the lock down */ | ||
141 | while (!ACCESS_ONCE(node->locked)) | ||
142 | arch_mutex_cpu_relax(); | ||
143 | } | ||
144 | |||
145 | static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node) | ||
146 | { | ||
147 | struct mspin_node *next = ACCESS_ONCE(node->next); | ||
148 | |||
149 | if (likely(!next)) { | ||
150 | /* | ||
151 | * Release the lock by setting it to NULL | ||
152 | */ | ||
153 | if (cmpxchg(lock, node, NULL) == node) | ||
154 | return; | ||
155 | /* Wait until the next pointer is set */ | ||
156 | while (!(next = ACCESS_ONCE(node->next))) | ||
157 | arch_mutex_cpu_relax(); | ||
158 | } | ||
159 | ACCESS_ONCE(next->locked) = 1; | ||
160 | smp_wmb(); | ||
161 | } | ||
162 | 123 | ||
163 | /* | 124 | /* |
164 | * Mutex spinning code migrated from kernel/sched/core.c | 125 | * Mutex spinning code migrated from kernel/sched/core.c |
@@ -212,6 +173,9 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock) | |||
212 | struct task_struct *owner; | 173 | struct task_struct *owner; |
213 | int retval = 1; | 174 | int retval = 1; |
214 | 175 | ||
176 | if (need_resched()) | ||
177 | return 0; | ||
178 | |||
215 | rcu_read_lock(); | 179 | rcu_read_lock(); |
216 | owner = ACCESS_ONCE(lock->owner); | 180 | owner = ACCESS_ONCE(lock->owner); |
217 | if (owner) | 181 | if (owner) |
@@ -446,9 +410,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
446 | if (!mutex_can_spin_on_owner(lock)) | 410 | if (!mutex_can_spin_on_owner(lock)) |
447 | goto slowpath; | 411 | goto slowpath; |
448 | 412 | ||
413 | if (!osq_lock(&lock->osq)) | ||
414 | goto slowpath; | ||
415 | |||
449 | for (;;) { | 416 | for (;;) { |
450 | struct task_struct *owner; | 417 | struct task_struct *owner; |
451 | struct mspin_node node; | ||
452 | 418 | ||
453 | if (use_ww_ctx && ww_ctx->acquired > 0) { | 419 | if (use_ww_ctx && ww_ctx->acquired > 0) { |
454 | struct ww_mutex *ww; | 420 | struct ww_mutex *ww; |
@@ -463,19 +429,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
463 | * performed the optimistic spinning cannot be done. | 429 | * performed the optimistic spinning cannot be done. |
464 | */ | 430 | */ |
465 | if (ACCESS_ONCE(ww->ctx)) | 431 | if (ACCESS_ONCE(ww->ctx)) |
466 | goto slowpath; | 432 | break; |
467 | } | 433 | } |
468 | 434 | ||
469 | /* | 435 | /* |
470 | * If there's an owner, wait for it to either | 436 | * If there's an owner, wait for it to either |
471 | * release the lock or go to sleep. | 437 | * release the lock or go to sleep. |
472 | */ | 438 | */ |
473 | mspin_lock(MLOCK(lock), &node); | ||
474 | owner = ACCESS_ONCE(lock->owner); | 439 | owner = ACCESS_ONCE(lock->owner); |
475 | if (owner && !mutex_spin_on_owner(lock, owner)) { | 440 | if (owner && !mutex_spin_on_owner(lock, owner)) |
476 | mspin_unlock(MLOCK(lock), &node); | 441 | break; |
477 | goto slowpath; | ||
478 | } | ||
479 | 442 | ||
480 | if ((atomic_read(&lock->count) == 1) && | 443 | if ((atomic_read(&lock->count) == 1) && |
481 | (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { | 444 | (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { |
@@ -488,11 +451,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
488 | } | 451 | } |
489 | 452 | ||
490 | mutex_set_owner(lock); | 453 | mutex_set_owner(lock); |
491 | mspin_unlock(MLOCK(lock), &node); | 454 | osq_unlock(&lock->osq); |
492 | preempt_enable(); | 455 | preempt_enable(); |
493 | return 0; | 456 | return 0; |
494 | } | 457 | } |
495 | mspin_unlock(MLOCK(lock), &node); | ||
496 | 458 | ||
497 | /* | 459 | /* |
498 | * When there's no owner, we might have preempted between the | 460 | * When there's no owner, we might have preempted between the |
@@ -501,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
501 | * the owner complete. | 463 | * the owner complete. |
502 | */ | 464 | */ |
503 | if (!owner && (need_resched() || rt_task(task))) | 465 | if (!owner && (need_resched() || rt_task(task))) |
504 | goto slowpath; | 466 | break; |
505 | 467 | ||
506 | /* | 468 | /* |
507 | * The cpu_relax() call is a compiler barrier which forces | 469 | * The cpu_relax() call is a compiler barrier which forces |
@@ -511,7 +473,15 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
511 | */ | 473 | */ |
512 | arch_mutex_cpu_relax(); | 474 | arch_mutex_cpu_relax(); |
513 | } | 475 | } |
476 | osq_unlock(&lock->osq); | ||
514 | slowpath: | 477 | slowpath: |
478 | /* | ||
479 | * If we fell out of the spin path because of need_resched(), | ||
480 | * reschedule now, before we try-lock the mutex. This avoids getting | ||
481 | * scheduled out right after we obtained the mutex. | ||
482 | */ | ||
483 | if (need_resched()) | ||
484 | schedule_preempt_disabled(); | ||
515 | #endif | 485 | #endif |
516 | spin_lock_mutex(&lock->wait_lock, flags); | 486 | spin_lock_mutex(&lock->wait_lock, flags); |
517 | 487 | ||
@@ -717,10 +687,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) | |||
717 | struct mutex *lock = container_of(lock_count, struct mutex, count); | 687 | struct mutex *lock = container_of(lock_count, struct mutex, count); |
718 | unsigned long flags; | 688 | unsigned long flags; |
719 | 689 | ||
720 | spin_lock_mutex(&lock->wait_lock, flags); | ||
721 | mutex_release(&lock->dep_map, nested, _RET_IP_); | ||
722 | debug_mutex_unlock(lock); | ||
723 | |||
724 | /* | 690 | /* |
725 | * some architectures leave the lock unlocked in the fastpath failure | 691 | * some architectures leave the lock unlocked in the fastpath failure |
726 | * case, others need to leave it locked. In the latter case we have to | 692 | * case, others need to leave it locked. In the latter case we have to |
@@ -729,6 +695,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) | |||
729 | if (__mutex_slowpath_needs_to_unlock()) | 695 | if (__mutex_slowpath_needs_to_unlock()) |
730 | atomic_set(&lock->count, 1); | 696 | atomic_set(&lock->count, 1); |
731 | 697 | ||
698 | spin_lock_mutex(&lock->wait_lock, flags); | ||
699 | mutex_release(&lock->dep_map, nested, _RET_IP_); | ||
700 | debug_mutex_unlock(lock); | ||
701 | |||
732 | if (!list_empty(&lock->wait_list)) { | 702 | if (!list_empty(&lock->wait_list)) { |
733 | /* get the first entry from the wait-list: */ | 703 | /* get the first entry from the wait-list: */ |
734 | struct mutex_waiter *waiter = | 704 | struct mutex_waiter *waiter = |
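
For non-debug mutexes the same hunk inverts the old order: the lock word is released before wait_lock is taken, so optimistic spinners can acquire the mutex while the unlocker is still busy waking a waiter. The resulting slowpath order, sketched (non-debug case, simplified):

	/* 1: free the mutex word itself -- spinners may acquire from here on */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	/* 2: only then take wait_lock to wake up the first waiter */
	spin_lock_mutex(&lock->wait_lock, flags);
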