author		Linus Torvalds <torvalds@linux-foundation.org>	2015-06-22 17:54:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-22 17:54:22 -0400
commit		1bf7067c6e173dc10411704db48338ed69c05565 (patch)
tree		06d731d9647c525fa598d03d7ec957ff9772ff40
parent		fc934d40178ad4e551a17e2733241d9f29fddd70 (diff)
parent		68722101ec3a0e179408a13708dd020e04f54aab (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes are:
- 'qspinlock' support, enabled on x86: queued spinlocks - these are
now the spinlock variant used by x86 as they outperform ticket
spinlocks in every category. (Waiman Long)
- 'pvqspinlock' support on x86: paravirtualized variant of queued
spinlocks. (Waiman Long, Peter Zijlstra)
- 'qrwlock' support, enabled on x86: queued rwlocks. Similar to
queued spinlocks, they are now the variant used by x86:
CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
CONFIG_QUEUED_SPINLOCKS=y
CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
CONFIG_QUEUED_RWLOCKS=y
- various lockdep fixlets
- various locking primitives cleanups, further WRITE_ONCE()
propagation"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
locking/lockdep: Remove hard coded array size dependency
locking/qrwlock: Don't contend with readers when setting _QW_WAITING
lockdep: Do not break user-visible string
locking/arch: Rename set_mb() to smp_store_mb()
locking/arch: Add WRITE_ONCE() to set_mb()
rtmutex: Warn if trylock is called from hard/softirq context
arch: Remove __ARCH_HAVE_CMPXCHG
locking/rtmutex: Drop usage of __HAVE_ARCH_CMPXCHG
locking/qrwlock: Rename QUEUE_RWLOCK to QUEUED_RWLOCKS
locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS
locking/pvqspinlock: Replace xchg() by the more descriptive set_mb()
locking/pvqspinlock, x86: Enable PV qspinlock for Xen
locking/pvqspinlock, x86: Enable PV qspinlock for KVM
locking/pvqspinlock, x86: Implement the paravirt qspinlock call patching
locking/pvqspinlock: Implement simple paravirt support for the qspinlock
locking/qspinlock: Revert to test-and-set on hypervisors
locking/qspinlock: Use a simple write to grab the lock
locking/qspinlock: Optimize for smaller NR_CPUS
locking/qspinlock: Extract out code snippets for the next patch
locking/qspinlock: Add pending bit
...
61 files changed, 1423 insertions, 102 deletions
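
Before the per-file hunks, a minimal sketch of the rename that drives most of the churn below. This is a hedged userspace model, not kernel code: the GCC builtin stands in for the real barrier, and "done" is an invented flag variable.

/* Userspace model of the set_mb() -> smp_store_mb() change. */
#define smp_mb()		__sync_synchronize()
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

/* Old helper: a plain store that the compiler may tear or fuse. */
#define set_mb(var, value)	 do { (var) = (value); smp_mb(); } while (0)

/* New helper: WRITE_ONCE() forces a single untorn store, then the
 * full memory barrier orders it against later loads and stores. */
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)

static int done;

int main(void)
{
	smp_store_mb(done, 1);	/* store to "done", then a full barrier */
	return 0;
}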
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 360841da3744..13feb697271f 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1673,7 +1673,7 @@ CPU from reordering them.
 
 There are some more advanced barrier functions:
 
- (*) set_mb(var, value)
+ (*) smp_store_mb(var, value)
 
     This assigns the value to the variable and then inserts a full memory
     barrier after it, depending on the function.  It isn't guaranteed to
@@ -1985,7 +1985,7 @@ after it has altered the task state:
 	CPU 1
 	===============================
 	set_current_state();
-	  set_mb();
+	  smp_store_mb();
 	    STORE current->state
 	    <general barrier>
 	LOAD event_indicated
@@ -2026,7 +2026,7 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:
 	CPU 1                           CPU 2
 	=============================== ===============================
 	set_current_state();		STORE event_indicated
-	  set_mb();			wake_up();
+	  smp_store_mb();		wake_up();
 	    STORE current->state	<write barrier>
 	    <general barrier>		STORE current->state
 	LOAD event_indicated
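
The memory-barriers.txt tables above show the sleep/wake ordering schematically; here is the same pairing as a hedged C sketch (a userspace model, with sleeper_state and event_indicated standing in for current->state and the wakeup condition):

#define smp_mb()		__sync_synchronize()
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)

static int sleeper_state;	/* 1 = "sleeping", 0 = "running" */
static int event_indicated;

static void sleeper(void)
{
	/* set_current_state() analogue: publish the state, then a full
	 * barrier so the event check cannot be hoisted above the store. */
	smp_store_mb(sleeper_state, 1);
	while (!READ_ONCE(event_indicated))
		;			/* the kernel would schedule() here */
	WRITE_ONCE(sleeper_state, 0);
}

static void waker(void)
{
	WRITE_ONCE(event_indicated, 1);
	smp_mb();	/* wake_up() implies at least a write barrier */
	/* ...then read sleeper_state and kick the sleeper if needed... */
}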
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 429e8cd0d78e..e5117766529e 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -66,6 +66,4 @@
 #undef __ASM__MB
 #undef ____cmpxchg
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index d2f81e6b8c1c..6c2327e1c732 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -81,7 +81,7 @@ do { \
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
-#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_mb__before_atomic()	smp_mb()
 #define smp_mb__after_atomic()	smp_mb()
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 71f19c4dc0de..0fa47c4275cb 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -114,7 +114,7 @@ do { \
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
-#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define nop()		asm volatile("nop");
 
 #define smp_mb__before_atomic()	smp_mb()
diff --git a/arch/avr32/include/asm/cmpxchg.h b/arch/avr32/include/asm/cmpxchg.h
index 962a6aeab787..366bbeaeb405 100644
--- a/arch/avr32/include/asm/cmpxchg.h
+++ b/arch/avr32/include/asm/cmpxchg.h
@@ -70,8 +70,6 @@ extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
    if something tries to do an invalid cmpxchg(). */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
 {
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 9e7802911a57..a6e34e2acbba 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -64,7 +64,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  * looks just like atomic_cmpxchg on our arch currently with a bunch of
  * variable casting.
  */
-#define __HAVE_ARCH_CMPXCHG 1
 
 #define cmpxchg(ptr, old, new) \
 ({ \
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index f6769eb2bbf9..843ba435e43b 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -77,12 +77,7 @@ do { \
 	___p1; \
 })
 
-/*
- * XXX check on this ---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet. Grrr...
- */
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index f35109b1d907..a0e3620f8f13 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -61,8 +61,6 @@ extern void ia64_xchg_called_with_bad_pointer(void);
  * indicated by comparing RETURN with OLD.
  */
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 /*
  * This function doesn't exist, so you'll get a linker error
  * if something tries to do an invalid cmpxchg().
diff --git a/arch/m32r/include/asm/cmpxchg.h b/arch/m32r/include/asm/cmpxchg.h
index de651db20b43..14bf9b739dd2 100644
--- a/arch/m32r/include/asm/cmpxchg.h
+++ b/arch/m32r/include/asm/cmpxchg.h
@@ -107,8 +107,6 @@ __xchg_local(unsigned long x, volatile void *ptr, int size)
 	((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \
 			sizeof(*(ptr))))
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
 {
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index bc755bc620ad..83b1df80f0ac 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -90,7 +90,6 @@ extern unsigned long __invalid_cmpxchg_size(volatile void *,
  * indicated by comparing RETURN with OLD.
  */
 #ifdef CONFIG_RMW_INSNS
-#define __HAVE_ARCH_CMPXCHG 1
 
 static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
 				      unsigned long new, int size)
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index d703d8e26a65..5a696e507930 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -84,7 +84,7 @@ static inline void fence(void)
 #define read_barrier_depends()		do { } while (0)
 #define smp_read_barrier_depends()	do { } while (0)
 
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_store_release(p, v)						\
 do {									\
diff --git a/arch/metag/include/asm/cmpxchg.h b/arch/metag/include/asm/cmpxchg.h
index b1bc1be8540f..be29e3e44321 100644
--- a/arch/metag/include/asm/cmpxchg.h
+++ b/arch/metag/include/asm/cmpxchg.h
@@ -51,8 +51,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return old;
 }
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define cmpxchg(ptr, o, n)						\
 	({								\
 		__typeof__(*(ptr)) _o_ = (o);				\
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 2b8bbbcb9be0..7ecba84656d4 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -112,8 +112,8 @@
 #define __WEAK_LLSC_MB		"		\n"
 #endif
 
-#define set_mb(var, value) \
-	do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) \
+	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 412f945f1f5e..b71ab4a5fd50 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -138,8 +138,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int siz
 		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))));	\
 })
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define __cmpxchg_asm(ld, st, m, old, new)				\
 ({									\
 	__typeof(*(m)) __ret;						\
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index dbd13354ec41..0a90b965cccb 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -46,8 +46,6 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
 #define xchg(ptr, x) \
 	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 /* bug catcher for when unsupported size is used - won't link */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 1124f59b8df4..51ccc7232042 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define set_mb(var, value)	do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index d463c68fe7f0..ad6263cffb0f 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -144,7 +144,6 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
  * Compare and exchange - if *p == old, set it to new,
  * and return the old value of *p.
  */
-#define __HAVE_ARCH_CMPXCHG 1
 
 static __always_inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 8d724718ec21..e6f8615a11eb 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic()		smp_mb()
 #define smp_mb__after_atomic()		smp_mb()
 
-#define set_mb(var, value)		do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #define smp_store_release(p, v)						\
 do {									\
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 4eadec466b8c..411464f4c97a 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -32,8 +32,6 @@
 	__old;								\
 })
 
-#define __HAVE_ARCH_CMPXCHG
-
 #define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn)		\
 ({									\
 	register __typeof__(*(p1)) __old1 asm("2") = (o1);		\
diff --git a/arch/score/include/asm/cmpxchg.h b/arch/score/include/asm/cmpxchg.h
index f384839c3ee5..cc3f6420b71c 100644
--- a/arch/score/include/asm/cmpxchg.h
+++ b/arch/score/include/asm/cmpxchg.h
@@ -42,8 +42,6 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m,
 			(unsigned long)(o),				\
 			(unsigned long)(n)))
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #include <asm-generic/cmpxchg-local.h>
 
 #endif /* _ASM_SCORE_CMPXCHG_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 43715308b068..bf91037db4e0 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -32,7 +32,7 @@
 #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #endif
 
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #include <asm-generic/barrier.h>
 
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index f6bd1406b897..85c97b188d71 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -46,8 +46,6 @@ extern void __xchg_called_with_bad_pointer(void);
  * if something tries to do an invalid cmpxchg(). */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 		unsigned long new, int size)
 {
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 76648941fea7..809941e33e12 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -40,8 +40,8 @@ do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
 #define dma_rmb()	rmb()
 #define dma_wmb()	wmb()
 
-#define set_mb(__var, __value) \
-	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
+#define smp_store_mb(__var, __value) \
+	do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index d38b52dca216..83ffb83c5397 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -34,7 +34,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void *ptr, int
  *
  * Cribbed from <asm-parisc/atomic.h>
  */
-#define __HAVE_ARCH_CMPXCHG 1
 
 /* bug catcher for when unsupported size is used - won't link */
 void __cmpxchg_called_with_bad_pointer(void);
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 0e1ed6cfbf68..faa2f61058c2 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -65,8 +65,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void *ptr,
 
 #include <asm-generic/cmpxchg-local.h>
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 7b11c5fadd42..0496970cef82 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -105,9 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
-/* Define this to indicate that cmpxchg is an efficient operation. */
-#define __HAVE_ARCH_CMPXCHG
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_ATOMIC_64_H */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 226d5696e1d1..4e986e809861 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -127,7 +127,8 @@ config X86
 	select MODULES_USE_ELF_RELA if X86_64
 	select CLONE_BACKWARDS if X86_32
 	select ARCH_USE_BUILTIN_BSWAP
-	select ARCH_USE_QUEUE_RWLOCK
+	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_USE_QUEUED_RWLOCKS
 	select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
 	select OLD_SIGACTION if X86_32
 	select COMPAT_OLD_SIGACTION if IA32_EMULATION
@@ -666,7 +667,7 @@ config PARAVIRT_DEBUG
 config PARAVIRT_SPINLOCKS
 	bool "Paravirtualization layer for spinlocks"
 	depends on PARAVIRT && SMP
-	select UNINLINE_SPIN_UNLOCK
+	select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCKS
 	---help---
 	  Paravirtualized spinlocks allow a pvops backend to replace the
 	  spinlock implementation with something virtualization-friendly
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 959e45b81fe2..e51a8f803f55 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -35,12 +35,12 @@
 #define smp_mb()	mb()
 #define smp_rmb()	dma_rmb()
 #define smp_wmb()	barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif /* SMP */
 
 #define read_barrier_depends()	do { } while (0)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 99c105d78b7e..ad19841eddfe 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -4,8 +4,6 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 /*
  * Non-existant functions to indicate usage errors at link time
  * (or compile-time if the compiler implements __compiletime_error().
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8957810ad7d1..d143bfad45d7 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -712,6 +712,31 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+							u32 val)
+{
+	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+	PVOP_VCALL1(pv_lock_ops.kick, cpu);
+}
+
+#else /* !CONFIG_QUEUED_SPINLOCKS */
+
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
 							__ticket_t ticket)
 {
@@ -724,7 +749,9 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
 	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
-#endif
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+#endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index f7b0b5c112f2..8766c7c395c2 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -333,9 +333,19 @@ struct arch_spinlock;
 typedef u16 __ticket_t;
 #endif
 
+struct qspinlock;
+
 struct pv_lock_ops {
+#ifdef CONFIG_QUEUED_SPINLOCKS
+	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+	struct paravirt_callee_save queued_spin_unlock;
+
+	void (*wait)(u8 *ptr, u8 val);
+	void (*kick)(int cpu);
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 	struct paravirt_callee_save lock_spinning;
 	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
new file mode 100644
index 000000000000..9d51fae1cba3
--- /dev/null
+++ b/arch/x86/include/asm/qspinlock.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_X86_QSPINLOCK_H
+#define _ASM_X86_QSPINLOCK_H
+
+#include <asm/cpufeature.h>
+#include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
+
+#define	queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * A smp_store_release() on the least-significant byte.
+ */
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	smp_store_release((u8 *)lock, 0);
+}
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	pv_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_queued_spin_unlock(lock);
+}
+#else
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	native_queued_spin_unlock(lock);
+}
+#endif
+
+#define virt_queued_spin_lock virt_queued_spin_lock
+
+static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		return false;
+
+	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+		cpu_relax();
+
+	return true;
+}
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_X86_QSPINLOCK_H */
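
The "#define queued_spin_unlock queued_spin_unlock" before the asm-generic include is the override handshake: the generic header only emits its fallback under #ifndef queued_spin_unlock, so the arch version above wins. A toy, self-contained model of that pattern (both halves collapsed into one translation unit; names and bodies are invented for illustration):

/* "arch" half: claims the name first. */
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(volatile int *lock)
{
	*lock = 0;			/* native: one plain store */
}

/* "generic" half: compiled out because the name is already taken. */
#ifndef queued_spin_unlock
static inline void queued_spin_unlock(volatile int *lock)
{
	__sync_fetch_and_and(lock, 0);	/* slower generic atomic path */
}
#endif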
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..b002e711ba88
--- /dev/null
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_QSPINLOCK_PARAVIRT_H
+#define __ASM_QSPINLOCK_PARAVIRT_H
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);
+
+#endif
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 64b611782ef0..be0a05913b91 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -42,6 +42,10 @@
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
+
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 
 static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
@@ -196,6 +200,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 			cpu_relax();
 	}
 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 /*
  * Read-write spinlocks, allowing multiple readers
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 5f9d7572d82b..65c3e37f879a 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -23,6 +23,9 @@ typedef u32 __ticketpair_t;
 
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
 typedef struct arch_spinlock {
 	union {
 		__ticketpair_t head_tail;
@@ -33,6 +36,7 @@ typedef struct arch_spinlock {
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #include <asm-generic/qrwlock_types.h>
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 9435620062df..1681504e44a4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -584,6 +584,39 @@ static void kvm_kick_cpu(int cpu)
 	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
 
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+#include <asm/qspinlock.h>
+
+static void kvm_wait(u8 *ptr, u8 val)
+{
+	unsigned long flags;
+
+	if (in_nmi())
+		return;
+
+	local_irq_save(flags);
+
+	if (READ_ONCE(*ptr) != val)
+		goto out;
+
+	/*
+	 * halt until it's our turn and kicked. Note that we do safe halt
+	 * for irq enabled case to avoid hang when lock info is overwritten
+	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
+	 */
+	if (arch_irqs_disabled_flags(flags))
+		halt();
+	else
+		safe_halt();
+
+out:
+	local_irq_restore(flags);
+}
+
+#else /* !CONFIG_QUEUED_SPINLOCKS */
+
 enum kvm_contention_stat {
 	TAKEN_SLOW,
 	TAKEN_SLOW_PICKUP,
@@ -817,6 +850,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
 	}
 }
 
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
@@ -828,8 +863,16 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+	__pv_init_lock_hash();
+	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_lock_ops.wait = kvm_wait;
+	pv_lock_ops.kick = kvm_kick_cpu;
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
 	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+#endif
 }
 
 static __init int kvm_spinlock_init_jump(void)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index bbb6c7316341..33ee3e0efd65 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,11 +8,33 @@
 
 #include <asm/paravirt.h>
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+__visible void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+	native_queued_spin_unlock(lock);
+}
+
+PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
+
+bool pv_is_native_spin_unlock(void)
+{
+	return pv_lock_ops.queued_spin_unlock.func ==
+		__raw_callee_save___native_queued_spin_unlock;
+}
+#endif
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
+#ifdef CONFIG_QUEUED_SPINLOCKS
+	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
+	.wait = paravirt_nop,
+	.kick = paravirt_nop,
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 	.lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
 	.unlock_kick = paravirt_nop,
-#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
+#endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index d9f32e6d6ab6..e1b013696dde 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -12,6 +12,10 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
+#endif
+
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
 {
 	/* arg in %eax, return in %eax */
@@ -24,6 +28,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 	return 0;
 }
 
+extern bool pv_is_native_spin_unlock(void);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
@@ -47,14 +53,22 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_mmu_ops, write_cr3);
 		PATCH_SITE(pv_cpu_ops, clts);
 		PATCH_SITE(pv_cpu_ops, read_tsc);
-
-patch_site:
-		ret = paravirt_patch_insns(ibuf, len, start, end);
-		break;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+			if (pv_is_native_spin_unlock()) {
+				start = start_pv_lock_ops_queued_spin_unlock;
+				end   = end_pv_lock_ops_queued_spin_unlock;
+				goto patch_site;
+			}
+#endif
 
 	default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
+
+patch_site:
+		ret = paravirt_patch_insns(ibuf, len, start, end);
+		break;
 	}
 #undef PATCH_SITE
 	return ret;
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index a1da6737ba5b..a1fa86782186 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -21,6 +21,10 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
+#endif
+
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
 {
 	return paravirt_patch_insns(insnbuf, len,
@@ -33,6 +37,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 				    start__mov64, end__mov64);
 }
 
+extern bool pv_is_native_spin_unlock(void);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
@@ -59,14 +65,22 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_cpu_ops, clts);
 		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
 		PATCH_SITE(pv_cpu_ops, wbinvd);
-
-patch_site:
-		ret = paravirt_patch_insns(ibuf, len, start, end);
-		break;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+			if (pv_is_native_spin_unlock()) {
+				start = start_pv_lock_ops_queued_spin_unlock;
+				end   = end_pv_lock_ops_queued_spin_unlock;
+				goto patch_site;
+			}
+#endif
 
 	default:
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
+
+patch_site:
+		ret = paravirt_patch_insns(ibuf, len, start, end);
+		break;
 	}
 #undef PATCH_SITE
 	return ret;
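
Both patch files have the same shape: when pv_is_native_spin_unlock() reports that the pvops slot still points at the native unlock, each lock site is patched from an indirect call into the inline "movb $0, (%rdi)" (or "(%eax)" on 32-bit). A toy model of the copy step follows; the helper is invented, and the real paravirt_patch_insns() handles the missing-template and does-not-fit cases somewhat differently.

#include <string.h>

/* Copy the native instruction template over the call site when it
 * fits; returning 0 here would leave the indirect call in place. */
static unsigned patch_insns(void *ibuf, unsigned len,
			    const char *start, const char *end)
{
	unsigned insn_len = (unsigned)(end - start);

	if (insn_len > len)
		return 0;
	memcpy(ibuf, start, insn_len);
	return insn_len;
}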
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 7e8a1a650435..b9531d343134 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -39,7 +39,8 @@
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 
 #define read_barrier_depends()	do { } while (0)
 #define smp_read_barrier_depends()	do { } while (0)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 956374c1edbc..9e2ba5c6e1dd 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,56 @@
 #include "xen-ops.h"
 #include "debugfs.h"
 
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+#include <asm/qspinlock.h>
+
+static void xen_qlock_kick(int cpu)
+{
+	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+}
+
+/*
+ * Halt the current CPU & release it back to the host
+ */
+static void xen_qlock_wait(u8 *byte, u8 val)
+{
+	int irq = __this_cpu_read(lock_kicker_irq);
+
+	/* If kicker interrupts not initialized yet, just spin */
+	if (irq == -1)
+		return;
+
+	/* clear pending */
+	xen_clear_irq_pending(irq);
+	barrier();
+
+	/*
+	 * We check the byte value after clearing pending IRQ to make sure
+	 * that we won't miss a wakeup event because of the clearing.
+	 *
+	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
+	 * So it is effectively a memory barrier for x86.
+	 */
+	if (READ_ONCE(*byte) != val)
+		return;
+
+	/*
+	 * If an interrupt happens here, it will leave the wakeup irq
+	 * pending, which will cause xen_poll_irq() to return
+	 * immediately.
+	 */
+
+	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
+	xen_poll_irq(irq);
+}
+
+#else /* CONFIG_QUEUED_SPINLOCKS */
+
 enum xen_contention_stat {
 	TAKEN_SLOW,
 	TAKEN_SLOW_PICKUP,
@@ -100,12 +150,9 @@ struct xen_lock_waiting {
 	__ticket_t want;
 };
 
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
-static bool xen_pvspin = true;
 __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	int irq = __this_cpu_read(lock_kicker_irq);
@@ -217,6 +264,7 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
 		}
 	}
 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
@@ -280,8 +328,16 @@ void __init xen_init_spinlocks(void)
 		return;
 	}
 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
+#ifdef CONFIG_QUEUED_SPINLOCKS
+	__pv_init_lock_hash();
+	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_lock_ops.wait = xen_qlock_wait;
+	pv_lock_ops.kick = xen_qlock_kick;
+#else
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
 	pv_lock_ops.unlock_kick = xen_unlock_kick;
+#endif
 }
 
 /*
@@ -310,7 +366,7 @@ static __init int xen_parse_nopvspin(char *arg)
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#ifdef CONFIG_XEN_DEBUG_FS
+#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)
 
 static struct dentry *d_spin_debug;
 
diff --git a/fs/select.c b/fs/select.c
index f684c750e08a..015547330e88 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -189,7 +189,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	 * doesn't imply write barrier and the users expect write
 	 * barrier semantics on wakeup functions.  The following
 	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
-	 * and is paired with set_mb() in poll_schedule_timeout.
+	 * and is paired with smp_store_mb() in poll_schedule_timeout.
 	 */
 	smp_wmb();
 	pwq->triggered = 1;
@@ -244,7 +244,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 	/*
 	 * Prepare for the next iteration.
 	 *
-	 * The following set_mb() serves two purposes.  First, it's
+	 * The following smp_store_mb() serves two purposes.  First, it's
 	 * the counterpart rmb of the wmb in pollwake() such that data
 	 * written before wake up is always visible after wake up.
 	 * Second, the full barrier guarantees that triggered clearing
@@ -252,7 +252,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 	 * this problem doesn't exist for the first iteration as
 	 * add_wait_queue() has full barrier semantics.
 	 */
-	set_mb(pwq->triggered, 0);
+	smp_store_mb(pwq->triggered, 0);
 
 	return rc;
 }
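
The comments in this hunk describe the barrier pairing in words; a hedged userspace sketch of the two sides (helpers as in the earlier sketches, both variables invented stand-ins for the pwq fields):

#define smp_mb()		__sync_synchronize()
#define smp_wmb()		__sync_synchronize()
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)

static int event_data;	/* data written before the wakeup */
static int triggered;	/* pwq->triggered stand-in */

static void waker_side(void)		/* pollwake() analogue */
{
	event_data = 1;
	smp_wmb();			/* order the data before the flag */
	WRITE_ONCE(triggered, 1);
}

static void sleeper_side(void)		/* poll_schedule_timeout() analogue */
{
	/* Full barrier: the clear is ordered before the re-poll, so a
	 * wakeup racing with the clear cannot be lost. */
	smp_store_mb(triggered, 0);
	/* ...re-poll the descriptors before deciding to sleep... */
}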
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index f5c40b0fadc2..e6a83d712ef6 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -66,8 +66,8 @@
 #define smp_read_barrier_depends()	do { } while (0)
 #endif
 
-#ifndef set_mb
-#define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 811fb1e9b061..3766ab34aa45 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -86,9 +86,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 
 /*
  * Atomic compare and exchange.
- *
- * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
- * a cmpxchg primitive faster than repeated local irq save/restore exists.
  */
 #include <asm-generic/cmpxchg-local.h>
 
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h new file mode 100644 index 000000000000..83bfb87f5bf1 --- /dev/null +++ b/include/asm-generic/qspinlock.h | |||
@@ -0,0 +1,139 @@ | |||
1 | /* | ||
2 | * Queued spinlock | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. | ||
15 | * | ||
16 | * Authors: Waiman Long <waiman.long@hp.com> | ||
17 | */ | ||
18 | #ifndef __ASM_GENERIC_QSPINLOCK_H | ||
19 | #define __ASM_GENERIC_QSPINLOCK_H | ||
20 | |||
21 | #include <asm-generic/qspinlock_types.h> | ||
22 | |||
23 | /** | ||
24 | * queued_spin_is_locked - is the spinlock locked? | ||
25 | * @lock: Pointer to queued spinlock structure | ||
26 | * Return: 1 if it is locked, 0 otherwise | ||
27 | */ | ||
28 | static __always_inline int queued_spin_is_locked(struct qspinlock *lock) | ||
29 | { | ||
30 | return atomic_read(&lock->val); | ||
31 | } | ||
32 | |||
33 | /** | ||
34 | * queued_spin_value_unlocked - is the spinlock structure unlocked? | ||
35 | * @lock: queued spinlock structure | ||
36 | * Return: 1 if it is unlocked, 0 otherwise | ||
37 | * | ||
38 | * N.B. Whenever there are tasks waiting for the lock, it is considered | ||
39 | * locked wrt the lockref code to avoid lock stealing by the lockref | ||
40 | * code and change things underneath the lock. This also allows some | ||
41 | * optimizations to be applied without conflict with lockref. | ||
42 | */ | ||
43 | static __always_inline int queued_spin_value_unlocked(struct qspinlock lock) | ||
44 | { | ||
45 | return !atomic_read(&lock.val); | ||
46 | } | ||
47 | |||
48 | /** | ||
49 | * queued_spin_is_contended - check if the lock is contended | ||
50 | * @lock : Pointer to queued spinlock structure | ||
51 | * Return: 1 if lock contended, 0 otherwise | ||
52 | */ | ||
53 | static __always_inline int queued_spin_is_contended(struct qspinlock *lock) | ||
54 | { | ||
55 | return atomic_read(&lock->val) & ~_Q_LOCKED_MASK; | ||
56 | } | ||
57 | /** | ||
58 | * queued_spin_trylock - try to acquire the queued spinlock | ||
59 | * @lock : Pointer to queued spinlock structure | ||
60 | * Return: 1 if lock acquired, 0 if failed | ||
61 | */ | ||
62 | static __always_inline int queued_spin_trylock(struct qspinlock *lock) | ||
63 | { | ||
64 | if (!atomic_read(&lock->val) && | ||
65 | (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0)) | ||
66 | return 1; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); | ||
71 | |||
72 | /** | ||
73 | * queued_spin_lock - acquire a queued spinlock | ||
74 | * @lock: Pointer to queued spinlock structure | ||
75 | */ | ||
76 | static __always_inline void queued_spin_lock(struct qspinlock *lock) | ||
77 | { | ||
78 | u32 val; | ||
79 | |||
80 | val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL); | ||
81 | if (likely(val == 0)) | ||
82 | return; | ||
83 | queued_spin_lock_slowpath(lock, val); | ||
84 | } | ||
85 | |||
86 | #ifndef queued_spin_unlock | ||
87 | /** | ||
88 | * queued_spin_unlock - release a queued spinlock | ||
89 | * @lock : Pointer to queued spinlock structure | ||
90 | */ | ||
91 | static __always_inline void queued_spin_unlock(struct qspinlock *lock) | ||
92 | { | ||
93 | /* | ||
94 | * smp_mb__before_atomic() in order to guarantee release semantics | ||
95 | */ | ||
96 | smp_mb__before_atomic(); | ||
97 | atomic_sub(_Q_LOCKED_VAL, &lock->val); | ||
98 | } | ||
99 | #endif | ||
100 | |||
101 | /** | ||
102 | * queued_spin_unlock_wait - wait until current lock holder releases the lock | ||
103 | * @lock : Pointer to queued spinlock structure | ||
104 | * | ||
105 | * There is a very slight possibility of live-lock if the lockers keep coming | ||
106 | * and the waiter is just unfortunate enough to not see any unlock state. | ||
107 | */ | ||
108 | static inline void queued_spin_unlock_wait(struct qspinlock *lock) | ||
109 | { | ||
110 | while (atomic_read(&lock->val) & _Q_LOCKED_MASK) | ||
111 | cpu_relax(); | ||
112 | } | ||
113 | |||
114 | #ifndef virt_queued_spin_lock | ||
115 | static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock) | ||
116 | { | ||
117 | return false; | ||
118 | } | ||
119 | #endif | ||
120 | |||
121 | /* | ||
122 | * Initializer | ||
123 | */ | ||
124 | #define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) } | ||
125 | |||
126 | /* | ||
127 | * Remapping spinlock architecture specific functions to the corresponding | ||
128 | * queued spinlock functions. | ||
129 | */ | ||
130 | #define arch_spin_is_locked(l) queued_spin_is_locked(l) | ||
131 | #define arch_spin_is_contended(l) queued_spin_is_contended(l) | ||
132 | #define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l) | ||
133 | #define arch_spin_lock(l) queued_spin_lock(l) | ||
134 | #define arch_spin_trylock(l) queued_spin_trylock(l) | ||
135 | #define arch_spin_unlock(l) queued_spin_unlock(l) | ||
136 | #define arch_spin_lock_flags(l, f) queued_spin_lock(l) | ||
137 | #define arch_spin_unlock_wait(l) queued_spin_unlock_wait(l) | ||
138 | |||
139 | #endif /* __ASM_GENERIC_QSPINLOCK_H */ | ||
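The fast path above is a single cmpxchg on the 32-bit word: 0 means unlocked, and only the 0 -> _Q_LOCKED_VAL transition wins the lock. A minimal userspace sketch of queued_spin_trylock(), assuming C11 stdatomic in place of the kernel's atomic_t API (sketch_trylock and the pared-down struct are illustrative names, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

#define _Q_LOCKED_VAL 1U

struct qspinlock { atomic_uint val; };

/* Succeed only on the 0 -> locked transition, mirroring
 * atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0 above. */
static int sketch_trylock(struct qspinlock *lock)
{
        unsigned int expected = 0;

        return atomic_load(&lock->val) == 0 &&
               atomic_compare_exchange_strong(&lock->val, &expected,
                                              _Q_LOCKED_VAL);
}

int main(void)
{
        struct qspinlock lock = { 0 };

        printf("first trylock:  %d\n", sketch_trylock(&lock));  /* 1 */
        printf("second trylock: %d\n", sketch_trylock(&lock));  /* 0 */
        return 0;
}

queued_spin_lock() is the same cmpxchg with a fall-through into the slowpath whenever the observed value is non-zero.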
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h new file mode 100644 index 000000000000..85f888e86761 --- /dev/null +++ b/include/asm-generic/qspinlock_types.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Queued spinlock | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. | ||
15 | * | ||
16 | * Authors: Waiman Long <waiman.long@hp.com> | ||
17 | */ | ||
18 | #ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H | ||
19 | #define __ASM_GENERIC_QSPINLOCK_TYPES_H | ||
20 | |||
21 | /* | ||
22 | * Including atomic.h with PARAVIRT on will cause compilation errors because | ||
23 | * of recursive header file inclusion via paravirt_types.h. So don't include | ||
24 | * it if PARAVIRT is on. | ||
25 | */ | ||
26 | #ifndef CONFIG_PARAVIRT | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/atomic.h> | ||
29 | #endif | ||
30 | |||
31 | typedef struct qspinlock { | ||
32 | atomic_t val; | ||
33 | } arch_spinlock_t; | ||
34 | |||
35 | /* | ||
36 | * Bitfields in the atomic value: | ||
37 | * | ||
38 | * When NR_CPUS < 16K | ||
39 | * 0- 7: locked byte | ||
40 | * 8: pending | ||
41 | * 9-15: not used | ||
42 | * 16-17: tail index | ||
43 | * 18-31: tail cpu (+1) | ||
44 | * | ||
45 | * When NR_CPUS >= 16K | ||
46 | * 0- 7: locked byte | ||
47 | * 8: pending | ||
48 | * 9-10: tail index | ||
49 | * 11-31: tail cpu (+1) | ||
50 | */ | ||
51 | #define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\ | ||
52 | << _Q_ ## type ## _OFFSET) | ||
53 | #define _Q_LOCKED_OFFSET 0 | ||
54 | #define _Q_LOCKED_BITS 8 | ||
55 | #define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED) | ||
56 | |||
57 | #define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS) | ||
58 | #if CONFIG_NR_CPUS < (1U << 14) | ||
59 | #define _Q_PENDING_BITS 8 | ||
60 | #else | ||
61 | #define _Q_PENDING_BITS 1 | ||
62 | #endif | ||
63 | #define _Q_PENDING_MASK _Q_SET_MASK(PENDING) | ||
64 | |||
65 | #define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS) | ||
66 | #define _Q_TAIL_IDX_BITS 2 | ||
67 | #define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX) | ||
68 | |||
69 | #define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS) | ||
70 | #define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET) | ||
71 | #define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU) | ||
72 | |||
73 | #define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET | ||
74 | #define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) | ||
75 | |||
76 | #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) | ||
77 | #define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) | ||
78 | |||
79 | #endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */ | ||
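The constants above derive mechanically from the offsets, so the layout can be checked standalone. A sketch that reproduces the NR_CPUS < 16K case (8 pending bits) and prints the resulting masks; a reimplementation for illustration, not the kernel header:

#include <stdio.h>

#define SET_MASK(off, bits)     (((1U << (bits)) - 1) << (off))

#define LOCKED_OFFSET   0
#define LOCKED_BITS     8
#define PENDING_OFFSET  (LOCKED_OFFSET + LOCKED_BITS)
#define PENDING_BITS    8                       /* NR_CPUS < 16K */
#define TAIL_IDX_OFFSET (PENDING_OFFSET + PENDING_BITS)
#define TAIL_IDX_BITS   2
#define TAIL_CPU_OFFSET (TAIL_IDX_OFFSET + TAIL_IDX_BITS)
#define TAIL_CPU_BITS   (32 - TAIL_CPU_OFFSET)

int main(void)
{
        printf("locked:   0x%08x\n", SET_MASK(LOCKED_OFFSET, LOCKED_BITS));     /* 0x000000ff */
        printf("pending:  0x%08x\n", SET_MASK(PENDING_OFFSET, PENDING_BITS));   /* 0x0000ff00 */
        printf("tail idx: 0x%08x\n", SET_MASK(TAIL_IDX_OFFSET, TAIL_IDX_BITS)); /* 0x00030000 */
        printf("tail cpu: 0x%08x\n", SET_MASK(TAIL_CPU_OFFSET, TAIL_CPU_BITS)); /* 0xfffc0000 */
        return 0;
}

The four masks tile the 32-bit word exactly as the comment block describes: the locked byte in bits 0-7, the whole second byte for pending, the tail index in 16-17 and the tail cpu in 18-31.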
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5d66777914db..05be2352fef8 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -250,7 +250,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
250 | ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) | 250 | ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) |
251 | 251 | ||
252 | #define WRITE_ONCE(x, val) \ | 252 | #define WRITE_ONCE(x, val) \ |
253 | ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; }) | 253 | ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) |
254 | 254 | ||
255 | /** | 255 | /** |
256 | * READ_ONCE_CTRL - Read a value heading a control dependency | 256 | * READ_ONCE_CTRL - Read a value heading a control dependency |
@@ -466,7 +466,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
466 | * with an explicit memory barrier or atomic instruction that provides the | 466 | * with an explicit memory barrier or atomic instruction that provides the |
467 | * required ordering. | 467 | * required ordering. |
468 | * | 468 | * |
469 | * If possible use READ_ONCE/ASSIGN_ONCE instead. | 469 | * If possible use READ_ONCE()/WRITE_ONCE() instead. |
470 | */ | 470 | */ |
471 | #define __ACCESS_ONCE(x) ({ \ | 471 | #define __ACCESS_ONCE(x) ({ \ |
472 | __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ | 472 | __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ |
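The WRITE_ONCE() change above switches it to the same union trick READ_ONCE() uses: the union provides a char[] view of the value, so a size-switched helper can emit a single volatile store of the right width. A userspace sketch of the pattern, assuming GCC/Clang statement expressions; the plain memcpy fallback stands in for the kernel's barrier()-wrapped one:

#include <stdio.h>
#include <string.h>

static void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile char *)p = *(char *)res; break;
        case 2: *(volatile short *)p = *(short *)res; break;
        case 4: *(volatile int *)p = *(int *)res; break;
        case 8: *(volatile long long *)p = *(long long *)res; break;
        default: memcpy((void *)p, res, size); break;
        }
}

#define WRITE_ONCE(x, val)                                              \
({                                                                      \
        union { __typeof__(x) __val; char __c[1]; } __u =               \
                { .__val = (val) };                                     \
        __write_once_size(&(x), __u.__c, sizeof(x));                    \
        __u.__val;                                                      \
})

int main(void)
{
        int flag = 0;

        WRITE_ONCE(flag, 1);
        printf("flag = %d\n", flag);    /* 1 */
        return 0;
}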
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 066ba4157541..2722111591a3 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -130,8 +130,8 @@ enum bounce_type { | |||
130 | }; | 130 | }; |
131 | 131 | ||
132 | struct lock_class_stats { | 132 | struct lock_class_stats { |
133 | unsigned long contention_point[4]; | 133 | unsigned long contention_point[LOCKSTAT_POINTS]; |
134 | unsigned long contending_point[4]; | 134 | unsigned long contending_point[LOCKSTAT_POINTS]; |
135 | struct lock_time read_waittime; | 135 | struct lock_time read_waittime; |
136 | struct lock_time write_waittime; | 136 | struct lock_time write_waittime; |
137 | struct lock_time read_holdtime; | 137 | struct lock_time read_holdtime; |
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h index 3a6490e81b28..703ea5c30a33 100644 --- a/include/linux/osq_lock.h +++ b/include/linux/osq_lock.h | |||
@@ -32,4 +32,9 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock) | |||
32 | extern bool osq_lock(struct optimistic_spin_queue *lock); | 32 | extern bool osq_lock(struct optimistic_spin_queue *lock); |
33 | extern void osq_unlock(struct optimistic_spin_queue *lock); | 33 | extern void osq_unlock(struct optimistic_spin_queue *lock); |
34 | 34 | ||
35 | static inline bool osq_is_locked(struct optimistic_spin_queue *lock) | ||
36 | { | ||
37 | return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL; | ||
38 | } | ||
39 | |||
35 | #endif | 40 | #endif |
diff --git a/include/linux/sched.h b/include/linux/sched.h index a1158c954f0f..8ca95f6a9395 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -253,7 +253,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
253 | #define set_task_state(tsk, state_value) \ | 253 | #define set_task_state(tsk, state_value) \ |
254 | do { \ | 254 | do { \ |
255 | (tsk)->task_state_change = _THIS_IP_; \ | 255 | (tsk)->task_state_change = _THIS_IP_; \ |
256 | set_mb((tsk)->state, (state_value)); \ | 256 | smp_store_mb((tsk)->state, (state_value)); \ |
257 | } while (0) | 257 | } while (0) |
258 | 258 | ||
259 | /* | 259 | /* |
@@ -275,7 +275,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
275 | #define set_current_state(state_value) \ | 275 | #define set_current_state(state_value) \ |
276 | do { \ | 276 | do { \ |
277 | current->task_state_change = _THIS_IP_; \ | 277 | current->task_state_change = _THIS_IP_; \ |
278 | set_mb(current->state, (state_value)); \ | 278 | smp_store_mb(current->state, (state_value)); \ |
279 | } while (0) | 279 | } while (0) |
280 | 280 | ||
281 | #else | 281 | #else |
@@ -283,7 +283,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
283 | #define __set_task_state(tsk, state_value) \ | 283 | #define __set_task_state(tsk, state_value) \ |
284 | do { (tsk)->state = (state_value); } while (0) | 284 | do { (tsk)->state = (state_value); } while (0) |
285 | #define set_task_state(tsk, state_value) \ | 285 | #define set_task_state(tsk, state_value) \ |
286 | set_mb((tsk)->state, (state_value)) | 286 | smp_store_mb((tsk)->state, (state_value)) |
287 | 287 | ||
288 | /* | 288 | /* |
289 | * set_current_state() includes a barrier so that the write of current->state | 289 | * set_current_state() includes a barrier so that the write of current->state |
@@ -299,7 +299,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
299 | #define __set_current_state(state_value) \ | 299 | #define __set_current_state(state_value) \ |
300 | do { current->state = (state_value); } while (0) | 300 | do { current->state = (state_value); } while (0) |
301 | #define set_current_state(state_value) \ | 301 | #define set_current_state(state_value) \ |
302 | set_mb(current->state, (state_value)) | 302 | smp_store_mb(current->state, (state_value)) |
303 | 303 | ||
304 | #endif | 304 | #endif |
305 | 305 | ||
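These call sites motivate the rename visible above: the primitive is a store followed by a full memory barrier, pairing with the waker's condition store and state check. A userspace sketch of that pairing, with C11 seq_cst atomics standing in for the kernel primitives and a busy-wait standing in for schedule(); shape only, build with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int state;        /* task->state analogue: 1 == sleeping */
static atomic_int cond;         /* the event the sleeper waits for */

static void *waker(void *arg)
{
        (void)arg;
        atomic_store(&cond, 1);         /* publish the event ... */
        atomic_store(&state, 0);        /* ... then wake (try_to_wake_up()) */
        return NULL;
}

int main(void)
{
        pthread_t t;

        /* set_current_state(): store the state, then a full barrier, so
         * the state write cannot be reordered after the condition read
         * below -- otherwise the wakeup could be missed. */
        atomic_store(&state, 1);
        atomic_thread_fence(memory_order_seq_cst);

        pthread_create(&t, NULL, waker, NULL);

        while (!atomic_load(&cond) && atomic_load(&state))
                ;       /* schedule() would block here in the kernel */

        pthread_join(t, NULL);
        printf("woken, cond = %d\n", atomic_load(&cond));
        return 0;
}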
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index 08561f1acd13..ebdb0043203a 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks | |||
@@ -235,9 +235,16 @@ config LOCK_SPIN_ON_OWNER | |||
235 | def_bool y | 235 | def_bool y |
236 | depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER | 236 | depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER |
237 | 237 | ||
238 | config ARCH_USE_QUEUE_RWLOCK | 238 | config ARCH_USE_QUEUED_SPINLOCKS |
239 | bool | 239 | bool |
240 | 240 | ||
241 | config QUEUE_RWLOCK | 241 | config QUEUED_SPINLOCKS |
242 | def_bool y if ARCH_USE_QUEUE_RWLOCK | 242 | def_bool y if ARCH_USE_QUEUED_SPINLOCKS |
243 | depends on SMP | ||
244 | |||
245 | config ARCH_USE_QUEUED_RWLOCKS | ||
246 | bool | ||
247 | |||
248 | config QUEUED_RWLOCKS | ||
249 | def_bool y if ARCH_USE_QUEUED_RWLOCKS | ||
243 | depends on SMP | 250 | depends on SMP |
diff --git a/kernel/futex.c b/kernel/futex.c index 2579e407ff67..55ca63ad9622 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -2055,7 +2055,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, | |||
2055 | { | 2055 | { |
2056 | /* | 2056 | /* |
2057 | * The task state is guaranteed to be set before another task can | 2057 | * The task state is guaranteed to be set before another task can |
2058 | * wake it. set_current_state() is implemented using set_mb() and | 2058 | * wake it. set_current_state() is implemented using smp_store_mb() and |
2059 | * queue_me() calls spin_unlock() upon completion, both serializing | 2059 | * queue_me() calls spin_unlock() upon completion, both serializing |
2060 | * access to the hash list and forcing another memory barrier. | 2060 | * access to the hash list and forcing another memory barrier. |
2061 | */ | 2061 | */ |
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index de7a416cca2a..7dd5c9918e4c 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile | |||
@@ -17,6 +17,7 @@ obj-$(CONFIG_SMP) += spinlock.o | |||
17 | obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o | 17 | obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o |
18 | obj-$(CONFIG_SMP) += lglock.o | 18 | obj-$(CONFIG_SMP) += lglock.o |
19 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o | 19 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o |
20 | obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o | ||
20 | obj-$(CONFIG_RT_MUTEXES) += rtmutex.o | 21 | obj-$(CONFIG_RT_MUTEXES) += rtmutex.o |
21 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o | 22 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o |
22 | obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o | 23 | obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o |
@@ -25,5 +26,5 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o | |||
25 | obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | 26 | obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o |
26 | obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o | 27 | obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o |
27 | obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o | 28 | obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o |
28 | obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o | 29 | obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o |
29 | obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o | 30 | obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index aaeae885d9af..456614136f1a 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -4067,8 +4067,7 @@ void __init lockdep_info(void) | |||
4067 | 4067 | ||
4068 | #ifdef CONFIG_DEBUG_LOCKDEP | 4068 | #ifdef CONFIG_DEBUG_LOCKDEP |
4069 | if (lockdep_init_error) { | 4069 | if (lockdep_init_error) { |
4070 | printk("WARNING: lockdep init error! lock-%s was acquired" | 4070 | printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error); |
4071 | "before lockdep_init\n", lock_init_error); | ||
4072 | printk("Call stack leading to lockdep invocation was:\n"); | 4071 | printk("Call stack leading to lockdep invocation was:\n"); |
4073 | print_stack_trace(&lockdep_init_trace, 0); | 4072 | print_stack_trace(&lockdep_init_trace, 0); |
4074 | } | 4073 | } |
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index 75e114bdf3f2..fd91aaa4554c 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h | |||
@@ -17,6 +17,7 @@ | |||
17 | struct mcs_spinlock { | 17 | struct mcs_spinlock { |
18 | struct mcs_spinlock *next; | 18 | struct mcs_spinlock *next; |
19 | int locked; /* 1 if lock acquired */ | 19 | int locked; /* 1 if lock acquired */ |
20 | int count; /* nesting count, see qspinlock.c */ | ||
20 | }; | 21 | }; |
21 | 22 | ||
22 | #ifndef arch_mcs_spin_lock_contended | 23 | #ifndef arch_mcs_spin_lock_contended |
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c index f956ede7f90d..6c5da483966b 100644 --- a/kernel/locking/qrwlock.c +++ b/kernel/locking/qrwlock.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Queue read/write lock | 2 | * Queued read/write locks |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
@@ -22,6 +22,26 @@ | |||
22 | #include <linux/hardirq.h> | 22 | #include <linux/hardirq.h> |
23 | #include <asm/qrwlock.h> | 23 | #include <asm/qrwlock.h> |
24 | 24 | ||
25 | /* | ||
26 | * This internal data structure is used for optimizing access to some of | ||
27 | * the subfields within the atomic_t cnts. | ||
28 | */ | ||
29 | struct __qrwlock { | ||
30 | union { | ||
31 | atomic_t cnts; | ||
32 | struct { | ||
33 | #ifdef __LITTLE_ENDIAN | ||
34 | u8 wmode; /* Writer mode */ | ||
35 | u8 rcnts[3]; /* Reader counts */ | ||
36 | #else | ||
37 | u8 rcnts[3]; /* Reader counts */ | ||
38 | u8 wmode; /* Writer mode */ | ||
39 | #endif | ||
40 | }; | ||
41 | }; | ||
42 | arch_spinlock_t lock; | ||
43 | }; | ||
44 | |||
25 | /** | 45 | /** |
26 | * rspin_until_writer_unlock - inc reader count & spin until writer is gone | 46 | * rspin_until_writer_unlock - inc reader count & spin until writer is gone |
27 | * @lock : Pointer to queue rwlock structure | 47 | * @lock : Pointer to queue rwlock structure |
@@ -107,10 +127,10 @@ void queue_write_lock_slowpath(struct qrwlock *lock) | |||
107 | * or wait for a previous writer to go away. | 127 | * or wait for a previous writer to go away. |
108 | */ | 128 | */ |
109 | for (;;) { | 129 | for (;;) { |
110 | cnts = atomic_read(&lock->cnts); | 130 | struct __qrwlock *l = (struct __qrwlock *)lock; |
111 | if (!(cnts & _QW_WMASK) && | 131 | |
112 | (atomic_cmpxchg(&lock->cnts, cnts, | 132 | if (!READ_ONCE(l->wmode) && |
113 | cnts | _QW_WAITING) == cnts)) | 133 | (cmpxchg(&l->wmode, 0, _QW_WAITING) == 0)) |
114 | break; | 134 | break; |
115 | 135 | ||
116 | cpu_relax_lowlatency(); | 136 | cpu_relax_lowlatency(); |
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c new file mode 100644 index 000000000000..38c49202d532 --- /dev/null +++ b/kernel/locking/qspinlock.c | |||
@@ -0,0 +1,473 @@ | |||
1 | /* | ||
2 | * Queued spinlock | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. | ||
15 | * (C) Copyright 2013-2014 Red Hat, Inc. | ||
16 | * (C) Copyright 2015 Intel Corp. | ||
17 | * | ||
18 | * Authors: Waiman Long <waiman.long@hp.com> | ||
19 | * Peter Zijlstra <peterz@infradead.org> | ||
20 | */ | ||
21 | |||
22 | #ifndef _GEN_PV_LOCK_SLOWPATH | ||
23 | |||
24 | #include <linux/smp.h> | ||
25 | #include <linux/bug.h> | ||
26 | #include <linux/cpumask.h> | ||
27 | #include <linux/percpu.h> | ||
28 | #include <linux/hardirq.h> | ||
29 | #include <linux/mutex.h> | ||
30 | #include <asm/byteorder.h> | ||
31 | #include <asm/qspinlock.h> | ||
32 | |||
33 | /* | ||
34 | * The basic principle of a queue-based spinlock can best be understood | ||
35 | * by studying a classic queue-based spinlock implementation called the | ||
36 | * MCS lock. The paper below provides a good description for this kind | ||
37 | * of lock. | ||
38 | * | ||
39 | * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf | ||
40 | * | ||
41 | * This queued spinlock implementation is based on the MCS lock; however, to | ||
42 | * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its | ||
43 | * existing API, we must modify it somehow. | ||
44 | * | ||
45 | * In particular; where the traditional MCS lock consists of a tail pointer | ||
46 | * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to | ||
47 | * unlock the next pending (next->locked), we compress both these: {tail, | ||
48 | * next->locked} into a single u32 value. | ||
49 | * | ||
50 | * Since a spinlock disables recursion of its own context, there is a limit | ||
51 | * to the contexts that can nest: task, softirq, hardirq and nmi. As there | ||
52 | * are at most 4 nesting levels, the level can be encoded by a 2-bit number. Now | ||
53 | * we can encode the tail by combining the 2-bit nesting level with the cpu | ||
54 | * number. With one byte for the lock value and 3 bytes for the tail, only a | ||
55 | * 32-bit word is now needed. Even though we only need 1 bit for the lock, | ||
56 | * we extend it to a full byte to achieve better performance for architectures | ||
57 | * that support atomic byte write. | ||
58 | * | ||
59 | * We also change the first spinner to spin on the lock bit instead of its | ||
60 | * node, thereby avoiding the need to carry a node from lock to unlock, and | ||
61 | * preserving existing lock API. This also makes the unlock code simpler and | ||
62 | * faster. | ||
63 | * | ||
64 | * N.B. The current implementation only supports architectures that allow | ||
65 | * atomic operations on smaller 8-bit and 16-bit data types. | ||
66 | * | ||
67 | */ | ||
68 | |||
69 | #include "mcs_spinlock.h" | ||
70 | |||
71 | #ifdef CONFIG_PARAVIRT_SPINLOCKS | ||
72 | #define MAX_NODES 8 | ||
73 | #else | ||
74 | #define MAX_NODES 4 | ||
75 | #endif | ||
76 | |||
77 | /* | ||
78 | * Per-CPU queue node structures; we can never have more than 4 nested | ||
79 | * contexts: task, softirq, hardirq, nmi. | ||
80 | * | ||
81 | * Exactly fits one 64-byte cacheline on a 64-bit architecture. | ||
82 | * | ||
83 | * PV doubles the storage and uses the second cacheline for PV state. | ||
84 | */ | ||
85 | static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]); | ||
86 | |||
87 | /* | ||
88 | * We must be able to distinguish between no-tail and the tail at 0:0, | ||
89 | * therefore increment the cpu number by one. | ||
90 | */ | ||
91 | |||
92 | static inline u32 encode_tail(int cpu, int idx) | ||
93 | { | ||
94 | u32 tail; | ||
95 | |||
96 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
97 | BUG_ON(idx > 3); | ||
98 | #endif | ||
99 | tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET; | ||
100 | tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ | ||
101 | |||
102 | return tail; | ||
103 | } | ||
104 | |||
105 | static inline struct mcs_spinlock *decode_tail(u32 tail) | ||
106 | { | ||
107 | int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; | ||
108 | int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; | ||
109 | |||
110 | return per_cpu_ptr(&mcs_nodes[idx], cpu); | ||
111 | } | ||
112 | |||
113 | #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK) | ||
114 | |||
115 | /* | ||
116 | * By using the whole 2nd least significant byte for the pending bit, we | ||
117 | * can allow better optimization of the lock acquisition for the pending | ||
118 | * bit holder. | ||
119 | * | ||
120 | * This internal structure is also used by the set_locked() function, which | ||
121 | * is not restricted to _Q_PENDING_BITS == 8. | ||
122 | */ | ||
123 | struct __qspinlock { | ||
124 | union { | ||
125 | atomic_t val; | ||
126 | #ifdef __LITTLE_ENDIAN | ||
127 | struct { | ||
128 | u8 locked; | ||
129 | u8 pending; | ||
130 | }; | ||
131 | struct { | ||
132 | u16 locked_pending; | ||
133 | u16 tail; | ||
134 | }; | ||
135 | #else | ||
136 | struct { | ||
137 | u16 tail; | ||
138 | u16 locked_pending; | ||
139 | }; | ||
140 | struct { | ||
141 | u8 reserved[2]; | ||
142 | u8 pending; | ||
143 | u8 locked; | ||
144 | }; | ||
145 | #endif | ||
146 | }; | ||
147 | }; | ||
148 | |||
149 | #if _Q_PENDING_BITS == 8 | ||
150 | /** | ||
151 | * clear_pending_set_locked - take ownership and clear the pending bit. | ||
152 | * @lock: Pointer to queued spinlock structure | ||
153 | * | ||
154 | * *,1,0 -> *,0,1 | ||
155 | * | ||
156 | * Lock stealing is not allowed if this function is used. | ||
157 | */ | ||
158 | static __always_inline void clear_pending_set_locked(struct qspinlock *lock) | ||
159 | { | ||
160 | struct __qspinlock *l = (void *)lock; | ||
161 | |||
162 | WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL); | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * xchg_tail - Put in the new queue tail code word & retrieve previous one | ||
167 | * @lock : Pointer to queued spinlock structure | ||
168 | * @tail : The new queue tail code word | ||
169 | * Return: The previous queue tail code word | ||
170 | * | ||
171 | * xchg(lock, tail) | ||
172 | * | ||
173 | * p,*,* -> n,*,* ; prev = xchg(lock, node) | ||
174 | */ | ||
175 | static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) | ||
176 | { | ||
177 | struct __qspinlock *l = (void *)lock; | ||
178 | |||
179 | return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET; | ||
180 | } | ||
181 | |||
182 | #else /* _Q_PENDING_BITS == 8 */ | ||
183 | |||
184 | /** | ||
185 | * clear_pending_set_locked - take ownership and clear the pending bit. | ||
186 | * @lock: Pointer to queued spinlock structure | ||
187 | * | ||
188 | * *,1,0 -> *,0,1 | ||
189 | */ | ||
190 | static __always_inline void clear_pending_set_locked(struct qspinlock *lock) | ||
191 | { | ||
192 | atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val); | ||
193 | } | ||
194 | |||
195 | /** | ||
196 | * xchg_tail - Put in the new queue tail code word & retrieve previous one | ||
197 | * @lock : Pointer to queued spinlock structure | ||
198 | * @tail : The new queue tail code word | ||
199 | * Return: The previous queue tail code word | ||
200 | * | ||
201 | * xchg(lock, tail) | ||
202 | * | ||
203 | * p,*,* -> n,*,* ; prev = xchg(lock, node) | ||
204 | */ | ||
205 | static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) | ||
206 | { | ||
207 | u32 old, new, val = atomic_read(&lock->val); | ||
208 | |||
209 | for (;;) { | ||
210 | new = (val & _Q_LOCKED_PENDING_MASK) | tail; | ||
211 | old = atomic_cmpxchg(&lock->val, val, new); | ||
212 | if (old == val) | ||
213 | break; | ||
214 | |||
215 | val = old; | ||
216 | } | ||
217 | return old; | ||
218 | } | ||
219 | #endif /* _Q_PENDING_BITS == 8 */ | ||
220 | |||
221 | /** | ||
222 | * set_locked - Set the lock bit and own the lock | ||
223 | * @lock: Pointer to queued spinlock structure | ||
224 | * | ||
225 | * *,*,0 -> *,0,1 | ||
226 | */ | ||
227 | static __always_inline void set_locked(struct qspinlock *lock) | ||
228 | { | ||
229 | struct __qspinlock *l = (void *)lock; | ||
230 | |||
231 | WRITE_ONCE(l->locked, _Q_LOCKED_VAL); | ||
232 | } | ||
233 | |||
234 | |||
235 | /* | ||
236 | * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for | ||
237 | * all the PV callbacks. | ||
238 | */ | ||
239 | |||
240 | static __always_inline void __pv_init_node(struct mcs_spinlock *node) { } | ||
241 | static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { } | ||
242 | static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { } | ||
243 | |||
244 | static __always_inline void __pv_wait_head(struct qspinlock *lock, | ||
245 | struct mcs_spinlock *node) { } | ||
246 | |||
247 | #define pv_enabled() false | ||
248 | |||
249 | #define pv_init_node __pv_init_node | ||
250 | #define pv_wait_node __pv_wait_node | ||
251 | #define pv_kick_node __pv_kick_node | ||
252 | #define pv_wait_head __pv_wait_head | ||
253 | |||
254 | #ifdef CONFIG_PARAVIRT_SPINLOCKS | ||
255 | #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath | ||
256 | #endif | ||
257 | |||
258 | #endif /* _GEN_PV_LOCK_SLOWPATH */ | ||
259 | |||
260 | /** | ||
261 | * queued_spin_lock_slowpath - acquire the queued spinlock | ||
262 | * @lock: Pointer to queued spinlock structure | ||
263 | * @val: Current value of the queued spinlock 32-bit word | ||
264 | * | ||
265 | * (queue tail, pending bit, lock value) | ||
266 | * | ||
267 | * fast : slow : unlock | ||
268 | * : : | ||
269 | * uncontended (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0) | ||
270 | * : | ^--------.------. / : | ||
271 | * : v \ \ | : | ||
272 | * pending : (0,1,1) +--> (0,1,0) \ | : | ||
273 | * : | ^--' | | : | ||
274 | * : v | | : | ||
275 | * uncontended : (n,x,y) +--> (n,0,0) --' | : | ||
276 | * queue : | ^--' | : | ||
277 | * : v | : | ||
278 | * contended : (*,x,y) +--> (*,0,0) ---> (*,0,1) -' : | ||
279 | * queue : ^--' : | ||
280 | */ | ||
281 | void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) | ||
282 | { | ||
283 | struct mcs_spinlock *prev, *next, *node; | ||
284 | u32 new, old, tail; | ||
285 | int idx; | ||
286 | |||
287 | BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); | ||
288 | |||
289 | if (pv_enabled()) | ||
290 | goto queue; | ||
291 | |||
292 | if (virt_queued_spin_lock(lock)) | ||
293 | return; | ||
294 | |||
295 | /* | ||
296 | * wait for in-progress pending->locked hand-overs | ||
297 | * | ||
298 | * 0,1,0 -> 0,0,1 | ||
299 | */ | ||
300 | if (val == _Q_PENDING_VAL) { | ||
301 | while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL) | ||
302 | cpu_relax(); | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * trylock || pending | ||
307 | * | ||
308 | * 0,0,0 -> 0,0,1 ; trylock | ||
309 | * 0,0,1 -> 0,1,1 ; pending | ||
310 | */ | ||
311 | for (;;) { | ||
312 | /* | ||
313 | * If we observe any contention; queue. | ||
314 | */ | ||
315 | if (val & ~_Q_LOCKED_MASK) | ||
316 | goto queue; | ||
317 | |||
318 | new = _Q_LOCKED_VAL; | ||
319 | if (val == new) | ||
320 | new |= _Q_PENDING_VAL; | ||
321 | |||
322 | old = atomic_cmpxchg(&lock->val, val, new); | ||
323 | if (old == val) | ||
324 | break; | ||
325 | |||
326 | val = old; | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * we won the trylock | ||
331 | */ | ||
332 | if (new == _Q_LOCKED_VAL) | ||
333 | return; | ||
334 | |||
335 | /* | ||
336 | * we're pending, wait for the owner to go away. | ||
337 | * | ||
338 | * *,1,1 -> *,1,0 | ||
339 | * | ||
340 | * this wait loop must be a load-acquire such that we match the | ||
341 | * store-release that clears the locked bit and create lock | ||
342 | * sequentiality; this is because not all clear_pending_set_locked() | ||
343 | * implementations imply full barriers. | ||
344 | */ | ||
345 | while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK) | ||
346 | cpu_relax(); | ||
347 | |||
348 | /* | ||
349 | * take ownership and clear the pending bit. | ||
350 | * | ||
351 | * *,1,0 -> *,0,1 | ||
352 | */ | ||
353 | clear_pending_set_locked(lock); | ||
354 | return; | ||
355 | |||
356 | /* | ||
357 | * End of pending bit optimistic spinning and beginning of MCS | ||
358 | * queuing. | ||
359 | */ | ||
360 | queue: | ||
361 | node = this_cpu_ptr(&mcs_nodes[0]); | ||
362 | idx = node->count++; | ||
363 | tail = encode_tail(smp_processor_id(), idx); | ||
364 | |||
365 | node += idx; | ||
366 | node->locked = 0; | ||
367 | node->next = NULL; | ||
368 | pv_init_node(node); | ||
369 | |||
370 | /* | ||
371 | * We touched a (possibly) cold cacheline in the per-cpu queue node; | ||
372 | * attempt the trylock once more in the hope someone let go while we | ||
373 | * weren't watching. | ||
374 | */ | ||
375 | if (queued_spin_trylock(lock)) | ||
376 | goto release; | ||
377 | |||
378 | /* | ||
379 | * We have already touched the queueing cacheline; don't bother with | ||
380 | * pending stuff. | ||
381 | * | ||
382 | * p,*,* -> n,*,* | ||
383 | */ | ||
384 | old = xchg_tail(lock, tail); | ||
385 | |||
386 | /* | ||
387 | * if there was a previous node; link it and wait until reaching the | ||
388 | * head of the waitqueue. | ||
389 | */ | ||
390 | if (old & _Q_TAIL_MASK) { | ||
391 | prev = decode_tail(old); | ||
392 | WRITE_ONCE(prev->next, node); | ||
393 | |||
394 | pv_wait_node(node); | ||
395 | arch_mcs_spin_lock_contended(&node->locked); | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * we're at the head of the waitqueue, wait for the owner & pending to | ||
400 | * go away. | ||
401 | * | ||
402 | * *,x,y -> *,0,0 | ||
403 | * | ||
404 | * this wait loop must use a load-acquire such that we match the | ||
405 | * store-release that clears the locked bit and create lock | ||
406 | * sequentiality; this is because the set_locked() function below | ||
407 | * does not imply a full barrier. | ||
408 | * | ||
409 | */ | ||
410 | pv_wait_head(lock, node); | ||
411 | while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK) | ||
412 | cpu_relax(); | ||
413 | |||
414 | /* | ||
415 | * claim the lock: | ||
416 | * | ||
417 | * n,0,0 -> 0,0,1 : lock, uncontended | ||
418 | * *,0,0 -> *,0,1 : lock, contended | ||
419 | * | ||
420 | * If the queue head is the only one in the queue (lock value == tail), | ||
421 | * clear the tail code and grab the lock. Otherwise, we only need | ||
422 | * to grab the lock. | ||
423 | */ | ||
424 | for (;;) { | ||
425 | if (val != tail) { | ||
426 | set_locked(lock); | ||
427 | break; | ||
428 | } | ||
429 | old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL); | ||
430 | if (old == val) | ||
431 | goto release; /* No contention */ | ||
432 | |||
433 | val = old; | ||
434 | } | ||
435 | |||
436 | /* | ||
437 | * contended path; wait for next, release. | ||
438 | */ | ||
439 | while (!(next = READ_ONCE(node->next))) | ||
440 | cpu_relax(); | ||
441 | |||
442 | arch_mcs_spin_unlock_contended(&next->locked); | ||
443 | pv_kick_node(next); | ||
444 | |||
445 | release: | ||
446 | /* | ||
447 | * release the node | ||
448 | */ | ||
449 | this_cpu_dec(mcs_nodes[0].count); | ||
450 | } | ||
451 | EXPORT_SYMBOL(queued_spin_lock_slowpath); | ||
452 | |||
453 | /* | ||
454 | * Generate the paravirt code for queued_spin_lock_slowpath(). | ||
455 | */ | ||
456 | #if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS) | ||
457 | #define _GEN_PV_LOCK_SLOWPATH | ||
458 | |||
459 | #undef pv_enabled | ||
460 | #define pv_enabled() true | ||
461 | |||
462 | #undef pv_init_node | ||
463 | #undef pv_wait_node | ||
464 | #undef pv_kick_node | ||
465 | #undef pv_wait_head | ||
466 | |||
467 | #undef queued_spin_lock_slowpath | ||
468 | #define queued_spin_lock_slowpath __pv_queued_spin_lock_slowpath | ||
469 | |||
470 | #include "qspinlock_paravirt.h" | ||
471 | #include "qspinlock.c" | ||
472 | |||
473 | #endif | ||
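The tail code word the comments above describe packs (cpu + 1) and the 2-bit nesting index into the upper bits; the +1 guarantees an encoded tail is never 0, since 0 must mean "no tail". A standalone check of just that arithmetic, using the NR_CPUS < 16K offsets; it mirrors encode_tail()/decode_tail() but is not the kernel code:

#include <assert.h>
#include <stdio.h>

#define TAIL_IDX_OFFSET 16
#define TAIL_IDX_MASK   (3U << TAIL_IDX_OFFSET)
#define TAIL_CPU_OFFSET 18

static unsigned int encode_tail(int cpu, int idx)
{
        return ((unsigned int)(cpu + 1) << TAIL_CPU_OFFSET) |
               ((unsigned int)idx << TAIL_IDX_OFFSET);
}

static void decode_tail(unsigned int tail, int *cpu, int *idx)
{
        *cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
        *idx = (tail & TAIL_IDX_MASK) >> TAIL_IDX_OFFSET;
}

int main(void)
{
        int cpu, idx;
        unsigned int tail = encode_tail(5, 2);  /* cpu 5, hardirq level */

        decode_tail(tail, &cpu, &idx);
        assert(cpu == 5 && idx == 2);
        assert(encode_tail(0, 0) != 0); /* cpu+1 keeps 0 == "no tail" */
        printf("tail = 0x%08x -> cpu %d, idx %d\n", tail, cpu, idx);
        return 0;
}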
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h new file mode 100644 index 000000000000..04ab18151cc8 --- /dev/null +++ b/kernel/locking/qspinlock_paravirt.h | |||
@@ -0,0 +1,325 @@ | |||
1 | #ifndef _GEN_PV_LOCK_SLOWPATH | ||
2 | #error "do not include this file" | ||
3 | #endif | ||
4 | |||
5 | #include <linux/hash.h> | ||
6 | #include <linux/bootmem.h> | ||
7 | |||
8 | /* | ||
9 | * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead | ||
10 | * of spinning them. | ||
11 | * | ||
12 | * This relies on the architecture to provide two paravirt hypercalls: | ||
13 | * | ||
14 | * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val | ||
15 | * pv_kick(cpu) -- wakes a suspended vcpu | ||
16 | * | ||
17 | * Using these we implement __pv_queued_spin_lock_slowpath() and | ||
18 | * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and | ||
19 | * native_queued_spin_unlock(). | ||
20 | */ | ||
21 | |||
22 | #define _Q_SLOW_VAL (3U << _Q_LOCKED_OFFSET) | ||
23 | |||
24 | enum vcpu_state { | ||
25 | vcpu_running = 0, | ||
26 | vcpu_halted, | ||
27 | }; | ||
28 | |||
29 | struct pv_node { | ||
30 | struct mcs_spinlock mcs; | ||
31 | struct mcs_spinlock __res[3]; | ||
32 | |||
33 | int cpu; | ||
34 | u8 state; | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * Lock and MCS node addresses hash table for fast lookup | ||
39 | * | ||
40 | * Hashing is done on a per-cacheline basis to minimize the need to access | ||
41 | * more than one cacheline. | ||
42 | * | ||
43 | * Dynamically allocate a hash table big enough to hold at least 4X the | ||
44 | * number of possible cpus in the system. Allocation is done on page | ||
45 | * granularity. So the minimum number of hash buckets should be at least | ||
46 | * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page. | ||
47 | * | ||
48 | * Since we should not be holding locks from NMI context (very rare indeed), the | ||
49 | * max load factor is 0.75, which is around the point where open addressing | ||
50 | * breaks down. | ||
51 | * | ||
52 | */ | ||
53 | struct pv_hash_entry { | ||
54 | struct qspinlock *lock; | ||
55 | struct pv_node *node; | ||
56 | }; | ||
57 | |||
58 | #define PV_HE_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry)) | ||
59 | #define PV_HE_MIN (PAGE_SIZE / sizeof(struct pv_hash_entry)) | ||
60 | |||
61 | static struct pv_hash_entry *pv_lock_hash; | ||
62 | static unsigned int pv_lock_hash_bits __read_mostly; | ||
63 | |||
64 | /* | ||
65 | * Allocate memory for the PV qspinlock hash buckets | ||
66 | * | ||
67 | * This function should be called from the paravirt spinlock initialization | ||
68 | * routine. | ||
69 | */ | ||
70 | void __init __pv_init_lock_hash(void) | ||
71 | { | ||
72 | int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE); | ||
73 | |||
74 | if (pv_hash_size < PV_HE_MIN) | ||
75 | pv_hash_size = PV_HE_MIN; | ||
76 | |||
77 | /* | ||
78 | * Allocate space from bootmem which should be page-size aligned | ||
79 | * and hence cacheline aligned. | ||
80 | */ | ||
81 | pv_lock_hash = alloc_large_system_hash("PV qspinlock", | ||
82 | sizeof(struct pv_hash_entry), | ||
83 | pv_hash_size, 0, HASH_EARLY, | ||
84 | &pv_lock_hash_bits, NULL, | ||
85 | pv_hash_size, pv_hash_size); | ||
86 | } | ||
87 | |||
88 | #define for_each_hash_entry(he, offset, hash) \ | ||
89 | for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0; \ | ||
90 | offset < (1 << pv_lock_hash_bits); \ | ||
91 | offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)]) | ||
92 | |||
93 | static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node) | ||
94 | { | ||
95 | unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits); | ||
96 | struct pv_hash_entry *he; | ||
97 | |||
98 | for_each_hash_entry(he, offset, hash) { | ||
99 | if (!cmpxchg(&he->lock, NULL, lock)) { | ||
100 | WRITE_ONCE(he->node, node); | ||
101 | return &he->lock; | ||
102 | } | ||
103 | } | ||
104 | /* | ||
105 | * Hard assume there is a free entry for us. | ||
106 | * | ||
107 | * This is guaranteed by ensuring every blocked lock only ever consumes | ||
108 | * a single entry, and since we only have 4 nesting levels per CPU | ||
109 | * and allocated 4 * num_possible_cpus(), this must be so. | ||
110 | * | ||
111 | * The single entry is guaranteed by having the lock owner unhash | ||
112 | * before it releases. | ||
113 | */ | ||
114 | BUG(); | ||
115 | } | ||
116 | |||
117 | static struct pv_node *pv_unhash(struct qspinlock *lock) | ||
118 | { | ||
119 | unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits); | ||
120 | struct pv_hash_entry *he; | ||
121 | struct pv_node *node; | ||
122 | |||
123 | for_each_hash_entry(he, offset, hash) { | ||
124 | if (READ_ONCE(he->lock) == lock) { | ||
125 | node = READ_ONCE(he->node); | ||
126 | WRITE_ONCE(he->lock, NULL); | ||
127 | return node; | ||
128 | } | ||
129 | } | ||
130 | /* | ||
131 | * Hard assume we'll find an entry. | ||
132 | * | ||
133 | * This guarantees a limited lookup time and is itself guaranteed by | ||
134 | * having the lock owner do the unhash -- IFF the unlock sees the | ||
135 | * SLOW flag, there MUST be a hash entry. | ||
136 | */ | ||
137 | BUG(); | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Initialize the PV part of the mcs_spinlock node. | ||
142 | */ | ||
143 | static void pv_init_node(struct mcs_spinlock *node) | ||
144 | { | ||
145 | struct pv_node *pn = (struct pv_node *)node; | ||
146 | |||
147 | BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock)); | ||
148 | |||
149 | pn->cpu = smp_processor_id(); | ||
150 | pn->state = vcpu_running; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Wait for node->locked to become true, halt the vcpu after a short spin. | ||
155 | * pv_kick_node() is used to wake the vcpu again. | ||
156 | */ | ||
157 | static void pv_wait_node(struct mcs_spinlock *node) | ||
158 | { | ||
159 | struct pv_node *pn = (struct pv_node *)node; | ||
160 | int loop; | ||
161 | |||
162 | for (;;) { | ||
163 | for (loop = SPIN_THRESHOLD; loop; loop--) { | ||
164 | if (READ_ONCE(node->locked)) | ||
165 | return; | ||
166 | cpu_relax(); | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * Order pn->state vs pn->locked thusly: | ||
171 | * | ||
172 | * [S] pn->state = vcpu_halted [S] next->locked = 1 | ||
173 | * MB MB | ||
174 | * [L] pn->locked [RmW] pn->state = vcpu_running | ||
175 | * | ||
176 | * Matches the xchg() from pv_kick_node(). | ||
177 | */ | ||
178 | smp_store_mb(pn->state, vcpu_halted); | ||
179 | |||
180 | if (!READ_ONCE(node->locked)) | ||
181 | pv_wait(&pn->state, vcpu_halted); | ||
182 | |||
183 | /* | ||
184 | * Reset the vCPU state to avoid unnecessary CPU kicking | ||
185 | */ | ||
186 | WRITE_ONCE(pn->state, vcpu_running); | ||
187 | |||
188 | /* | ||
189 | * If the locked flag is still not set after wakeup, it is a | ||
190 | * spurious wakeup and the vCPU should wait again. However, | ||
191 | * there is a pretty high overhead for CPU halting and kicking. | ||
192 | * So it is better to spin for a while in the hope that the | ||
193 | * MCS lock will be released soon. | ||
194 | */ | ||
195 | } | ||
196 | /* | ||
197 | * By now our node->locked should be 1 and our caller will not actually | ||
198 | * spin-wait for it. We do however rely on our caller to do a | ||
199 | * load-acquire for us. | ||
200 | */ | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Called after setting next->locked = 1, used to wake those stuck in | ||
205 | * pv_wait_node(). | ||
206 | */ | ||
207 | static void pv_kick_node(struct mcs_spinlock *node) | ||
208 | { | ||
209 | struct pv_node *pn = (struct pv_node *)node; | ||
210 | |||
211 | /* | ||
212 | * Note that because node->locked is already set, this actual | ||
213 | * mcs_spinlock entry could be re-used already. | ||
214 | * | ||
215 | * This should be fine however, kicking people for no reason is | ||
216 | * harmless. | ||
217 | * | ||
218 | * See the comment in pv_wait_node(). | ||
219 | */ | ||
220 | if (xchg(&pn->state, vcpu_running) == vcpu_halted) | ||
221 | pv_kick(pn->cpu); | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * Wait for l->locked to become clear; halt the vcpu after a short spin. | ||
226 | * __pv_queued_spin_unlock() will wake us. | ||
227 | */ | ||
228 | static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node) | ||
229 | { | ||
230 | struct pv_node *pn = (struct pv_node *)node; | ||
231 | struct __qspinlock *l = (void *)lock; | ||
232 | struct qspinlock **lp = NULL; | ||
233 | int loop; | ||
234 | |||
235 | for (;;) { | ||
236 | for (loop = SPIN_THRESHOLD; loop; loop--) { | ||
237 | if (!READ_ONCE(l->locked)) | ||
238 | return; | ||
239 | cpu_relax(); | ||
240 | } | ||
241 | |||
242 | WRITE_ONCE(pn->state, vcpu_halted); | ||
243 | if (!lp) { /* ONCE */ | ||
244 | lp = pv_hash(lock, pn); | ||
245 | /* | ||
246 | * lp must be set before setting _Q_SLOW_VAL | ||
247 | * | ||
248 | * [S] lp = lock [RmW] l = l->locked = 0 | ||
249 | * MB MB | ||
250 | * [S] l->locked = _Q_SLOW_VAL [L] lp | ||
251 | * | ||
252 | * Matches the cmpxchg() in __pv_queued_spin_unlock(). | ||
253 | */ | ||
254 | if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) { | ||
255 | /* | ||
256 | * The lock is free and _Q_SLOW_VAL has never | ||
257 | * been set. Therefore we need to unhash before | ||
258 | * getting the lock. | ||
259 | */ | ||
260 | WRITE_ONCE(*lp, NULL); | ||
261 | return; | ||
262 | } | ||
263 | } | ||
264 | pv_wait(&l->locked, _Q_SLOW_VAL); | ||
265 | |||
266 | /* | ||
267 | * The unlocker should have freed the lock before kicking the | ||
268 | * CPU. So if the lock is still not free, it is a spurious | ||
269 | * wakeup and so the vCPU should wait again after spinning for | ||
270 | * a while. | ||
271 | */ | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * Lock is unlocked now; the caller will acquire it without waiting. | ||
276 | * As with pv_wait_node() we rely on the caller to do a load-acquire | ||
277 | * for us. | ||
278 | */ | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * PV version of the unlock function to be used instead of | ||
283 | * queued_spin_unlock(). | ||
284 | */ | ||
285 | __visible void __pv_queued_spin_unlock(struct qspinlock *lock) | ||
286 | { | ||
287 | struct __qspinlock *l = (void *)lock; | ||
288 | struct pv_node *node; | ||
289 | |||
290 | /* | ||
291 | * We must not unlock if SLOW, because in that case we must first | ||
292 | * unhash. Otherwise it would be possible to have multiple @lock | ||
293 | * entries, which would be BAD. | ||
294 | */ | ||
295 | if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL)) | ||
296 | return; | ||
297 | |||
298 | /* | ||
299 | * Since the above failed to release, this must be the SLOW path. | ||
300 | * Therefore start by looking up the blocked node and unhashing it. | ||
301 | */ | ||
302 | node = pv_unhash(lock); | ||
303 | |||
304 | /* | ||
305 | * Now that we have a reference to the (likely) blocked pv_node, | ||
306 | * release the lock. | ||
307 | */ | ||
308 | smp_store_release(&l->locked, 0); | ||
309 | |||
310 | /* | ||
311 | * At this point the memory pointed at by lock can be freed/reused, | ||
312 | * however we can still use the pv_node to kick the CPU. | ||
313 | */ | ||
314 | if (READ_ONCE(node->state) == vcpu_halted) | ||
315 | pv_kick(node->cpu); | ||
316 | } | ||
317 | /* | ||
318 | * Include the architecture specific callee-save thunk of the | ||
319 | * __pv_queued_spin_unlock(). This thunk is put together with | ||
320 | * __pv_queued_spin_unlock() near the top of the file to make sure | ||
321 | * that the callee-save thunk and the real unlock function are close | ||
322 | * to each other sharing consecutive instruction cachelines. | ||
323 | */ | ||
324 | #include <asm/qspinlock_paravirt.h> | ||
325 | |||
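The pv hash above is an open-addressed table keyed by lock address: pv_wait_head() inserts the (lock, node) pair before setting _Q_SLOW_VAL, and the unlocker looks it up and removes it. A minimal userspace sketch with plain linear probing; the kernel probes a cacheline of entries at a time and sizes the table from num_possible_cpus(), so the fixed table and int node id here are illustrative simplifications:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 16    /* power of two; stands in for 1 << pv_lock_hash_bits */

struct entry { void *lock; int node; };
static struct entry tab[HASH_SIZE];

static unsigned int hash_ptr(void *p)
{
        return ((uintptr_t)p >> 4) & (HASH_SIZE - 1);
}

static void pv_hash_sketch(void *lock, int node)
{
        for (unsigned int i = 0; i < HASH_SIZE; i++) {
                struct entry *he = &tab[(hash_ptr(lock) + i) & (HASH_SIZE - 1)];

                if (!he->lock) {
                        he->lock = lock;
                        he->node = node;
                        return;
                }
        }
        assert(0);      /* table sized so a free slot must exist */
}

static int pv_unhash_sketch(void *lock)
{
        for (unsigned int i = 0; i < HASH_SIZE; i++) {
                struct entry *he = &tab[(hash_ptr(lock) + i) & (HASH_SIZE - 1)];

                if (he->lock == lock) {
                        he->lock = NULL;
                        return he->node;
                }
        }
        assert(0);      /* a SLOW unlock implies a hash entry exists */
        return -1;
}

int main(void)
{
        int lockmem;

        pv_hash_sketch(&lockmem, 42);
        printf("unhashed node %d\n", pv_unhash_sketch(&lockmem));
        return 0;
}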
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index b025295f4966..30ec5b46cd8c 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
@@ -70,10 +70,10 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * We can speed up the acquire/release, if the architecture | 73 | * We can speed up the acquire/release, if there's no debugging state to be |
74 | * supports cmpxchg and if there's no debugging state to be set up | 74 | * set up. |
75 | */ | 75 | */ |
76 | #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES) | 76 | #ifndef CONFIG_DEBUG_RT_MUTEXES |
77 | # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c) | 77 | # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c) |
78 | static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) | 78 | static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) |
79 | { | 79 | { |
@@ -1443,10 +1443,17 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); | |||
1443 | * | 1443 | * |
1444 | * @lock: the rt_mutex to be locked | 1444 | * @lock: the rt_mutex to be locked |
1445 | * | 1445 | * |
1446 | * This function can only be called in thread context. It's safe to | ||
1447 | * call it from atomic regions, but not from hard interrupt or soft | ||
1448 | * interrupt context. | ||
1449 | * | ||
1446 | * Returns 1 on success and 0 on contention | 1450 | * Returns 1 on success and 0 on contention |
1447 | */ | 1451 | */ |
1448 | int __sched rt_mutex_trylock(struct rt_mutex *lock) | 1452 | int __sched rt_mutex_trylock(struct rt_mutex *lock) |
1449 | { | 1453 | { |
1454 | if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq())) | ||
1455 | return 0; | ||
1456 | |||
1450 | return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); | 1457 | return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); |
1451 | } | 1458 | } |
1452 | EXPORT_SYMBOL_GPL(rt_mutex_trylock); | 1459 | EXPORT_SYMBOL_GPL(rt_mutex_trylock); |
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 3417d0172a5d..0f189714e457 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c | |||
@@ -409,11 +409,24 @@ done: | |||
409 | return taken; | 409 | return taken; |
410 | } | 410 | } |
411 | 411 | ||
412 | /* | ||
413 | * Return true if the rwsem has active spinner | ||
414 | */ | ||
415 | static inline bool rwsem_has_spinner(struct rw_semaphore *sem) | ||
416 | { | ||
417 | return osq_is_locked(&sem->osq); | ||
418 | } | ||
419 | |||
412 | #else | 420 | #else |
413 | static bool rwsem_optimistic_spin(struct rw_semaphore *sem) | 421 | static bool rwsem_optimistic_spin(struct rw_semaphore *sem) |
414 | { | 422 | { |
415 | return false; | 423 | return false; |
416 | } | 424 | } |
425 | |||
426 | static inline bool rwsem_has_spinner(struct rw_semaphore *sem) | ||
427 | { | ||
428 | return false; | ||
429 | } | ||
417 | #endif | 430 | #endif |
418 | 431 | ||
419 | /* | 432 | /* |
@@ -496,7 +509,38 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | |||
496 | { | 509 | { |
497 | unsigned long flags; | 510 | unsigned long flags; |
498 | 511 | ||
512 | /* | ||
513 | * If a spinner is present, it is not necessary to do the wakeup. | ||
514 | * Try to do wakeup only if the trylock succeeds to minimize | ||
515 | * spinlock contention which may introduce too much delay in the | ||
516 | * unlock operation. | ||
517 | * | ||
518 | * spinning writer up_write/up_read caller | ||
519 | * --------------- ----------------------- | ||
520 | * [S] osq_unlock() [L] osq | ||
521 | * MB RMB | ||
522 | * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock) | ||
523 | * | ||
524 | * Here, it is important to make sure that there won't be a missed | ||
525 | * wakeup while the rwsem is free and the only spinning writer goes | ||
526 | * to sleep without taking the rwsem. Even when the spinning writer | ||
527 | * is just going to break out of the waiting loop, it will still do | ||
528 | * a trylock in rwsem_down_write_failed() before sleeping. IOW, if | ||
529 | * rwsem_has_spinner() is true, it will guarantee at least one | ||
530 | * trylock attempt on the rwsem later on. | ||
531 | */ | ||
532 | if (rwsem_has_spinner(sem)) { | ||
533 | /* | ||
534 | * The smp_rmb() here is to make sure that the spinner | ||
535 | * state is consulted before reading the wait_lock. | ||
536 | */ | ||
537 | smp_rmb(); | ||
538 | if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags)) | ||
539 | return sem; | ||
540 | goto locked; | ||
541 | } | ||
499 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | 542 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
543 | locked: | ||
500 | 544 | ||
501 | /* do nothing if list empty */ | 545 | /* do nothing if list empty */ |
502 | if (!list_empty(&sem->wait_list)) | 546 | if (!list_empty(&sem->wait_list)) |
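The rwsem_wake() shortcut above avoids contending on wait_lock when a spinner is guaranteed to retry the lock anyway. A userspace sketch of just that decision, with hypothetical stand-ins (a pthread mutex for the wait_lock, an atomic int for the OSQ tail); build with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_rwsem {
        atomic_int osq_tail;            /* 0 == OSQ_UNLOCKED_VAL */
        pthread_mutex_t wait_lock;
};

static bool rwsem_has_spinner(struct sketch_rwsem *sem)
{
        return atomic_load(&sem->osq_tail) != 0;
}

/* Returns true if we took the wait_lock and (would have) woken waiters. */
static bool sketch_rwsem_wake(struct sketch_rwsem *sem)
{
        if (rwsem_has_spinner(sem)) {
                atomic_thread_fence(memory_order_acquire); /* the smp_rmb() */
                if (pthread_mutex_trylock(&sem->wait_lock) != 0)
                        return false;   /* a spinner will retry the rwsem */
        } else if (pthread_mutex_lock(&sem->wait_lock) != 0) {
                return false;
        }
        /* ... wake the first waiter(s) on sem->wait_list here ... */
        pthread_mutex_unlock(&sem->wait_lock);
        return true;
}

int main(void)
{
        struct sketch_rwsem sem = {
                .osq_tail = 1,          /* pretend a writer is spinning */
                .wait_lock = PTHREAD_MUTEX_INITIALIZER,
        };

        printf("woke waiters: %d\n", sketch_rwsem_wake(&sem));
        return 0;
}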
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 852143a79f36..9bc82329eaad 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c | |||
@@ -341,7 +341,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout) | |||
341 | * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss | 341 | * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss |
342 | * an event. | 342 | * an event. |
343 | */ | 343 | */ |
344 | set_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */ | 344 | smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */ |
345 | 345 | ||
346 | return timeout; | 346 | return timeout; |
347 | } | 347 | } |
@@ -354,7 +354,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) | |||
354 | * doesn't imply write barrier and the users expects write | 354 | * doesn't imply write barrier and the users expects write |
355 | * barrier semantics on wakeup functions. The following | 355 | * barrier semantics on wakeup functions. The following |
356 | * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up() | 356 | * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up() |
357 | * and is paired with set_mb() in wait_woken(). | 357 | * and is paired with smp_store_mb() in wait_woken(). |
358 | */ | 358 | */ |
359 | smp_wmb(); /* C */ | 359 | smp_wmb(); /* C */ |
360 | wait->flags |= WQ_FLAG_WOKEN; | 360 | wait->flags |= WQ_FLAG_WOKEN; |