author     Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 08:08:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 08:08:53 -0400
commit     0200fbdd431519d730b5d399a12840ec832b27cc
tree       2b58f9e24b61b00e0550f106c95bfabc3b52cfdd  /arch/x86/include/asm/atomic.h
parent     de3fbb2aa802a267dee2213ae7d5a1e19eb4294a
parent     01a14bda11add9dcd4a59200f13834d634559935
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
"Lots of changes in this cycle - in part because locking/core attracted
a number of related x86 low level work which was easier to handle in a
single tree:
- Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
McKenney, Andrea Parri)
- lockdep scalability improvements and micro-optimizations (Waiman
Long)
- rwsem improvements (Waiman Long)
- spinlock micro-optimization (Matthew Wilcox)
- qspinlocks: Provide a liveness guarantee (more fairness) on x86.
(Peter Zijlstra)
- Add support for relative references in jump tables on arm64, x86
and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens)
- Be a lot less permissive on weird (kernel address) uaccess faults
on x86: BUG() when uaccess helpers fault on kernel addresses (Jann
Horn)
- macrofy x86 asm statements to un-confuse the GCC inliner. (Nadav
Amit)
- ... and a handful of other smaller changes as well"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
locking/lockdep: Make global debug_locks* variables read-mostly
locking/lockdep: Fix debug_locks off performance problem
locking/pvqspinlock: Extend node size when pvqspinlock is configured
locking/qspinlock_stat: Count instances of nested lock slowpaths
locking/qspinlock, x86: Provide liveness guarantee
x86/asm: 'Simplify' GEN_*_RMWcc() macros
locking/qspinlock: Rework some comments
locking/qspinlock: Re-order code
locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
futex: Replace spin_is_locked() with lockdep
locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
x86/refcount: Work around GCC inlining bug
x86/objtool: Use asm macros to work around GCC inlining bugs
...
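Several of the commits above ("Macrofy inline assembly code to work around GCC inlining bugs") rely on the same trick: GCC estimates the cost of an asm() statement largely by counting its lines, so a multi-line asm body makes a small helper look expensive and defeats inlining. Moving the body into an assembler macro leaves a one-line asm() at each call site. Below is a minimal standalone sketch of that idea for a single file built without LTO; EXAMPLE_TAG_ENTRY, .example.tags and record_tag are made-up names for illustration, not anything from the kernel tree.

/*
 * Illustrative only: the multi-line asm body lives in one file-scope
 * asm() block as a GAS macro; each call site is then a single asm line,
 * so the compiler's line-count-based cost estimate for the inline helper
 * stays small.
 */
asm(".macro EXAMPLE_TAG_ENTRY tag:req\n\t"
    ".pushsection .example.tags, \"a\"\n\t"
    ".long \\tag\n\t"
    ".popsection\n\t"
    ".endm");

static inline void record_tag(void)
{
        /* One line of inline asm instead of the multi-line body above. */
        asm volatile("EXAMPLE_TAG_ENTRY tag=42");
}

int main(void)
{
        record_tag();
        return 0;
}

The kernel versions additionally have to make the macro definitions visible to every translation unit (and to LTO builds); the single-file sketch above sidesteps that problem.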
Diffstat (limited to 'arch/x86/include/asm/atomic.h')
-rw-r--r--  arch/x86/include/asm/atomic.h | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index ce84388e540c..ea3d95275b43 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -82,7 +82,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
  */
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
 }
 #define arch_atomic_sub_and_test arch_atomic_sub_and_test

@@ -122,7 +122,7 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
  */
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
 }
 #define arch_atomic_dec_and_test arch_atomic_dec_and_test

@@ -136,7 +136,7 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
  */
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
 }
 #define arch_atomic_inc_and_test arch_atomic_inc_and_test

@@ -151,7 +151,7 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
  */
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
 }
 #define arch_atomic_add_negative arch_atomic_add_negative
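For context on the GEN_*_RMWcc() change in the hunks above: the reworked macros evaluate to a bool (hence the added `return`), with the condition code passed as a plain suffix such as `e` or `s` rather than the old "%0"/output-template arguments. The following is a standalone sketch of an expression-style helper in that spirit, built on GCC's x86 flag-output constraints ("=@cc<cond>", GCC 6+); RMWcc_SKETCH and sub_and_test_sketch are illustrative names, not the kernel's rmwcc.h.

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch only: the asm flag-output constraint "=@cc<cond>" returns the CPU
 * condition flag as a bool, so the whole macro is an expression the caller
 * can simply return.
 */
#define RMWcc_SKETCH(op, var, cc, val)                          \
({                                                              \
        bool __ret;                                             \
        asm volatile(op " %2, %0"                               \
                     : "+m" (var), "=@cc" cc (__ret)            \
                     : "er" (val) : "memory");                  \
        __ret;                                                  \
})

static inline bool sub_and_test_sketch(int i, int *counter)
{
        /* "e" = ZF set, i.e. the subtraction reached zero. */
        return RMWcc_SKETCH("lock subl", *counter, "e", i);
}

int main(void)
{
        int counter = 3;
        printf("%d\n", sub_and_test_sketch(1, &counter));      /* 0: not zero yet */
        printf("%d\n", sub_and_test_sketch(2, &counter));      /* 1: hit zero */
        return 0;
}

The point of the expression form is that the flag result comes straight out of the asm as a bool, so the caller writes the `return` itself instead of the macro embedding control flow, which is exactly the shape of the new call sites in the diff above.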