author		Linus Torvalds <torvalds@linux-foundation.org>	2018-10-23 08:08:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-23 08:08:53 -0400
commit		0200fbdd431519d730b5d399a12840ec832b27cc (patch)
tree		2b58f9e24b61b00e0550f106c95bfabc3b52cfdd /include/linux/compiler.h
parent		de3fbb2aa802a267dee2213ae7d5a1e19eb4294a (diff)
parent		01a14bda11add9dcd4a59200f13834d634559935 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
 "Lots of changes in this cycle - in part because locking/core attracted a
  number of related x86 low level work which was easier to handle in a
  single tree:

   - Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E. McKenney, Andrea Parri)
   - lockdep scalability improvements and micro-optimizations (Waiman Long)
   - rwsem improvements (Waiman Long)
   - spinlock micro-optimization (Matthew Wilcox)
   - qspinlocks: Provide a liveness guarantee (more fairness) on x86. (Peter Zijlstra)
   - Add support for relative references in jump tables on arm64, x86 and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens)
   - Be a lot less permissive on weird (kernel address) uaccess faults on x86: BUG() when uaccess helpers fault on kernel addresses (Jann Horn)
   - macrofy x86 asm statements to un-confuse the GCC inliner. (Nadav Amit)
   - ... and a handful of other smaller changes as well"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
  locking/lockdep: Make global debug_locks* variables read-mostly
  locking/lockdep: Fix debug_locks off performance problem
  locking/pvqspinlock: Extend node size when pvqspinlock is configured
  locking/qspinlock_stat: Count instances of nested lock slowpaths
  locking/qspinlock, x86: Provide liveness guarantee
  x86/asm: 'Simplify' GEN_*_RMWcc() macros
  locking/qspinlock: Rework some comments
  locking/qspinlock: Re-order code
  locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
  x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
  futex: Replace spin_is_locked() with lockdep
  locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
  x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
  x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
  x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
  x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
  x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
  x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
  x86/refcount: Work around GCC inlining bug
  x86/objtool: Use asm macros to work around GCC inlining bugs
  ...
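The compiler.h hunks below belong to the "macrofy x86 asm statements" item above: the multi-line .pushsection/.popsection bodies move out of the inline-asm strings and into real assembler macros, so each call site becomes a single short line and GCC's line-count-based inline-asm cost estimate no longer makes the surrounding function look too big to inline. Below is a minimal user-space sketch of the same idea, not kernel code: the names are hypothetical, it assumes GCC and GNU as on x86, and a file-scope asm() stands in for the separate mechanism the kernel series uses to make the macros visible to the assembler.

/*
 * Minimal sketch of the "macrofy" technique (hypothetical names).
 * The annotation body is defined once as a real assembler macro, so
 * every inline-asm use site is a single line; GCC counts the lines of
 * an asm string when estimating its cost, so short strings keep
 * callers cheap enough to inline.
 */
asm(".macro example_annotate counter:req\n"
    "\\counter:\n"
    "\t.pushsection .discard.example\n"
    "\t.long \\counter\\()b - .\n"
    "\t.popsection\n"
    ".endm\n");

#define example_annotate() \
	asm volatile("example_annotate counter=%c0" : : "i" (__COUNTER__))

int main(void)
{
	example_annotate();	/* one-line asm at the call site */
	example_annotate();	/* __COUNTER__ keeps each label unique */
	return 0;
}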
Diffstat (limited to 'include/linux/compiler.h')
-rw-r--r--	include/linux/compiler.h	56
1 file changed, 43 insertions(+), 13 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 681d866efb1e..1921545c6351 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -99,22 +99,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  * unique, to convince GCC not to merge duplicate inline asm statements.
  */
 #define annotate_reachable() ({						\
-	asm volatile("%c0:\n\t"						\
-		     ".pushsection .discard.reachable\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+	asm volatile("ANNOTATE_REACHABLE counter=%c0"			\
+		     : : "i" (__COUNTER__));				\
 })
 #define annotate_unreachable() ({					\
-	asm volatile("%c0:\n\t"						\
-		     ".pushsection .discard.unreachable\n\t"		\
-		     ".long %c0b - .\n\t"				\
-		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+	asm volatile("ANNOTATE_UNREACHABLE counter=%c0"			\
+		     : : "i" (__COUNTER__));				\
 })
-#define ASM_UNREACHABLE							\
-	"999:\n\t"							\
-	".pushsection .discard.unreachable\n\t"				\
-	".long 999b - .\n\t"						\
-	".popsection\n\t"
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
@@ -299,6 +290,45 @@ static inline void *offset_to_ptr(const int *off)
 	return (void *)((unsigned long)off + *off);
 }
 
+#else /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+#ifndef LINKER_SCRIPT
+
+#ifdef CONFIG_STACK_VALIDATION
+.macro ANNOTATE_UNREACHABLE counter:req
+\counter:
+	.pushsection .discard.unreachable
+	.long \counter\()b -.
+	.popsection
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+\counter:
+	.pushsection .discard.reachable
+	.long \counter\()b -.
+	.popsection
+.endm
+
+.macro ASM_UNREACHABLE
+999:
+	.pushsection .discard.unreachable
+	.long 999b - .
+	.popsection
+.endm
+#else /* CONFIG_STACK_VALIDATION */
+.macro ANNOTATE_UNREACHABLE counter:req
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+.endm
+
+.macro ASM_UNREACHABLE
+.endm
+#endif /* CONFIG_STACK_VALIDATION */
+
+#endif /* LINKER_SCRIPT */
+#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
 #ifndef __optimize
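As a brief aside on how such an annotation is typically consumed: the .long entries emitted into .discard.unreachable record, for objtool, the address of code that is intentionally unreachable, and the C-side caller pairs the annotation with __builtin_unreachable(). The sketch below is hedged and self-contained; example_unreachable() and the empty stub are hypothetical stand-ins, not the file's literal unreachable() definition.

/*
 * Hedged sketch, not kernel code: an empty stub stands in for
 * annotate_unreachable() so this builds on its own; the real macro
 * (above) records the location in .discard.unreachable for objtool,
 * and __builtin_unreachable() tells GCC control flow never gets here.
 */
#define annotate_unreachable_stub()	do { } while (0)

#define example_unreachable() do {		\
	annotate_unreachable_stub();		\
	__builtin_unreachable();		\
} while (0)

static int parity_bit(unsigned int x)
{
	if (x % 2 == 0)
		return 0;
	if (x % 2 == 1)
		return 1;
	example_unreachable();	/* both branches above cover every case */
}

int main(void)
{
	return parity_bit(3u);
}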