author	Ingo Molnar <mingo@kernel.org>	2018-12-19 05:20:23 -0500
committer	Ingo Molnar <mingo@kernel.org>	2018-12-19 05:58:10 -0500
commit	e769742d35841a8198dd6af94e2931083abdee08 (patch)
tree	092cf92b8f35cc4317b75442a90dfce4a2c368d9
parent	32043fa065b51e0b1433e48d118821c71b5cd65d (diff)
Revert "x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs"
This reverts commit 5bdcd510c2ac9efaf55c4cbd8d46421d8e2320cd.

The macro-based workarounds for GCC's inlining bugs caused regressions: distcc
and other distro build setups broke, and the fixes are not easy, nor would
they help already existing installations. So we are reverting this patch and
the 8 follow-up patches.

What makes this revert easier is that GCC9 will likely include the new
'asm inline' syntax that makes inlining of assembly blocks a lot more robust.
This is a superior method to any macro-based hackery - and it might even be
backported to GCC8, which would bring the inlining fixes to all modern
distros as well.

Many thanks to Masahiro Yamada and others for helping to sort out these
problems.

Reported-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Reviewed-by: Borislav Petkov <bp@alien8.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Juergen Gross <jgross@suse.com>
Cc: Richard Biener <rguenther@suse.de>
Cc: Kees Cook <keescook@chromium.org>
Cc: Segher Boessenkool <segher@kernel.crashing.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
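For reference, here is a minimal sketch of the 'asm inline' syntax mentioned
above (assuming a GCC 9+ toolchain; the function and its body are purely
illustrative, not code from this patch):

	/*
	 * With plain asm(), GCC estimates the cost of the statement from the
	 * length of the assembly string, so the long .pushsection/.popsection
	 * sequences used by jump labels make callers look too big to inline.
	 * "asm inline" tells GCC to treat the statement as minimum-size for
	 * its inlining heuristics, regardless of the string's length.
	 */
	static inline int example_nop(int x)	/* hypothetical function */
	{
		asm inline ("nop" : "+r" (x));
		return x;
	}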
-rw-r--r--	arch/x86/entry/calling.h	2
-rw-r--r--	arch/x86/include/asm/jump_label.h	72
-rw-r--r--	arch/x86/kernel/macros.S	1
3 files changed, 55 insertions(+), 20 deletions(-)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 25e5a6bda8c3..20d0885b00fb 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -352,7 +352,7 @@ For 32-bit we have the following conventions - kernel is built with
 .macro CALL_enter_from_user_mode
 #ifdef CONFIG_CONTEXT_TRACKING
 #ifdef HAVE_JUMP_LABEL
-	STATIC_BRANCH_JMP l_yes=.Lafter_call_\@, key=context_tracking_enabled, branch=1
+	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
 #endif
 	call enter_from_user_mode
 .Lafter_call_\@:
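Hand-expanded, the restored STATIC_JUMP_IF_FALSE line above emits roughly the
following for def=0 (a sketch following the macro's pattern, not literal
assembler output; the __jump_table entry emitted alongside is omitted):

	1:	.byte	0xe9			/* 5-byte jmp.d32 ...           */
		.long	.Lafter_call - 2f	/* ...skips the call by default */
	2:
		call	enter_from_user_mode
	.Lafter_call:

Once context_tracking_enabled is switched on, the jump-label machinery patches
the 5-byte jump into a 5-byte NOP so the call is reached.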
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a5fb34fe56a4..21efc9d07ed9 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -2,6 +2,19 @@
 #ifndef _ASM_X86_JUMP_LABEL_H
 #define _ASM_X86_JUMP_LABEL_H
 
+#ifndef HAVE_JUMP_LABEL
+/*
+ * For better or for worse, if jump labels (the gcc extension) are missing,
+ * then the entire static branch patching infrastructure is compiled out.
+ * If that happens, the code in here will malfunction. Raise a compiler
+ * error instead.
+ *
+ * In theory, jump labels and the static branch patching infrastructure
+ * could be decoupled to fix this.
+ */
+#error asm/jump_label.h included on a non-jump-label kernel
+#endif
+
 #define JUMP_LABEL_NOP_SIZE 5
 
 #ifdef CONFIG_X86_64
@@ -20,9 +33,15 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-			  "branch=\"%c1\""
-			: : "i" (key), "i" (branch) : : l_yes);
+	asm_volatile_goto("1:"
+		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+		".pushsection __jump_table,  \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
+		".popsection \n\t"
+		: : "i" (key), "i" (branch) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
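The .pushsection block above records one entry per branch site in the
__jump_table section. A sketch of that entry's layout in C (field names
modeled on the kernel's struct jump_entry; every field is PC-relative, which
is what the "- ." terms in the directives implement):

	struct jump_entry_sketch {	/* illustrative name */
		s32  code;	/* .long 1b - .        : address of the NOP/JMP */
		s32  target;	/* .long %l[l_yes] - . : where the jump goes    */
		long key;	/* _ASM_PTR %c0+%c1 - .: key address; the low   */
				/*   bit encodes the branch polarity            */
	};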
@@ -30,8 +49,14 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-			  "branch=\"%c1\""
+	asm_volatile_goto("1:"
+		".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+		"2:\n\t"
+		".pushsection __jump_table,  \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
+		".popsection \n\t"
 		: : "i" (key), "i" (branch) : : l_yes);
 
 	return false;
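Both primitives back the generic static-key API; a minimal usage sketch
(the key name and the helper are made up for illustration):

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(my_feature_key);	/* hypothetical key */

	extern void my_slow_feature(void);		/* hypothetical helper */

	void hot_path(void)
	{
		/* Compiles down to arch_static_branch(): a NOP by default. */
		if (static_branch_unlikely(&my_feature_key))
			my_slow_feature();
	}

	void enable_feature(void)
	{
		static_branch_enable(&my_feature_key);	/* patches NOP -> JMP */
	}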
@@ -41,26 +66,37 @@ l_yes:
 
 #else	/* __ASSEMBLY__ */
 
-.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
-.Lstatic_branch_nop_\@:
-	.byte STATIC_KEY_INIT_NOP
-.Lstatic_branch_no_after_\@:
+.macro STATIC_JUMP_IF_TRUE target, key, def
+.Lstatic_jump_\@:
+	.if \def
+	/* Equivalent to "jmp.d32 \target" */
+	.byte		0xe9
+	.long		\target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+	.else
+	.byte		STATIC_KEY_INIT_NOP
+	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	.long .Lstatic_branch_nop_\@ - ., \l_yes - .
-	_ASM_PTR \key + \branch - .
+	.long		.Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR	\key - .
 	.popsection
 .endm
 
-.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
-.Lstatic_branch_jmp_\@:
-	.byte 0xe9
-	.long \l_yes - .Lstatic_branch_jmp_after_\@
-.Lstatic_branch_jmp_after_\@:
+.macro STATIC_JUMP_IF_FALSE target, key, def
+.Lstatic_jump_\@:
+	.if \def
+	.byte		STATIC_KEY_INIT_NOP
+	.else
+	/* Equivalent to "jmp.d32 \target" */
+	.byte		0xe9
+	.long		\target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	.long .Lstatic_branch_jmp_\@ - ., \l_yes - .
-	_ASM_PTR \key + \branch - .
+	.long		.Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR	\key + 1 - .
 	.popsection
 .endm
 
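Note the difference between the two macros' table entries: _ASM_PTR \key - .
versus _ASM_PTR \key + 1 - . Since struct static_key is word-aligned, bit 0 of
the stored key address is free, and the jump-label code uses it to record the
branch polarity of the site. A simplified sketch of how that bit might be
separated out again (modeled loosely on the kernel's jump_entry helpers;
assume key_field holds the already-resolved absolute value):

	static inline struct static_key *sketch_entry_key(unsigned long key_field)
	{
		return (struct static_key *)(key_field & ~1UL);	/* strip bit 0 */
	}

	static inline bool sketch_entry_branch(unsigned long key_field)
	{
		return key_field & 1UL;		/* bit 0 = branch polarity flag */
	}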
diff --git a/arch/x86/kernel/macros.S b/arch/x86/kernel/macros.S
index 161c95059044..bf8b9c93e255 100644
--- a/arch/x86/kernel/macros.S
+++ b/arch/x86/kernel/macros.S
@@ -13,4 +13,3 @@
 #include <asm/paravirt.h>
 #include <asm/asm.h>
 #include <asm/cpufeature.h>
-#include <asm/jump_label.h>