diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-17 14:54:56 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-17 14:54:56 -0500 |
commit | 88dc7fca18001fd883e5ace775afa316b68c8f2c (patch) | |
tree | 224bd71272b4cbc268ced27d9b3160fabb5755d8 | |
parent | dd43f3465d138439db451a5e7ddccefc5af2fa85 (diff) | |
parent | 6cfb521ac0d5b97470883ff9b7facae264b7ab12 (diff) |
Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 pti bits and fixes from Thomas Gleixner:
"This last update contains:
- An objtool fix to prevent a segfault with the gold linker by
changing the invocation order. That's not just for gold, it's a
general robustness improvement.
- An improved error message for objtool which spares hair-tearing.
- Make KASAN fail loudly if there is not enough memory, instead of
oopsing at some random place later.
- RSB fill on context switch to prevent RSB underflow and speculation
through other units.
- Make the retpoline/RSB functionality work reliably for both Intel
and AMD
- Add retpoline to the module version magic so mismatch can be
detected
- A small (non-fix) update for cpufeatures which prevents cpu feature
clashing for the upcoming extra mitigation bits to ease
backporting"
* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
module: Add retpoline tag to VERMAGIC
x86/cpufeature: Move processor tracing out of scattered features
objtool: Improve error message for bad file argument
objtool: Fix seg fault with gold linker
x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
x86/retpoline: Fill RSB on context switch for affected CPUs
x86/kasan: Panic if there is not enough memory to boot
-rw-r--r-- | arch/x86/entry/entry_32.S | 11 | ||||
-rw-r--r-- | arch/x86/entry/entry_64.S | 11 | ||||
-rw-r--r-- | arch/x86/include/asm/cpufeatures.h | 3 | ||||
-rw-r--r-- | arch/x86/include/asm/nospec-branch.h | 6 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/bugs.c | 36 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/scattered.c | 1 | ||||
-rw-r--r-- | arch/x86/mm/kasan_init_64.c | 24 | ||||
-rw-r--r-- | include/linux/vermagic.h | 8 | ||||
-rw-r--r-- | scripts/Makefile.build | 14 | ||||
-rw-r--r-- | tools/objtool/elf.c | 4 |
10 files changed, 99 insertions, 19 deletions
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index a1f28a54f23a..60c4c342316c 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -244,6 +244,17 @@ ENTRY(__switch_to_asm) | |||
244 | movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset | 244 | movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset |
245 | #endif | 245 | #endif |
246 | 246 | ||
247 | #ifdef CONFIG_RETPOLINE | ||
248 | /* | ||
249 | * When switching from a shallower to a deeper call stack | ||
250 | * the RSB may either underflow or use entries populated | ||
251 | * with userspace addresses. On CPUs where those concerns | ||
252 | * exist, overwrite the RSB with entries which capture | ||
253 | * speculative execution to prevent attack. | ||
254 | */ | ||
255 | FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW | ||
256 | #endif | ||
257 | |||
247 | /* restore callee-saved registers */ | 258 | /* restore callee-saved registers */ |
248 | popl %esi | 259 | popl %esi |
249 | popl %edi | 260 | popl %edi |
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 4f8e1d35a97c..aa15b4c0e3d1 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -491,6 +491,17 @@ ENTRY(__switch_to_asm) | |||
491 | movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset | 491 | movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset |
492 | #endif | 492 | #endif |
493 | 493 | ||
494 | #ifdef CONFIG_RETPOLINE | ||
495 | /* | ||
496 | * When switching from a shallower to a deeper call stack | ||
497 | * the RSB may either underflow or use entries populated | ||
498 | * with userspace addresses. On CPUs where those concerns | ||
499 | * exist, overwrite the RSB with entries which capture | ||
500 | * speculative execution to prevent attack. | ||
501 | */ | ||
502 | FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW | ||
503 | #endif | ||
504 | |||
494 | /* restore callee-saved registers */ | 505 | /* restore callee-saved registers */ |
495 | popq %r15 | 506 | popq %r15 |
496 | popq %r14 | 507 | popq %r14 |
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index f275447862f4..25b9375c1484 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h | |||
@@ -206,11 +206,11 @@ | |||
206 | #define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */ | 206 | #define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */ |
207 | #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */ | 207 | #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */ |
208 | #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ | 208 | #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ |
209 | #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ | ||
210 | #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ | 209 | #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ |
211 | #define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */ | 210 | #define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */ |
212 | 211 | ||
213 | #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ | 212 | #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ |
213 | #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */ | ||
214 | 214 | ||
215 | /* Virtualization flags: Linux defined, word 8 */ | 215 | /* Virtualization flags: Linux defined, word 8 */ |
216 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ | 216 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
@@ -245,6 +245,7 @@ | |||
245 | #define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ | 245 | #define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ |
246 | #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ | 246 | #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ |
247 | #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ | 247 | #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ |
248 | #define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */ | ||
248 | #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ | 249 | #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ |
249 | #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ | 250 | #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ |
250 | #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ | 251 | #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ |
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 402a11c803c3..7b45d8424150 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h | |||
@@ -11,7 +11,7 @@ | |||
11 | * Fill the CPU return stack buffer. | 11 | * Fill the CPU return stack buffer. |
12 | * | 12 | * |
13 | * Each entry in the RSB, if used for a speculative 'ret', contains an | 13 | * Each entry in the RSB, if used for a speculative 'ret', contains an |
14 | * infinite 'pause; jmp' loop to capture speculative execution. | 14 | * infinite 'pause; lfence; jmp' loop to capture speculative execution. |
15 | * | 15 | * |
16 | * This is required in various cases for retpoline and IBRS-based | 16 | * This is required in various cases for retpoline and IBRS-based |
17 | * mitigations for the Spectre variant 2 vulnerability. Sometimes to | 17 | * mitigations for the Spectre variant 2 vulnerability. Sometimes to |
@@ -38,11 +38,13 @@ | |||
38 | call 772f; \ | 38 | call 772f; \ |
39 | 773: /* speculation trap */ \ | 39 | 773: /* speculation trap */ \ |
40 | pause; \ | 40 | pause; \ |
41 | lfence; \ | ||
41 | jmp 773b; \ | 42 | jmp 773b; \ |
42 | 772: \ | 43 | 772: \ |
43 | call 774f; \ | 44 | call 774f; \ |
44 | 775: /* speculation trap */ \ | 45 | 775: /* speculation trap */ \ |
45 | pause; \ | 46 | pause; \ |
47 | lfence; \ | ||
46 | jmp 775b; \ | 48 | jmp 775b; \ |
47 | 774: \ | 49 | 774: \ |
48 | dec reg; \ | 50 | dec reg; \ |
@@ -73,6 +75,7 @@ | |||
73 | call .Ldo_rop_\@ | 75 | call .Ldo_rop_\@ |
74 | .Lspec_trap_\@: | 76 | .Lspec_trap_\@: |
75 | pause | 77 | pause |
78 | lfence | ||
76 | jmp .Lspec_trap_\@ | 79 | jmp .Lspec_trap_\@ |
77 | .Ldo_rop_\@: | 80 | .Ldo_rop_\@: |
78 | mov \reg, (%_ASM_SP) | 81 | mov \reg, (%_ASM_SP) |
@@ -165,6 +168,7 @@ | |||
165 | " .align 16\n" \ | 168 | " .align 16\n" \ |
166 | "901: call 903f;\n" \ | 169 | "901: call 903f;\n" \ |
167 | "902: pause;\n" \ | 170 | "902: pause;\n" \ |
171 | " lfence;\n" \ | ||
168 | " jmp 902b;\n" \ | 172 | " jmp 902b;\n" \ |
169 | " .align 16\n" \ | 173 | " .align 16\n" \ |
170 | "903: addl $4, %%esp;\n" \ | 174 | "903: addl $4, %%esp;\n" \ |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index e4dc26185aa7..390b3dc3d438 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/alternative.h> | 23 | #include <asm/alternative.h> |
24 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
25 | #include <asm/set_memory.h> | 25 | #include <asm/set_memory.h> |
26 | #include <asm/intel-family.h> | ||
26 | 27 | ||
27 | static void __init spectre_v2_select_mitigation(void); | 28 | static void __init spectre_v2_select_mitigation(void); |
28 | 29 | ||
@@ -155,6 +156,23 @@ disable: | |||
155 | return SPECTRE_V2_CMD_NONE; | 156 | return SPECTRE_V2_CMD_NONE; |
156 | } | 157 | } |
157 | 158 | ||
159 | /* Check for Skylake-like CPUs (for RSB handling) */ | ||
160 | static bool __init is_skylake_era(void) | ||
161 | { | ||
162 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | ||
163 | boot_cpu_data.x86 == 6) { | ||
164 | switch (boot_cpu_data.x86_model) { | ||
165 | case INTEL_FAM6_SKYLAKE_MOBILE: | ||
166 | case INTEL_FAM6_SKYLAKE_DESKTOP: | ||
167 | case INTEL_FAM6_SKYLAKE_X: | ||
168 | case INTEL_FAM6_KABYLAKE_MOBILE: | ||
169 | case INTEL_FAM6_KABYLAKE_DESKTOP: | ||
170 | return true; | ||
171 | } | ||
172 | } | ||
173 | return false; | ||
174 | } | ||
175 | |||
158 | static void __init spectre_v2_select_mitigation(void) | 176 | static void __init spectre_v2_select_mitigation(void) |
159 | { | 177 | { |
160 | enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); | 178 | enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); |
@@ -213,6 +231,24 @@ retpoline_auto: | |||
213 | 231 | ||
214 | spectre_v2_enabled = mode; | 232 | spectre_v2_enabled = mode; |
215 | pr_info("%s\n", spectre_v2_strings[mode]); | 233 | pr_info("%s\n", spectre_v2_strings[mode]); |
234 | |||
235 | /* | ||
236 | * If neither SMEP or KPTI are available, there is a risk of | ||
237 | * hitting userspace addresses in the RSB after a context switch | ||
238 | * from a shallow call stack to a deeper one. To prevent this fill | ||
239 | * the entire RSB, even when using IBRS. | ||
240 | * | ||
241 | * Skylake era CPUs have a separate issue with *underflow* of the | ||
242 | * RSB, when they will predict 'ret' targets from the generic BTB. | ||
243 | * The proper mitigation for this is IBRS. If IBRS is not supported | ||
244 | * or deactivated in favour of retpolines the RSB fill on context | ||
245 | * switch is required. | ||
246 | */ | ||
247 | if ((!boot_cpu_has(X86_FEATURE_PTI) && | ||
248 | !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { | ||
249 | setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); | ||
250 | pr_info("Filling RSB on context switch\n"); | ||
251 | } | ||
216 | } | 252 | } |
217 | 253 | ||
218 | #undef pr_fmt | 254 | #undef pr_fmt |
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 05459ad3db46..d0e69769abfd 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -21,7 +21,6 @@ struct cpuid_bit { | |||
21 | static const struct cpuid_bit cpuid_bits[] = { | 21 | static const struct cpuid_bit cpuid_bits[] = { |
22 | { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, | 22 | { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, |
23 | { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, | 23 | { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, |
24 | { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 }, | ||
25 | { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 }, | 24 | { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 }, |
26 | { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 }, | 25 | { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 }, |
27 | { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, | 26 | { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, |
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 47388f0c0e59..af6f2f9c6a26 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c | |||
@@ -21,10 +21,14 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES]; | |||
21 | 21 | ||
22 | static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); | 22 | static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); |
23 | 23 | ||
24 | static __init void *early_alloc(size_t size, int nid) | 24 | static __init void *early_alloc(size_t size, int nid, bool panic) |
25 | { | 25 | { |
26 | return memblock_virt_alloc_try_nid_nopanic(size, size, | 26 | if (panic) |
27 | __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); | 27 | return memblock_virt_alloc_try_nid(size, size, |
28 | __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); | ||
29 | else | ||
30 | return memblock_virt_alloc_try_nid_nopanic(size, size, | ||
31 | __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); | ||
28 | } | 32 | } |
29 | 33 | ||
30 | static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, | 34 | static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, |
@@ -38,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, | |||
38 | if (boot_cpu_has(X86_FEATURE_PSE) && | 42 | if (boot_cpu_has(X86_FEATURE_PSE) && |
39 | ((end - addr) == PMD_SIZE) && | 43 | ((end - addr) == PMD_SIZE) && |
40 | IS_ALIGNED(addr, PMD_SIZE)) { | 44 | IS_ALIGNED(addr, PMD_SIZE)) { |
41 | p = early_alloc(PMD_SIZE, nid); | 45 | p = early_alloc(PMD_SIZE, nid, false); |
42 | if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) | 46 | if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) |
43 | return; | 47 | return; |
44 | else if (p) | 48 | else if (p) |
45 | memblock_free(__pa(p), PMD_SIZE); | 49 | memblock_free(__pa(p), PMD_SIZE); |
46 | } | 50 | } |
47 | 51 | ||
48 | p = early_alloc(PAGE_SIZE, nid); | 52 | p = early_alloc(PAGE_SIZE, nid, true); |
49 | pmd_populate_kernel(&init_mm, pmd, p); | 53 | pmd_populate_kernel(&init_mm, pmd, p); |
50 | } | 54 | } |
51 | 55 | ||
@@ -57,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, | |||
57 | if (!pte_none(*pte)) | 61 | if (!pte_none(*pte)) |
58 | continue; | 62 | continue; |
59 | 63 | ||
60 | p = early_alloc(PAGE_SIZE, nid); | 64 | p = early_alloc(PAGE_SIZE, nid, true); |
61 | entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); | 65 | entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); |
62 | set_pte_at(&init_mm, addr, pte, entry); | 66 | set_pte_at(&init_mm, addr, pte, entry); |
63 | } while (pte++, addr += PAGE_SIZE, addr != end); | 67 | } while (pte++, addr += PAGE_SIZE, addr != end); |
@@ -75,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr, | |||
75 | if (boot_cpu_has(X86_FEATURE_GBPAGES) && | 79 | if (boot_cpu_has(X86_FEATURE_GBPAGES) && |
76 | ((end - addr) == PUD_SIZE) && | 80 | ((end - addr) == PUD_SIZE) && |
77 | IS_ALIGNED(addr, PUD_SIZE)) { | 81 | IS_ALIGNED(addr, PUD_SIZE)) { |
78 | p = early_alloc(PUD_SIZE, nid); | 82 | p = early_alloc(PUD_SIZE, nid, false); |
79 | if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) | 83 | if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) |
80 | return; | 84 | return; |
81 | else if (p) | 85 | else if (p) |
82 | memblock_free(__pa(p), PUD_SIZE); | 86 | memblock_free(__pa(p), PUD_SIZE); |
83 | } | 87 | } |
84 | 88 | ||
85 | p = early_alloc(PAGE_SIZE, nid); | 89 | p = early_alloc(PAGE_SIZE, nid, true); |
86 | pud_populate(&init_mm, pud, p); | 90 | pud_populate(&init_mm, pud, p); |
87 | } | 91 | } |
88 | 92 | ||
@@ -101,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr, | |||
101 | unsigned long next; | 105 | unsigned long next; |
102 | 106 | ||
103 | if (p4d_none(*p4d)) { | 107 | if (p4d_none(*p4d)) { |
104 | void *p = early_alloc(PAGE_SIZE, nid); | 108 | void *p = early_alloc(PAGE_SIZE, nid, true); |
105 | 109 | ||
106 | p4d_populate(&init_mm, p4d, p); | 110 | p4d_populate(&init_mm, p4d, p); |
107 | } | 111 | } |
@@ -122,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr, | |||
122 | unsigned long next; | 126 | unsigned long next; |
123 | 127 | ||
124 | if (pgd_none(*pgd)) { | 128 | if (pgd_none(*pgd)) { |
125 | p = early_alloc(PAGE_SIZE, nid); | 129 | p = early_alloc(PAGE_SIZE, nid, true); |
126 | pgd_populate(&init_mm, pgd, p); | 130 | pgd_populate(&init_mm, pgd, p); |
127 | } | 131 | } |
128 | 132 | ||
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index bae807eb2933..853291714ae0 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h | |||
@@ -31,11 +31,17 @@ | |||
31 | #else | 31 | #else |
32 | #define MODULE_RANDSTRUCT_PLUGIN | 32 | #define MODULE_RANDSTRUCT_PLUGIN |
33 | #endif | 33 | #endif |
34 | #ifdef RETPOLINE | ||
35 | #define MODULE_VERMAGIC_RETPOLINE "retpoline " | ||
36 | #else | ||
37 | #define MODULE_VERMAGIC_RETPOLINE "" | ||
38 | #endif | ||
34 | 39 | ||
35 | #define VERMAGIC_STRING \ | 40 | #define VERMAGIC_STRING \ |
36 | UTS_RELEASE " " \ | 41 | UTS_RELEASE " " \ |
37 | MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ | 42 | MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ |
38 | MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ | 43 | MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ |
39 | MODULE_ARCH_VERMAGIC \ | 44 | MODULE_ARCH_VERMAGIC \ |
40 | MODULE_RANDSTRUCT_PLUGIN | 45 | MODULE_RANDSTRUCT_PLUGIN \ |
46 | MODULE_VERMAGIC_RETPOLINE | ||
41 | 47 | ||
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index cb8997ed0149..47cddf32aeba 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -265,12 +265,18 @@ else | |||
265 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) | 265 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) |
266 | endif | 266 | endif |
267 | 267 | ||
268 | ifdef CONFIG_MODVERSIONS | ||
269 | objtool_o = $(@D)/.tmp_$(@F) | ||
270 | else | ||
271 | objtool_o = $(@) | ||
272 | endif | ||
273 | |||
268 | # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory | 274 | # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory |
269 | # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file | 275 | # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file |
270 | # 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file | 276 | # 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file |
271 | cmd_objtool = $(if $(patsubst y%,, \ | 277 | cmd_objtool = $(if $(patsubst y%,, \ |
272 | $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ | 278 | $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ |
273 | $(__objtool_obj) $(objtool_args) "$(@)";) | 279 | $(__objtool_obj) $(objtool_args) "$(objtool_o)";) |
274 | objtool_obj = $(if $(patsubst y%,, \ | 280 | objtool_obj = $(if $(patsubst y%,, \ |
275 | $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ | 281 | $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ |
276 | $(__objtool_obj)) | 282 | $(__objtool_obj)) |
@@ -286,16 +292,16 @@ objtool_dep = $(objtool_obj) \ | |||
286 | define rule_cc_o_c | 292 | define rule_cc_o_c |
287 | $(call echo-cmd,checksrc) $(cmd_checksrc) \ | 293 | $(call echo-cmd,checksrc) $(cmd_checksrc) \ |
288 | $(call cmd_and_fixdep,cc_o_c) \ | 294 | $(call cmd_and_fixdep,cc_o_c) \ |
289 | $(cmd_modversions_c) \ | ||
290 | $(cmd_checkdoc) \ | 295 | $(cmd_checkdoc) \ |
291 | $(call echo-cmd,objtool) $(cmd_objtool) \ | 296 | $(call echo-cmd,objtool) $(cmd_objtool) \ |
297 | $(cmd_modversions_c) \ | ||
292 | $(call echo-cmd,record_mcount) $(cmd_record_mcount) | 298 | $(call echo-cmd,record_mcount) $(cmd_record_mcount) |
293 | endef | 299 | endef |
294 | 300 | ||
295 | define rule_as_o_S | 301 | define rule_as_o_S |
296 | $(call cmd_and_fixdep,as_o_S) \ | 302 | $(call cmd_and_fixdep,as_o_S) \ |
297 | $(cmd_modversions_S) \ | 303 | $(call echo-cmd,objtool) $(cmd_objtool) \ |
298 | $(call echo-cmd,objtool) $(cmd_objtool) | 304 | $(cmd_modversions_S) |
299 | endef | 305 | endef |
300 | 306 | ||
301 | # List module undefined symbols (or empty line if not enabled) | 307 | # List module undefined symbols (or empty line if not enabled) |
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 24460155c82c..c1c338661699 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <stdlib.h> | 26 | #include <stdlib.h> |
27 | #include <string.h> | 27 | #include <string.h> |
28 | #include <unistd.h> | 28 | #include <unistd.h> |
29 | #include <errno.h> | ||
29 | 30 | ||
30 | #include "elf.h" | 31 | #include "elf.h" |
31 | #include "warn.h" | 32 | #include "warn.h" |
@@ -358,7 +359,8 @@ struct elf *elf_open(const char *name, int flags) | |||
358 | 359 | ||
359 | elf->fd = open(name, flags); | 360 | elf->fd = open(name, flags); |
360 | if (elf->fd == -1) { | 361 | if (elf->fd == -1) { |
361 | perror("open"); | 362 | fprintf(stderr, "objtool: Can't open '%s': %s\n", |
363 | name, strerror(errno)); | ||
362 | goto err; | 364 | goto err; |
363 | } | 365 | } |
364 | 366 | ||