author     Linus Torvalds <torvalds@linux-foundation.org>   2018-08-26 13:13:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-08-26 13:13:21 -0400
commit     2a8a2b7c49d6eb5f3348892c4676267376cfd40b (patch)
tree       2342006a8a8fa0d02b147c4d4482146a9f6353f6
parent     de3750351c0de35472299506ace61a01f2bfc567 (diff)
parent     6a012288d6906fee1dbc244050ade1dafe4a9c8d (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:

 - Correct the L1TF fallout on 32bit and the off-by-one in the 'too much
   RAM for protection' calculation

 - Add a helpful kernel message for the 'too much RAM' case

 - Unbreak the VDSO build in case the compiler decides to use indirect
   jumps/calls and emits retpolines which cannot be resolved because the
   kernel uses its own thunks, which does not work for the VDSO. Make it
   use the builtin thunks

 - Re-export start_thread(), which was unexported when the 32/64bit
   implementation was unified. start_thread() is required by modular
   binfmt handlers

 - Trivial cleanups

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation/l1tf: Suggest what to do on systems with too much RAM
  x86/speculation/l1tf: Fix off-by-one error when warning that system has too much RAM
  x86/kvm/vmx: Remove duplicate l1d flush definitions
  x86/speculation/l1tf: Fix overflow in l1tf_pfn_limit() on 32bit
  x86/process: Re-export start_thread()
  x86/mce: Add notifier_block forward declaration
  x86/vdso: Fix vDSO build if a retpoline is emitted
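A note on the vDSO/retpoline point above: with -mindirect-branch=thunk-extern (GCC) or -mretpoline-external-thunk (clang), every indirect branch is compiled into a call to an external __x86_indirect_thunk_* symbol that only the kernel image provides; the vDSO is linked as a small stand-alone userspace object and cannot resolve those symbols, so it has to use the inline/builtin thunk variants instead. A hypothetical C fragment of the kind that makes the compiler emit such an indirect branch (nothing like it is claimed to be in the vDSO; it only illustrates the mechanism):

/*
 * Hypothetical example, not vDSO source: an indirect call through a
 * function pointer.  Built with thunk-extern this becomes a call to
 * __x86_indirect_thunk_<reg>, which the standalone vDSO cannot link
 * against.  With thunk-inline / -mretpoline the retpoline sequence is
 * emitted in place, which is what the new RETPOLINE_VDSO_CFLAGS select.
 */
static int (*resolver)(void);

int call_indirect(void)
{
	return resolver();	/* indirect branch -> retpoline candidate */
}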
-rw-r--r--  Makefile                          |  4
-rw-r--r--  arch/x86/entry/vdso/Makefile      |  6
-rw-r--r--  arch/x86/include/asm/mce.h        |  1
-rw-r--r--  arch/x86/include/asm/processor.h  |  4
-rw-r--r--  arch/x86/kernel/cpu/bugs.c        |  4
-rw-r--r--  arch/x86/kernel/process_64.c      |  1
-rw-r--r--  arch/x86/kvm/vmx.c                |  3
-rw-r--r--  arch/x86/mm/init.c                |  4
-rw-r--r--  arch/x86/mm/mmap.c                |  2

9 files changed, 19 insertions, 10 deletions
diff --git a/Makefile b/Makefile
index 2e2b0095131c..487f8f2c3f06 100644
--- a/Makefile
+++ b/Makefile
@@ -507,9 +507,13 @@ KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 endif
 
 RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
 RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
 RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
 export RETPOLINE_CFLAGS
+export RETPOLINE_VDSO_CFLAGS
 
 KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 9f695f517747..fa3f439f0a92 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -68,9 +68,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
 
-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -132,11 +132,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
 KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(obj)/vdso32.so.dbg: FORCE \
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 8c7b3e5a2d01..3a17107594c8 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -148,6 +148,7 @@ enum mce_notifier_prios {
 	MCE_PRIO_LOWEST	= 0,
 };
 
+struct notifier_block;
 extern void mce_register_decode_chain(struct notifier_block *nb);
 extern void mce_unregister_decode_chain(struct notifier_block *nb);
 
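For context on the hunk above: a forward declaration is all the header needs, because mce.h only ever passes pointers to struct notifier_block and never looks inside it. A small stand-alone illustration (the function below is a placeholder, not the kernel's API):

/* Forward (incomplete) declaration: the compiler knows the tag exists,
 * but not its size or members. */
struct notifier_block;

/* Fine: declaring and passing a pointer to an incomplete type is allowed. */
void register_example_chain(struct notifier_block *nb);

/* Not allowed without the full definition:
 *   struct notifier_block nb;   -- size unknown
 *   nb.notifier_call(...);      -- members unknown
 */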
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 682286aca881..c24297268ebc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -181,9 +181,9 @@ extern const struct seq_operations cpuinfo_op;
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
 
-static inline unsigned long l1tf_pfn_limit(void)
+static inline unsigned long long l1tf_pfn_limit(void)
 {
-	return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+	return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);
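The switch from BIT() to BIT_ULL() above matters on 32-bit kernels, where unsigned long is 32 bits wide: with, say, 46 physical address bits the shift count is 46 - 1 - 12 = 33, which a 32-bit type cannot represent. A small userspace sketch of the arithmetic (the macros are modeled on the kernel's BIT()/BIT_ULL() helpers, the CPU value is an example):

#include <stdio.h>

#define PAGE_SHIFT	12
#define BIT(nr)		(1UL << (nr))	/* 32 bits wide on a 32-bit build */
#define BIT_ULL(nr)	(1ULL << (nr))	/* always 64 bits wide */

int main(void)
{
	unsigned int x86_phys_bits = 46;	/* example: a recent 64-bit CPU */
	unsigned int shift = x86_phys_bits - 1 - PAGE_SHIFT;	/* 33 */

	/*
	 * On a 32-bit build, BIT(33) shifts a 32-bit value by 33 bits, which
	 * is undefined behaviour and in practice yields garbage, so the pfn
	 * limit collapses.  BIT_ULL(33) is well defined.
	 */
	printf("shift = %u, limit = %#llx pfns\n", shift, BIT_ULL(shift));
	return 0;
}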
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index cb4a16292aa7..4c2313d0b9ca 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -702,6 +702,10 @@ static void __init l1tf_select_mitigation(void)
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
 	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+				half_pa);
+		pr_info("However, doing so will make a part of your RAM unusable.\n");
+		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
 		return;
 	}
 
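To make the MAX_PA/2 threshold in the hunk above concrete: half_pa is the pfn limit shifted back into bytes, i.e. 2^(x86_phys_bits - 1). A short worked example with illustrative numbers (not taken from the commit):

/*
 * Worked example, assuming 36 physical address bits and 4 KiB pages:
 *
 *   l1tf_pfn_limit() = BIT_ULL(36 - 1 - 12)     = 2^23 pfns
 *   half_pa          = 2^23 << PAGE_SHIFT (12)  = 2^35 bytes = 32 GiB
 *
 * On such a machine any RAM mapped at or above 32 GiB (= MAX_PA/2) triggers
 * the warning, and the suggested "mem=34359738368" (32 GiB) would cap usable
 * memory just below the threshold, at the cost of the RAM above it.
 */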
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 476e3ddf8890..a451bc374b9b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -384,6 +384,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	start_thread_common(regs, new_ip, new_sp,
 			    __USER_CS, __USER_DS, 0);
 }
+EXPORT_SYMBOL_GPL(start_thread);
 
 #ifdef CONFIG_COMPAT
 void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
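The EXPORT_SYMBOL_GPL() above is what lets a binfmt handler built as a module reach start_thread() at all; without it the module fails to load with an unresolved symbol. A rough, purely illustrative sketch of the shape of such a handler (the example_* names are made up and the actual loading work is elided):

/*
 * Illustrative skeleton of a modular binfmt handler.  Its load_binary()
 * hook finishes by calling start_thread(), which is why the symbol must
 * be exported for modules.
 */
#include <linux/binfmts.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <asm/processor.h>

static int example_load_binary(struct linux_binprm *bprm)
{
	unsigned long entry = 0;	/* would come from parsing the image */

	/* ... map the image, set up credentials, mm and stack ... */

	/* Hand control to the new user-space image; needs the export. */
	start_thread(current_pt_regs(), entry, bprm->p);
	return 0;
}

static struct linux_binfmt example_format = {
	.module		= THIS_MODULE,
	.load_binary	= example_load_binary,
};

/* Registered with register_binfmt(&example_format) from the module init. */
MODULE_LICENSE("GPL");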
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8dae47e7267a..1d26f3c4985b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10131,9 +10131,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
  * information but as all relevant affected CPUs have 32KiB L1D cache size
  * there is no point in doing so.
  */
-#define L1D_CACHE_ORDER 4
-static void *vmx_l1d_flush_pages;
-
 static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 {
 	int size = PAGE_SIZE << L1D_CACHE_ORDER;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 5c32a7665492..7a8fc26c1115 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -930,7 +930,7 @@ unsigned long max_swapfile_size(void)
 
 	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
 		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
-		unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
+		unsigned long long l1tf_limit = l1tf_pfn_limit();
 		/*
 		 * We encode swap offsets also with 3 bits below those for pfn
 		 * which makes the usable limit higher.
@@ -938,7 +938,7 @@ unsigned long max_swapfile_size(void)
 #if CONFIG_PGTABLE_LEVELS > 2
 		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
 #endif
-		pages = min_t(unsigned long, l1tf_limit, pages);
+		pages = min_t(unsigned long long, l1tf_limit, pages);
 	}
 	return pages;
 }
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index f40ab8185d94..1e95d57760cf 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -257,7 +257,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
 	/* If it's real memory always allow */
 	if (pfn_valid(pfn))
 		return true;
-	if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+	if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
 		return false;
 	return true;
 }
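Stepping back across the l1tf_pfn_limit() changes: the helper now returns the first pfn at MAX_PA/2 instead of the last pfn below it, so the mmap.c check above becomes '>=', the '+ 1' in init.c disappears, and the warning threshold in bugs.c lands exactly on MAX_PA/2. A worked comparison under assumed CPU values:

/*
 * Worked example of the off-by-one, assuming 36 physical address bits and
 * 4 KiB pages (illustrative values):
 *
 *   old: l1tf_pfn_limit() = BIT(23) - 1 = 0x7fffff  (last allowed pfn)
 *        reject when pfn >  0x7fffff
 *   new: l1tf_pfn_limit() = BIT_ULL(23) = 0x800000  (first pfn at MAX_PA/2)
 *        reject when pfn >= 0x800000
 *
 * Both forms reject the same pfns here, but the new one keeps the
 * byte-level users exact: half_pa = 0x800000 << 12 = 2^35 = MAX_PA/2,
 * whereas the old value shifted up fell one page short of the boundary.
 */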