Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                       |  6
-rw-r--r--  arch/x86/boot/compressed/head_64.S     | 10
-rw-r--r--  arch/x86/boot/compressed/pgtable.h     |  2
-rw-r--r--  arch/x86/events/intel/core.c           | 16
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c   |  4
-rw-r--r--  arch/x86/include/asm/intel-family.h    |  3
-rw-r--r--  arch/x86/include/asm/page_64_types.h   |  4
-rw-r--r--  arch/x86/include/asm/pgtable.h         |  2
-rw-r--r--  arch/x86/include/asm/resctrl_sched.h   |  4
-rw-r--r--  arch/x86/kernel/cpu/Makefile           |  2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c             |  2
-rw-r--r--  arch/x86/kernel/cpu/mce/core.c         |  1
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c    |  2
-rw-r--r--  arch/x86/kernel/cpu/resctrl/Makefile   |  4
-rw-r--r--  arch/x86/kernel/kexec-bzimage64.c      |  3
-rw-r--r--  arch/x86/kvm/vmx/nested.c              |  1
-rw-r--r--  arch/x86/kvm/vmx/vmx.c                 |  3
-rw-r--r--  arch/x86/kvm/x86.c                     |  7
-rw-r--r--  arch/x86/lib/iomem.c                   | 33
-rw-r--r--  arch/x86/mm/fault.c                    |  2
-rw-r--r--  arch/x86/mm/pageattr.c                 | 50
21 files changed, 112 insertions, 49 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 26387c7bf305..68261430fe6e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -446,12 +446,12 @@ config RETPOLINE
 	  branches. Requires a compiler with -mindirect-branch=thunk-extern
 	  support for full protection. The kernel may run slower.
 
-config X86_RESCTRL
-	bool "Resource Control support"
+config X86_CPU_RESCTRL
+	bool "x86 CPU resource control support"
 	depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
 	select KERNFS
 	help
-	  Enable Resource Control support.
+	  Enable x86 CPU resource control support.
 
 	  Provide support for the allocation and monitoring of system resources
 	  usage by the CPU.
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 64037895b085..f62e347862cc 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -600,6 +600,16 @@ ENTRY(trampoline_32bit_src)
 	leal	TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
 	movl	%eax, %cr3
 3:
+	/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
+	pushl	%ecx
+	pushl	%edx
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	btsl	$_EFER_LME, %eax
+	wrmsr
+	popl	%edx
+	popl	%ecx
+
 	/* Enable PAE and LA57 (if required) paging modes */
 	movl	$X86_CR4_PAE, %eax
 	cmpl	$0, %edx
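
For readers more fluent in C than in AT&T assembly, here is a minimal sketch of the same EFER read-modify-write. It relies only on the architectural facts that MSR_EFER is 0xc0000080 and LME is bit 8; like the trampoline code, it is only meaningful at ring 0:

#include <stdint.h>

#define MSR_EFER	0xc0000080u	/* extended feature enable register */
#define EFER_LME	(1u << 8)	/* long mode enable */

/* Same read-modify-write as the rdmsr/btsl/wrmsr sequence above. */
static inline void efer_set_lme(void)
{
	uint32_t lo, hi;

	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (MSR_EFER));
	lo |= EFER_LME;
	asm volatile("wrmsr" : : "a" (lo), "d" (hi), "c" (MSR_EFER));
}
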
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 91f75638f6e6..6ff7e81b5628 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -6,7 +6,7 @@
 #define TRAMPOLINE_32BIT_PGTABLE_OFFSET	0
 
 #define TRAMPOLINE_32BIT_CODE_OFFSET	PAGE_SIZE
-#define TRAMPOLINE_32BIT_CODE_SIZE	0x60
+#define TRAMPOLINE_32BIT_CODE_SIZE	0x70
 
 #define TRAMPOLINE_32BIT_STACK_END	TRAMPOLINE_32BIT_SIZE
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 40e12cfc87f6..daafb893449b 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3559,6 +3559,14 @@ static void free_excl_cntrs(int cpu)
 
 static void intel_pmu_cpu_dying(int cpu)
 {
+	fini_debug_store_on_cpu(cpu);
+
+	if (x86_pmu.counter_freezing)
+		disable_counter_freeze();
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_shared_regs *pc;
 
@@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu)
 	}
 
 	free_excl_cntrs(cpu);
-
-	fini_debug_store_on_cpu(cpu);
-
-	if (x86_pmu.counter_freezing)
-		disable_counter_freeze();
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3663,6 +3666,7 @@ static __initconst const struct x86_pmu core_pmu = {
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
+	.cpu_dead		= intel_pmu_cpu_dead,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3703,6 +3707,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
+	.cpu_dead		= intel_pmu_cpu_dead,
+
 	.guest_get_msrs		= intel_guest_get_msrs,
 	.sched_task		= intel_pmu_sched_task,
 };
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index c07bee31abe8..b10e04387f38 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
 	.id_table	= snbep_uncore_pci_ids,
 };
 
+#define NODE_ID_MASK	0x7
+
 /*
  * build pci bus to socket mapping
  */
@@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
 		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
 		if (err)
 			break;
-		nodeid = config;
+		nodeid = config & NODE_ID_MASK;
 		/* get the Node ID mapping */
 		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
 		if (err)
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0dd6b0f4000e..d9a9993af882 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -6,7 +6,7 @@
6 * "Big Core" Processors (Branded as Core, Xeon, etc...) 6 * "Big Core" Processors (Branded as Core, Xeon, etc...)
7 * 7 *
8 * The "_X" parts are generally the EP and EX Xeons, or the 8 * The "_X" parts are generally the EP and EX Xeons, or the
9 * "Extreme" ones, like Broadwell-E. 9 * "Extreme" ones, like Broadwell-E, or Atom microserver.
10 * 10 *
11 * While adding a new CPUID for a new microarchitecture, add a new 11 * While adding a new CPUID for a new microarchitecture, add a new
12 * group to keep logically sorted out in chronological order. Within 12 * group to keep logically sorted out in chronological order. Within
@@ -71,6 +71,7 @@
 #define INTEL_FAM6_ATOM_GOLDMONT	0x5C /* Apollo Lake */
 #define INTEL_FAM6_ATOM_GOLDMONT_X	0x5F /* Denverton */
 #define INTEL_FAM6_ATOM_GOLDMONT_PLUS	0x7A /* Gemini Lake */
+#define INTEL_FAM6_ATOM_TREMONT_X	0x86 /* Jacobsville */
 
 /* Xeon Phi */
 
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8f657286d599..0ce558a8150d 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -7,7 +7,11 @@
 #endif
 
 #ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_STACK_ORDER 2
+#else
 #define KASAN_STACK_ORDER 1
+#endif
 #else
 #define KASAN_STACK_ORDER 0
 #endif
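
For scale, a quick computation of the resulting kernel stack sizes, assuming the x86-64 relation THREAD_SIZE_ORDER = 2 + KASAN_STACK_ORDER defined further down in this header:

#include <stdio.h>

int main(void)
{
	const int page_shift = 12;	/* 4 KiB pages */
	const int kasan_order[] = { 0, 1, 2 };
	const char *cfg[] = { "no KASAN", "KASAN", "KASAN + KASAN_EXTRA" };

	/* THREAD_SIZE = PAGE_SIZE << (2 + KASAN_STACK_ORDER) */
	for (int i = 0; i < 3; i++)
		printf("%-20s -> %3lu KiB stacks\n", cfg[i],
		       (1UL << (page_shift + 2 + kasan_order[i])) >> 10);
	return 0;	/* 16, 32 and 64 KiB respectively */
}
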
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 40616e805292..2779ace16d23 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd)
 {
-	native_set_pmd(pmdp, pmd);
+	set_pmd(pmdp, pmd);
 }
 
 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h
index 40ebddde6ac2..f6b7fe2833cc 100644
--- a/arch/x86/include/asm/resctrl_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_RESCTRL_SCHED_H
 #define _ASM_X86_RESCTRL_SCHED_H
 
-#ifdef CONFIG_X86_RESCTRL
+#ifdef CONFIG_X86_CPU_RESCTRL
 
 #include <linux/sched.h>
 #include <linux/jump_label.h>
@@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void)
 
 static inline void resctrl_sched_in(void) {}
 
-#endif /* CONFIG_X86_RESCTRL */
+#endif /* CONFIG_X86_CPU_RESCTRL */
 
 #endif /* _ASM_X86_RESCTRL_SCHED_H */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index b6fa0869f7aa..cfd24f9f7614 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
 obj-$(CONFIG_X86_MCE)			+= mce/
 obj-$(CONFIG_MTRR)			+= mtrr/
 obj-$(CONFIG_MICROCODE)			+= microcode/
-obj-$(CONFIG_X86_RESCTRL)		+= resctrl/
+obj-$(CONFIG_X86_CPU_RESCTRL)		+= resctrl/
 
 obj-$(CONFIG_X86_LOCAL_APIC)		+= perfctr-watchdog.o
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 1de0f4170178..01874d54f4fd 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -71,7 +71,7 @@ void __init check_bugs(void)
 	 * identify_boot_cpu() initialized SMT support information, let the
 	 * core code know.
 	 */
-	cpu_smt_check_topology_early();
+	cpu_smt_check_topology();
 
 	if (!IS_ENABLED(CONFIG_SMP)) {
 		pr_info("CPU: ");
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 672c7225cb1b..6ce290c506d9 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 		quirk_no_way_out(i, m, regs);
 
 		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+			m->bank = i;
 			mce_read_aux(m, i);
 			*msg = tmp;
 			return 1;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 51adde0a0f1a..e1f3ba19ba54 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 	if (!p) {
 		return ret;
 	} else {
-		if (boot_cpu_data.microcode == p->patch_id)
+		if (boot_cpu_data.microcode >= p->patch_id)
 			return ret;
 
 		ret = UCODE_NEW;
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
index 1cabe6fd8e11..4a06c37b9cf1 100644
--- a/arch/x86/kernel/cpu/resctrl/Makefile
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_X86_RESCTRL)	+= core.o rdtgroup.o monitor.o
-obj-$(CONFIG_X86_RESCTRL)	+= ctrlmondata.o pseudo_lock.o
+obj-$(CONFIG_X86_CPU_RESCTRL)	+= core.o rdtgroup.o monitor.o
+obj-$(CONFIG_X86_CPU_RESCTRL)	+= ctrlmondata.o pseudo_lock.o
 CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 0d5efa34f359..53917a3ebf94 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
 	struct efi_info *current_ei = &boot_params.efi_info;
 	struct efi_info *ei = &params->efi_info;
 
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return 0;
+
 	if (!current_ei->efi_memmap_size)
 		return 0;
 
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8ff20523661b..d8ea4ebd79e7 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
 	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
 		return;
 
+	hrtimer_cancel(&vmx->nested.preemption_timer);
 	vmx->nested.vmxon = false;
 	vmx->nested.smm.vmxon = false;
 	free_vpid(vmx->nested.vpid02);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4341175339f3..95d618045001 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -26,6 +26,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/sched/smt.h>
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/trace_events.h>
@@ -6823,7 +6824,7 @@ static int vmx_vm_init(struct kvm *kvm)
 		 * Warn upon starting the first VM in a potentially
 		 * insecure environment.
 		 */
-		if (cpu_smt_control == CPU_SMT_ENABLED)
+		if (sched_smt_active())
 			pr_warn_once(L1TF_MSG_SMT);
 		if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
 			pr_warn_once(L1TF_MSG_L1D);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3d27206f6c01..e67ecf25e690 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5116,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
+	/*
+	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+	 * is returned, but our callers are not ready for that and they blindly
+	 * call kvm_inject_page_fault. Ensure that they at least do not leak
+	 * uninitialized kernel stack memory into cr2 and error code.
+	 */
+	memset(exception, 0, sizeof(*exception));
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
 					  exception);
 }
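
The memset enforces the pattern sketched below: an out-parameter that is only conditionally filled must be zeroed before any early-return path, or the caller consumes uninitialized stack bytes. Struct and function names here are hypothetical stand-ins, not KVM's:

#include <stdio.h>
#include <string.h>

struct fault_info {		/* stand-in for struct x86_exception */
	unsigned int vector;
	unsigned long address;	/* would end up in cr2 on injection */
};

static int emulate_read(int ok, struct fault_info *fault)
{
	/* Zero up front: the early return below never touches *fault. */
	memset(fault, 0, sizeof(*fault));
	if (!ok)
		return -1;
	fault->vector = 14;	/* filled only on this path */
	return 0;
}

int main(void)
{
	struct fault_info f;	/* uninitialized stack memory */

	emulate_read(0, &f);
	printf("vector=%u addr=%#lx\n", f.vector, f.address); /* all zero */
	return 0;
}
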
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 66894675f3c8..df50451d94ef 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -2,8 +2,11 @@
 #include <linux/module.h>
 #include <linux/io.h>
 
+#define movs(type,to,from) \
+	asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
+
 /* Originally from i386/string.h */
-static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
+static __always_inline void rep_movs(void *to, const void *from, size_t n)
 {
 	unsigned long d0, d1, d2;
 	asm volatile("rep ; movsl\n\t"
@@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
 
 void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
 {
-	__iomem_memcpy(to, (const void *)from, n);
+	if (unlikely(!n))
+		return;
+
+	/* Align any unaligned source IO */
+	if (unlikely(1 & (unsigned long)from)) {
+		movs("b", to, from);
+		n--;
+	}
+	if (n > 1 && unlikely(2 & (unsigned long)from)) {
+		movs("w", to, from);
+		n-=2;
+	}
+	rep_movs(to, (const void *)from, n);
 }
 EXPORT_SYMBOL(memcpy_fromio);
 
 void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
 {
-	__iomem_memcpy((void *)to, (const void *) from, n);
+	if (unlikely(!n))
+		return;
+
+	/* Align any unaligned destination IO */
+	if (unlikely(1 & (unsigned long)to)) {
+		movs("b", to, from);
+		n--;
+	}
+	if (n > 1 && unlikely(2 & (unsigned long)to)) {
+		movs("w", to, from);
+		n-=2;
+	}
+	rep_movs((void *)to, (const void *) from, n);
 }
 EXPORT_SYMBOL(memcpy_toio);
 
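
The head-alignment idea generalizes beyond MMIO: peel off a byte and/or a 16-bit word until the awkward side is 4-byte aligned, then do the bulk copy. A standalone userspace sketch, with copy_align_head() as a hypothetical stand-in and memcpy() standing in for rep_movs():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void copy_align_head(void *to, const void *from, size_t n)
{
	unsigned char *d = to;
	const unsigned char *s = from;

	if (n && ((uintptr_t)s & 1)) {		/* odd source: one byte */
		*d++ = *s++;
		n--;
	}
	if (n > 1 && ((uintptr_t)s & 2)) {	/* 2-mod-4 source: one word */
		memcpy(d, s, 2);
		d += 2;
		s += 2;
		n -= 2;
	}
	memcpy(d, s, n);			/* bulk copy, now aligned */
}
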
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2ff25ad33233..9d5c75f02295 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
 		return;
 	}
 
-	addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24);
+	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
 #ifdef CONFIG_X86_64
 	addr |= ((u64)desc.base3 << 32);
 #endif
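
The added cast matters because of C integer promotion: desc.base2 (a u8) is promoted to signed int before the shift, so a set bit 31 sign-extends when the result widens to the 64-bit unsigned long. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t base2 = 0xff;	/* descriptor base address bits 24-31 */

	/* Promoted to int; shifting into the sign bit and then widening
	 * smears ones across the upper half (undefined in strict C,
	 * which is exactly the trouble). */
	unsigned long bad = base2 << 24;
	unsigned long good = (unsigned long)base2 << 24;

	printf("bad  = %#lx\n", bad);	/* 0xffffffffff000000 on LP64 */
	printf("good = %#lx\n", good);	/* 0xff000000 */
	return 0;
}
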
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4f8972311a77..14e6119838a6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
 
 #endif
 
+/*
+ * See set_mce_nospec().
+ *
+ * Machine check recovery code needs to change cache mode of poisoned pages to
+ * UC to avoid speculative access logging another error. But passing the
+ * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
+ * speculative access. So we cheat and flip the top bit of the address. This
+ * works fine for the code that updates the page tables. But at the end of the
+ * process we need to flush the TLB and cache and the non-canonical address
+ * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long fix_addr(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+	return (long)(addr << 1) >> 1;
+#else
+	return addr;
+#endif
+}
+
 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
 {
 	if (cpa->flags & CPA_PAGES_ARRAY) {
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
 	unsigned int i;
 
 	for (i = 0; i < cpa->numpages; i++)
-		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
+		__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
 }
 
 static void cpa_flush(struct cpa_data *data, int cache)
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
 		 * Only flush present addresses:
 		 */
 		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range_opt((void *)addr, PAGE_SIZE);
+			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
 	}
 	mb();
 }
@@ -1627,29 +1650,6 @@ out:
 	return ret;
 }
 
-/*
- * Machine check recovery code needs to change cache mode of poisoned
- * pages to UC to avoid speculative access logging another error. But
- * passing the address of the 1:1 mapping to set_memory_uc() is a fine
- * way to encourage a speculative access. So we cheat and flip the top
- * bit of the address. This works fine for the code that updates the
- * page tables. But at the end of the process we need to flush the cache
- * and the non-canonical address causes a #GP fault when used by the
- * CLFLUSH instruction.
- *
- * But in the common case we already have a canonical address. This code
- * will fix the top bit if needed and is a no-op otherwise.
- */
-static inline unsigned long make_addr_canonical_again(unsigned long addr)
-{
-#ifdef CONFIG_X86_64
-	return (long)(addr << 1) >> 1;
-#else
-	return addr;
-#endif
-}
-
-
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr,
 				    int force_split, int in_flag,
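
The shift pair in fix_addr() is plain sign extension from bit 62: shifting the top bit out and arithmetic-shifting back restores the flipped-top-bit alias to a canonical kernel address while leaving an already-canonical address untouched. A standalone demonstration (the example address is arbitrary):

#include <stdio.h>

static unsigned long fix_addr(unsigned long addr)
{
	return (long)(addr << 1) >> 1;	/* sign-extend from bit 62 */
}

int main(void)
{
	unsigned long canonical = 0xffff888012345000UL;
	unsigned long decoy = canonical & ~(1UL << 63);	/* top bit flipped */

	printf("%#lx -> %#lx\n", decoy, fix_addr(decoy));	/* restored */
	printf("%#lx -> %#lx\n", canonical, fix_addr(canonical)); /* no-op */
	return 0;
}
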