Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/entry/entry_32.S                 | 13
-rw-r--r--  arch/x86/events/intel/core.c              |  9
-rw-r--r--  arch/x86/events/intel/ds.c                |  2
-rw-r--r--  arch/x86/include/asm/kvm_host.h           |  7
-rw-r--r--  arch/x86/include/uapi/asm/byteorder.h     |  2
-rw-r--r--  arch/x86/include/uapi/asm/hwcap2.h        |  2
-rw-r--r--  arch/x86/include/uapi/asm/sigcontext32.h  |  2
-rw-r--r--  arch/x86/include/uapi/asm/types.h         |  2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c                |  2
-rw-r--r--  arch/x86/kernel/head_64.S                 |  8
-rw-r--r--  arch/x86/kernel/hpet.c                    | 12
-rw-r--r--  arch/x86/kernel/stacktrace.c              |  2
-rw-r--r--  arch/x86/kernel/sysfb_efi.c               | 46
-rw-r--r--  arch/x86/kvm/mmu.c                        |  2
-rw-r--r--  arch/x86/kvm/svm.c                        | 13
-rw-r--r--  arch/x86/kvm/vmx/nested.c                 |  4
-rw-r--r--  arch/x86/kvm/vmx/vmx.c                    | 13
-rw-r--r--  arch/x86/kvm/x86.c                        | 16
-rw-r--r--  arch/x86/mm/fault.c                       | 15
19 files changed, 128 insertions, 44 deletions
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 2bb986f305ac..4f86928246e7 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1443,8 +1443,12 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
 
 ENTRY(page_fault)
 	ASM_CLAC
-	pushl	$0; /* %gs's slot on the stack */
+	pushl	$do_page_fault
+	jmp	common_exception_read_cr2
+END(page_fault)
 
+common_exception_read_cr2:
+	/* the function address is in %gs's slot on the stack */
 	SAVE_ALL switch_stacks=1 skip_gs=1
 
 	ENCODE_FRAME_POINTER
@@ -1452,6 +1456,7 @@ ENTRY(page_fault)
 
 	/* fixup %gs */
 	GS_TO_REG %ecx
+	movl	PT_GS(%esp), %edi
 	REG_TO_PTGS %ecx
 	SET_KERNEL_GS %ecx
 
@@ -1463,9 +1468,9 @@ ENTRY(page_fault)
 
 	TRACE_IRQS_OFF
 	movl	%esp, %eax		# pt_regs pointer
-	call	do_page_fault
+	CALL_NOSPEC %edi
 	jmp	ret_from_exception
-END(page_fault)
+END(common_exception_read_cr2)
 
 common_exception:
 	/* the function address is in %gs's slot on the stack */
@@ -1595,7 +1600,7 @@ END(general_protection)
 ENTRY(async_page_fault)
 	ASM_CLAC
 	pushl	$do_async_page_fault
-	jmp	common_exception
+	jmp	common_exception_read_cr2
 END(async_page_fault)
 #endif
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 9e911a96972b..648260b5f367 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -20,7 +20,6 @@
 #include <asm/intel-family.h>
 #include <asm/apic.h>
 #include <asm/cpu_device_id.h>
-#include <asm/hypervisor.h>
 
 #include "../perf_event.h"
 
@@ -263,8 +262,8 @@ static struct event_constraint intel_icl_event_constraints[] = {
 };
 
 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
-	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
-	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
+	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
+	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 	EVENT_EXTRA_END
@@ -4053,7 +4052,7 @@ static bool check_msr(unsigned long msr, u64 mask)
 	 * Disable the check for real HW, so we don't
 	 * mess with potentionaly enabled registers:
 	 */
-	if (hypervisor_is_type(X86_HYPER_NATIVE))
+	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return true;
 
 	/*
@@ -4955,6 +4954,7 @@ __init int intel_pmu_init(void)
 
 	case INTEL_FAM6_SKYLAKE_X:
 		pmem = true;
+		/* fall through */
 	case INTEL_FAM6_SKYLAKE_MOBILE:
 	case INTEL_FAM6_SKYLAKE_DESKTOP:
 	case INTEL_FAM6_KABYLAKE_MOBILE:
@@ -5004,6 +5004,7 @@ __init int intel_pmu_init(void)
 	case INTEL_FAM6_ICELAKE_X:
 	case INTEL_FAM6_ICELAKE_XEON_D:
 		pmem = true;
+		/* fall through */
 	case INTEL_FAM6_ICELAKE_MOBILE:
 	case INTEL_FAM6_ICELAKE_DESKTOP:
 		x86_pmu.late_ack = true;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 2c8db2c19328..f1269e804e9b 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -851,7 +851,7 @@ struct event_constraint intel_skl_pebs_event_constraints[] = {
 
 struct event_constraint intel_icl_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL),	/* INST_RETIRED.PREC_DIST */
-	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL),	/* SLOTS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),	/* SLOTS */
 
 	INTEL_PLD_CONSTRAINT(0x1cd, 0xff),			/* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),	/* MEM_INST_RETIRED.LOAD */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8282b8d41209..7b0a4ee77313 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -607,15 +607,16 @@ struct kvm_vcpu_arch {
 
 	/*
 	 * QEMU userspace and the guest each have their own FPU state.
-	 * In vcpu_run, we switch between the user, maintained in the
-	 * task_struct struct, and guest FPU contexts. While running a VCPU,
-	 * the VCPU thread will have the guest FPU context.
+	 * In vcpu_run, we switch between the user and guest FPU contexts.
+	 * While running a VCPU, the VCPU thread will have the guest FPU
+	 * context.
 	 *
 	 * Note that while the PKRU state lives inside the fpu registers,
 	 * it is switched out separately at VMENTER and VMEXIT time. The
 	 * "guest_fpu" state here contains the guest FPU context, with the
 	 * host PRKU bits.
 	 */
+	struct fpu *user_fpu;
 	struct fpu *guest_fpu;
 
 	u64 xcr0;
diff --git a/arch/x86/include/uapi/asm/byteorder.h b/arch/x86/include/uapi/asm/byteorder.h
index 484e3cfd7ef2..149143cab9ff 100644
--- a/arch/x86/include/uapi/asm/byteorder.h
+++ b/arch/x86/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_X86_BYTEORDER_H
 #define _ASM_X86_BYTEORDER_H
 
diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
index 6ebaae90e207..8b2effe6efb8 100644
--- a/arch/x86/include/uapi/asm/hwcap2.h
+++ b/arch/x86/include/uapi/asm/hwcap2.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_X86_HWCAP2_H
 #define _ASM_X86_HWCAP2_H
 
diff --git a/arch/x86/include/uapi/asm/sigcontext32.h b/arch/x86/include/uapi/asm/sigcontext32.h
index 6b18e88de8a6..7114801d0499 100644
--- a/arch/x86/include/uapi/asm/sigcontext32.h
+++ b/arch/x86/include/uapi/asm/sigcontext32.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_X86_SIGCONTEXT32_H
 #define _ASM_X86_SIGCONTEXT32_H
 
diff --git a/arch/x86/include/uapi/asm/types.h b/arch/x86/include/uapi/asm/types.h
index df55e1ddb0c9..9d5c11a24279 100644
--- a/arch/x86/include/uapi/asm/types.h
+++ b/arch/x86/include/uapi/asm/types.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_X86_TYPES_H
 #define _ASM_X86_TYPES_H
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 66ca906aa790..801ecd1c3fd5 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1226,7 +1226,7 @@ static ssize_t l1tf_show_state(char *buf)
 
 static ssize_t mds_show_state(char *buf)
 {
-	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		return sprintf(buf, "%s; SMT Host state unknown\n",
 			       mds_strings[mds_mitigation]);
 	}
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index a6342c899be5..f3d3e9646a99 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -193,10 +193,10 @@ ENTRY(secondary_startup_64)
 
 	/* Set up %gs.
 	 *
-	 * The base of %gs always points to the bottom of the irqstack
-	 * union. If the stack protector canary is enabled, it is
-	 * located at %gs:40. Note that, on SMP, the boot cpu uses
-	 * init data section till per cpu areas are set up.
+	 * The base of %gs always points to fixed_percpu_data. If the
+	 * stack protector canary is enabled, it is located at %gs:40.
+	 * Note that, on SMP, the boot cpu uses init data section until
+	 * the per cpu areas are set up.
 	 */
 	movl	$MSR_GS_BASE,%ecx
 	movl	initial_gs(%rip),%eax
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index c43e96a938d0..c6f791bc481e 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -827,10 +827,6 @@ int __init hpet_enable(void)
 	if (!hpet_cfg_working())
 		goto out_nohpet;
 
-	/* Validate that the counter is counting */
-	if (!hpet_counting())
-		goto out_nohpet;
-
 	/*
 	 * Read the period and check for a sane value:
 	 */
@@ -896,6 +892,14 @@ int __init hpet_enable(void)
 	}
 	hpet_print_config();
 
+	/*
+	 * Validate that the counter is counting. This needs to be done
+	 * after sanitizing the config registers to properly deal with
+	 * force enabled HPETs.
+	 */
+	if (!hpet_counting())
+		goto out_nohpet;
+
 	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
 
 	if (id & HPET_ID_LEGSUP) {
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 4f36d3241faf..2d6898c2cb64 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -100,7 +100,7 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 {
 	int ret;
 
-	if (!access_ok(fp, sizeof(*frame)))
+	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
 		return 0;
 
 	ret = 1;
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 8eb67a670b10..653b7f617b61 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -230,9 +230,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
 	{},
 };
 
+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+	{
+		/*
+		 * Lenovo MIIX310-10ICR, only some batches have the troublesome
+		 * 800x1280 portrait screen. Luckily the portrait version has
+		 * its own BIOS version, so we match on that.
+		 */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+		},
+	},
+	{
+		/* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+					"Lenovo MIIX 320-10ICR"),
+		},
+	},
+	{
+		/* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+					"Lenovo ideapad D330-10IGM"),
+		},
+	},
+	{},
+};
+
 __init void sysfb_apply_efi_quirks(void)
 {
 	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
 	    !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
 		dmi_check_system(efifb_dmi_system_table);
+
+	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+	    dmi_check_system(efifb_dmi_swap_width_height)) {
+		u16 temp = screen_info.lfb_width;
+
+		screen_info.lfb_width = screen_info.lfb_height;
+		screen_info.lfb_height = temp;
+		screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+	}
 }
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8f72526e2f68..24843cf49579 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3466,7 +3466,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	/*
 	 * Currently, fast page fault only works for direct mapping
 	 * since the gfn is not stable for indirect shadow page. See
-	 * Documentation/virtual/kvm/locking.txt to get more detail.
+	 * Documentation/virt/kvm/locking.txt to get more detail.
 	 */
 	fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
 						iterator.sptep, spte,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 19f69df96758..7eafc6907861 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2143,12 +2143,20 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 		goto out;
 	}
 
+	svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
+						     GFP_KERNEL_ACCOUNT);
+	if (!svm->vcpu.arch.user_fpu) {
+		printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
+		err = -ENOMEM;
+		goto free_partial_svm;
+	}
+
 	svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
 						     GFP_KERNEL_ACCOUNT);
 	if (!svm->vcpu.arch.guest_fpu) {
 		printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
 		err = -ENOMEM;
-		goto free_partial_svm;
+		goto free_user_fpu;
 	}
 
 	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
@@ -2211,6 +2219,8 @@ uninit:
 	kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
 	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
+free_user_fpu:
+	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
 free_partial_svm:
 	kmem_cache_free(kvm_vcpu_cache, svm);
 out:
@@ -2241,6 +2251,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
+	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
 	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
 }
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0f1378789bd0..ced9fba32598 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -220,6 +220,8 @@ static void free_nested(struct kvm_vcpu *vcpu)
 	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
 		return;
 
+	kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+
 	vmx->nested.vmxon = false;
 	vmx->nested.smm.vmxon = false;
 	free_vpid(vmx->nested.vpid02);
@@ -232,7 +234,9 @@ static void free_nested(struct kvm_vcpu *vcpu)
 		vmx->vmcs01.shadow_vmcs = NULL;
 	}
 	kfree(vmx->nested.cached_vmcs12);
+	vmx->nested.cached_vmcs12 = NULL;
 	kfree(vmx->nested.cached_shadow_vmcs12);
+	vmx->nested.cached_shadow_vmcs12 = NULL;
 	/* Unpin physical memory we referred to in the vmcs02 */
 	if (vmx->nested.apic_access_page) {
 		kvm_release_page_dirty(vmx->nested.apic_access_page);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index a279447eb75b..074385c86c09 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6598,6 +6598,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	free_loaded_vmcs(vmx->loaded_vmcs);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
+	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 }
@@ -6613,12 +6614,20 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (!vmx)
 		return ERR_PTR(-ENOMEM);
 
+	vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
+			GFP_KERNEL_ACCOUNT);
+	if (!vmx->vcpu.arch.user_fpu) {
+		printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
+		err = -ENOMEM;
+		goto free_partial_vcpu;
+	}
+
 	vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
 			GFP_KERNEL_ACCOUNT);
 	if (!vmx->vcpu.arch.guest_fpu) {
 		printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
 		err = -ENOMEM;
-		goto free_partial_vcpu;
+		goto free_user_fpu;
 	}
 
 	vmx->vpid = allocate_vpid();
@@ -6721,6 +6730,8 @@ uninit_vcpu:
 free_vcpu:
 	free_vpid(vmx->vpid);
 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
+free_user_fpu:
+	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
 free_partial_vcpu:
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 	return ERR_PTR(err);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 58305cf81182..c6d951cbd76c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3306,6 +3306,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	kvm_x86_ops->vcpu_load(vcpu, cpu);
 
+	fpregs_assert_state_consistent();
+	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_return();
+
 	/* Apply any externally detected TSC adjustments (due to suspend) */
 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
@@ -7202,7 +7206,7 @@ static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
 
 	rcu_read_unlock();
 
-	if (target)
+	if (target && READ_ONCE(target->ready))
 		kvm_vcpu_yield_to(target);
 }
 
@@ -7242,6 +7246,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		break;
 	case KVM_HC_KICK_CPU:
 		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
+		kvm_sched_yield(vcpu->kvm, a1);
 		ret = 0;
 		break;
 #ifdef CONFIG_X86_64
@@ -7990,9 +7995,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	trace_kvm_entry(vcpu->vcpu_id);
 	guest_enter_irqoff();
 
-	fpregs_assert_state_consistent();
-	if (test_thread_flag(TIF_NEED_FPU_LOAD))
-		switch_fpu_return();
+	/* The preempt notifier should have taken care of the FPU already. */
+	WARN_ON_ONCE(test_thread_flag(TIF_NEED_FPU_LOAD));
 
 	if (unlikely(vcpu->arch.switch_db_regs)) {
 		set_debugreg(0, 7);
@@ -8270,7 +8274,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
 	fpregs_lock();
 
-	copy_fpregs_to_fpstate(&current->thread.fpu);
+	copy_fpregs_to_fpstate(vcpu->arch.user_fpu);
 	/* PKRU is separately restored in kvm_x86_ops->run. */
 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
 				~XFEATURE_MASK_PKRU);
@@ -8287,7 +8291,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 	fpregs_lock();
 
 	copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
-	copy_kernel_to_fpregs(&current->thread.fpu.state);
+	copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
 
 	fpregs_mark_activate();
 	fpregs_unlock();
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 6c46095cd0d9..9ceacd1156db 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -177,13 +177,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
-	if (!pmd_present(*pmd_k))
-		return NULL;
 
-	if (!pmd_present(*pmd))
+	if (pmd_present(*pmd) != pmd_present(*pmd_k))
 		set_pmd(pmd, *pmd_k);
+
+	if (!pmd_present(*pmd_k))
+		return NULL;
 	else
-		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
 	return pmd_k;
 }
@@ -203,17 +204,13 @@ void vmalloc_sync_all(void)
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
-			pmd_t *ret;
 
 			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
-			ret = vmalloc_sync_one(page_address(page), address);
+			vmalloc_sync_one(page_address(page), address);
 			spin_unlock(pgt_lock);
-
-			if (!ret)
-				break;
 		}
 		spin_unlock(&pgd_lock);
 	}