Diffstat (limited to 'arch/x86/kvm')

 arch/x86/kvm/emulate.c | 51
 arch/x86/kvm/svm.c     |  5
 arch/x86/kvm/vmx.c     |  2
 arch/x86/kvm/x86.c     | 45
 4 files changed, 102 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 05a562b85025..0982507b962a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1891,6 +1891,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 	ss->p = 1;
 }
 
+static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
+{
+	struct x86_emulate_ops *ops = ctxt->ops;
+	u32 eax, ebx, ecx, edx;
+
+	/*
+	 * syscall should always be enabled in longmode - so only become
+	 * vendor specific (cpuid) if other modes are active...
+	 */
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		return true;
+
+	eax = 0x00000000;
+	ecx = 0x00000000;
+	if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
+		/*
+		 * Intel ("GenuineIntel")
+		 * remark: Intel CPUs only support "syscall" in 64bit
+		 * longmode. Also an 64bit guest with a
+		 * 32bit compat-app running will #UD !! While this
+		 * behaviour can be fixed (by emulating) into AMD
+		 * response - CPUs of AMD can't behave like Intel.
+		 */
+		if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
+		    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
+		    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
+			return false;
+
+		/* AMD ("AuthenticAMD") */
+		if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
+		    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
+		    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+			return true;
+
+		/* AMD ("AMDisbetter!") */
+		if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
+		    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
+		    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
+			return true;
+	}
+
+	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
+	return false;
+}
+
 static int em_syscall(struct x86_emulate_ctxt *ctxt)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
@@ -1904,9 +1949,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
 	    ctxt->mode == X86EMUL_MODE_VM86)
 		return emulate_ud(ctxt);
 
+	if (!(em_syscall_is_enabled(ctxt)))
+		return emulate_ud(ctxt);
+
 	ops->get_msr(ctxt, MSR_EFER, &efer);
 	setup_syscalls_segments(ctxt, &cs, &ss);
 
+	if (!(efer & EFER_SCE))
+		return emulate_ud(ctxt);
+
 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
 	msr_data >>= 32;
 	cs_sel = (u16)(msr_data & 0xfffc);
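The X86EMUL_CPUID_VENDOR_* constants tested above are simply the 12-byte
CPUID leaf 0 vendor string chopped into the EBX, EDX, ECX register triple
(the order the hardware returns it in). A standalone sketch, not kernel
code, that reproduces the values on a little-endian host; vendor_to_regs()
is a hypothetical helper for illustration:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order. */
	static void vendor_to_regs(const char *vendor)
	{
		uint32_t ebx, edx, ecx;

		memcpy(&ebx, vendor + 0, 4);	/* "Genu" -> 0x756e6547 */
		memcpy(&edx, vendor + 4, 4);	/* "ineI" -> 0x49656e69 */
		memcpy(&ecx, vendor + 8, 4);	/* "ntel" -> 0x6c65746e */

		printf("%.12s: ebx=%#010x ecx=%#010x edx=%#010x\n",
		       vendor, ebx, ecx, edx);
	}

	int main(void)
	{
		vendor_to_regs("GenuineIntel");
		vendor_to_regs("AuthenticAMD");
		vendor_to_regs("AMDisbetter!");	/* reportedly early AMD samples */
		return 0;
	}

All three registers are compared because no single register is unique
across vendor strings, and the default for an unknown vendor deliberately
falls back to Intel's stricter #UD behaviour.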
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5fa553babe56..e385214711cb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -29,6 +29,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 
+#include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
 #include <asm/kvm_para.h>
@@ -575,6 +576,8 @@ static void svm_hardware_disable(void *garbage)
 		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
 
 	cpu_svm_disable();
+
+	amd_pmu_disable_virt();
 }
 
 static int svm_hardware_enable(void *garbage)
@@ -622,6 +625,8 @@ static int svm_hardware_enable(void *garbage)
 
 	svm_init_erratum_383();
 
+	amd_pmu_enable_virt();
+
 	return 0;
 }
 
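svm.c picks up amd_pmu_enable_virt()/amd_pmu_disable_virt() from the newly
included <asm/perf_event.h>; the declarations themselves sit outside this
diff. A plausible shape for them, assuming (hypothetically) a
CONFIG_CPU_SUP_AMD guard with empty inline stubs so non-AMD configurations
keep building:

	/* Hypothetical sketch of the header side; not part of this diff. */
	#ifdef CONFIG_CPU_SUP_AMD
	extern void amd_pmu_enable_virt(void);
	extern void amd_pmu_disable_virt(void);
	#else
	static inline void amd_pmu_enable_virt(void) { }
	static inline void amd_pmu_disable_virt(void) { }
	#endif

The two calls mirror each other across svm_hardware_enable() and
svm_hardware_disable(), so the host PMU code always knows whether guest
mode can be entered on this CPU.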
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d29216c462b3..3b4c8d8ad906 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1457,7 +1457,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-	if (current_thread_info()->status & TS_USEDFPU)
+	if (__thread_has_fpu(current))
 		clts();
 	load_gdt(&__get_cpu_var(host_gdt));
 }
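The vmx.c hunk swaps a direct peek at thread_info's TS_USEDFPU status bit
for the __thread_has_fpu() accessor from the contemporaneous i387 rework.
The question asked is unchanged - does the current task own the FPU, so
that CR0.TS must be cleared before host FPU state is touched - only the
location of the ownership bit moved. A simplified standalone model, all
names hypothetical:

	#include <stdbool.h>
	#include <stdio.h>

	struct task {
		bool has_fpu;	/* hypothetical per-task FPU ownership bit */
	};

	static void clts(void)
	{
		puts("CR0.TS cleared");	/* the real clts() clears CR0.TS */
	}

	/* Mirrors the fixed check: consult per-task ownership, not a
	 * per-thread_info status flag, before touching CR0.TS. */
	static void load_host_state(const struct task *tsk)
	{
		if (tsk->has_fpu)
			clts();
	}

	int main(void)
	{
		struct task cur = { .has_fpu = true };
		load_host_state(&cur);
		return 0;
	}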
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 14d6cadc4ba6..9cbfc0698118 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1495,6 +1495,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
+	bool pr = false;
+
 	switch (msr) {
 	case MSR_EFER:
 		return set_efer(vcpu, data);
@@ -1635,6 +1637,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
 			"0x%x data 0x%llx\n", msr, data);
 		break;
+	case MSR_P6_PERFCTR0:
+	case MSR_P6_PERFCTR1:
+		pr = true;
+	case MSR_P6_EVNTSEL0:
+	case MSR_P6_EVNTSEL1:
+		if (kvm_pmu_msr(vcpu, msr))
+			return kvm_pmu_set_msr(vcpu, msr, data);
+
+		if (pr || data != 0)
+			pr_unimpl(vcpu, "disabled perfctr wrmsr: "
+				"0x%x data 0x%llx\n", msr, data);
+		break;
 	case MSR_K7_CLK_CTL:
 		/*
 		 * Ignore all writes to this no longer documented MSR.
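Note the deliberate switch fall-through in the wrmsr hunk above: the
MSR_P6_PERFCTR0/1 cases set pr = true and continue into the
MSR_P6_EVNTSEL0/1 handling, so all four MSRs share one path, zero writes
to an event-select MSR stay silent, and any counter write is reported. A
standalone illustration of that control flow, using the real P6 MSR
indices (0xc1/0xc2 and 0x186/0x187) with hypothetical names:

	#include <stdbool.h>
	#include <stdio.h>

	enum {
		PERFCTR0 = 0xc1, PERFCTR1 = 0xc2,	/* MSR_P6_PERFCTR0/1 */
		EVNTSEL0 = 0x186, EVNTSEL1 = 0x187,	/* MSR_P6_EVNTSEL0/1 */
	};

	static void handle_wrmsr(unsigned int msr, unsigned long long data)
	{
		bool pr = false;

		switch (msr) {
		case PERFCTR0:
		case PERFCTR1:
			pr = true;
			/* deliberate fall-through */
		case EVNTSEL0:
		case EVNTSEL1:
			if (pr || data != 0)
				printf("disabled perfctr wrmsr: 0x%x data 0x%llx\n",
				       msr, data);
			break;
		}
	}

	int main(void)
	{
		handle_wrmsr(EVNTSEL0, 0);	/* silent: harmless zero write */
		handle_wrmsr(EVNTSEL0, 1);	/* warns */
		handle_wrmsr(PERFCTR0, 0);	/* warns: pr forces the message */
		return 0;
	}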
@@ -1835,6 +1849,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_FAM10H_MMIO_CONF_BASE:
 		data = 0;
 		break;
+	case MSR_P6_PERFCTR0:
+	case MSR_P6_PERFCTR1:
+	case MSR_P6_EVNTSEL0:
+	case MSR_P6_EVNTSEL1:
+		if (kvm_pmu_msr(vcpu, msr))
+			return kvm_pmu_get_msr(vcpu, msr, pdata);
+		data = 0;
+		break;
 	case MSR_IA32_UCODE_REV:
 		data = 0x100000000ULL;
 		break;
@@ -4180,6 +4202,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
 	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
 }
 
+static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
+			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+{
+	struct kvm_cpuid_entry2 *cpuid = NULL;
+
+	if (eax && ecx)
+		cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
+					     *eax, *ecx);
+
+	if (cpuid) {
+		*eax = cpuid->eax;
+		*ecx = cpuid->ecx;
+		if (ebx)
+			*ebx = cpuid->ebx;
+		if (edx)
+			*edx = cpuid->edx;
+		return true;
+	}
+
+	return false;
+}
+
 static struct x86_emulate_ops emulate_ops = {
 	.read_std = kvm_read_guest_virt_system,
 	.write_std = kvm_write_guest_virt_system,
@@ -4211,6 +4255,7 @@ static struct x86_emulate_ops emulate_ops = {
 	.get_fpu = emulator_get_fpu,
 	.put_fpu = emulator_put_fpu,
 	.intercept = emulator_intercept,
+	.get_cpuid = emulator_get_cpuid,
 };
 
 static void cache_all_regs(struct kvm_vcpu *vcpu)
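emulator_get_cpuid() exists so vendor-neutral emulator code such as
em_syscall_is_enabled() above can query guest CPUID through the
x86_emulate_ops table without knowing about struct kvm_vcpu. A minimal
standalone sketch of that callback-table pattern, with hypothetical names
(emul_ops, emul_ctxt, fake_get_cpuid):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct emul_ctxt;

	/* Trimmed-down analogue of struct x86_emulate_ops. */
	struct emul_ops {
		bool (*get_cpuid)(struct emul_ctxt *ctxt, uint32_t *eax,
				  uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
	};

	struct emul_ctxt {
		const struct emul_ops *ops;
	};

	/* A backend supplies the data; here, a canned "GenuineIntel" leaf. */
	static bool fake_get_cpuid(struct emul_ctxt *ctxt, uint32_t *eax,
				   uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
	{
		(void)ctxt;
		if (!eax || !ecx)
			return false;
		*eax = 0x0000000d;
		if (ebx)
			*ebx = 0x756e6547;	/* "Genu" */
		*ecx = 0x6c65746e;		/* "ntel" */
		if (edx)
			*edx = 0x49656e69;	/* "ineI" */
		return true;
	}

	int main(void)
	{
		const struct emul_ops ops = { .get_cpuid = fake_get_cpuid };
		struct emul_ctxt ctxt = { .ops = &ops };
		uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

		if (ctxt.ops->get_cpuid(&ctxt, &eax, &ebx, &ecx, &edx))
			printf("ebx=%#x edx=%#x ecx=%#x\n", ebx, edx, ecx);
		return 0;
	}

As in the kernel version, EAX and ECX double as the requested leaf and
subleaf on input, which is why they are mandatory while EBX and EDX may be
NULL.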