Diffstat (limited to 'arch/x86/kvm/vmx.c')
 -rw-r--r--  arch/x86/kvm/vmx.c | 128
 1 file changed, 85 insertions(+), 43 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bf89ec2cfb82..5b4cdcbd154c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -93,14 +93,14 @@ module_param(yield_on_hlt, bool, S_IRUGO);
  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
  * ple_gap:    upper bound on the amount of time between two successive
  *             executions of PAUSE in a loop. Also indicate if ple enabled.
- *             According to test, this time is usually small than 41 cycles.
+ *             According to test, this time is usually smaller than 128 cycles.
  * ple_window: upper bound on the amount of time a guest is allowed to execute
  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
  *             less than 2^12 cycles
  * Time is measured based on a counter that runs at the same rate as the TSC,
  * refer SDM volume 3b section 21.6.13 & 22.1.3.
  */
-#define KVM_VMX_DEFAULT_PLE_GAP    41
+#define KVM_VMX_DEFAULT_PLE_GAP    128
 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
 module_param(ple_gap, int, S_IRUGO);
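
For context on what these knobs control: when Pause-Loop Exiting is supported, vmx_vcpu_setup() later in this file writes them into the VMCS, with ple_gap == 0 doubling as the "PLE disabled" switch. A minimal sketch of that pattern (helper name hypothetical; PLE_GAP and PLE_WINDOW are the real VMCS field names):

/* Sketch, not part of the patch: programming the PLE controls. */
static void sketch_setup_ple(void)
{
	if (ple_gap) {
		/* A PAUSE-to-PAUSE gap under ple_gap cycles marks a spin
		 * loop; spinning past ple_window cycles forces a VM exit. */
		vmcs_write32(PLE_GAP, ple_gap);
		vmcs_write32(PLE_WINDOW, ple_window);
	}
}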
@@ -176,11 +176,11 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct vcpu_vmx, vcpu);
 }
 
-static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -1333,19 +1333,25 @@ static __init int vmx_disabled_by_bios(void)
 
 	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
 	if (msr & FEATURE_CONTROL_LOCKED) {
+		/* launched w/ TXT and VMX disabled */
 		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
 			&& tboot_enabled())
 			return 1;
+		/* launched w/o TXT and VMX only enabled w/ TXT */
 		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+			&& (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
 			&& !tboot_enabled()) {
 			printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
-				" activate TXT before enabling KVM\n");
+				"activate TXT before enabling KVM\n");
 			return 1;
 		}
+		/* launched w/o TXT and VMX disabled */
+		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+			&& !tboot_enabled())
+			return 1;
 	}
 
 	return 0;
-	/* locked but not enabled */
 }
 
 static void kvm_cpu_vmxon(u64 addr)
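
The rewrite enumerates all three "locked" outcomes explicitly instead of leaving the TXT-disabled case implicit. A condensed sketch of the resulting decision (helper hypothetical; the flag names are the real MSR_IA32_FEATURE_CONTROL bits):

/* Sketch: VMX is usable iff the VMXON-enable bit matching the launch
 * environment (inside vs. outside SMX/TXT) is set, or the MSR is still
 * unlocked so KVM can set the bits itself during hardware enable. */
static bool sketch_vmx_usable(u64 msr, bool launched_with_txt)
{
	if (!(msr & FEATURE_CONTROL_LOCKED))
		return true;
	if (launched_with_txt)
		return msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
	return msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
}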
@@ -1683,6 +1689,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 	vmx->emulation_required = 1;
 	vmx->rmode.vm86_active = 0;
 
+	vmcs_write16(GUEST_TR_SELECTOR, vmx->rmode.tr.selector);
 	vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
 	vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
 	vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
@@ -1756,6 +1763,19 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	vmx->emulation_required = 1;
 	vmx->rmode.vm86_active = 1;
 
+	/*
+	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
+	 * vcpu. Call it here with phys address pointing 16M below 4G.
+	 */
+	if (!vcpu->kvm->arch.tss_addr) {
+		printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be "
+			     "called before entering vcpu\n");
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+		vmx_set_tss_addr(vcpu->kvm, 0xfeffd000);
+		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+	}
+
+	vmx->rmode.tr.selector = vmcs_read16(GUEST_TR_SELECTOR);
 	vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
 	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
 
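
The SRCU unlock/lock dance is needed because vmx_set_tss_addr() installs a private memslot, which synchronizes kvm->srcu and so cannot run under the vcpu's read lock. The fallback only triggers for userspace that never issues the ioctl; a sketch of the normal userspace side (0xfffbd000 is the conventional QEMU choice below the BIOS area, not a value KVM mandates):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: reserve three pages of guest-physical space for the vm86
 * TSS before the first KVM_RUN; error handling elided. */
static int sketch_set_tss(int vm_fd)
{
	return ioctl(vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000UL);
}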
@@ -1794,7 +1814,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 
 continue_rmode:
 	kvm_mmu_reset_context(vcpu);
-	init_rmode(vcpu->kvm);
 }
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -2030,23 +2049,40 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vmcs_writel(GUEST_CR4, hw_cr4);
 }
 
-static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
-{
-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
-
-	return vmcs_readl(sf->base);
-}
-
 static void vmx_get_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+	struct kvm_save_segment *save;
 	u32 ar;
 
+	if (vmx->rmode.vm86_active
+	    && (seg == VCPU_SREG_TR || seg == VCPU_SREG_ES
+		|| seg == VCPU_SREG_DS || seg == VCPU_SREG_FS
+		|| seg == VCPU_SREG_GS)
+	    && !emulate_invalid_guest_state) {
+		switch (seg) {
+		case VCPU_SREG_TR: save = &vmx->rmode.tr; break;
+		case VCPU_SREG_ES: save = &vmx->rmode.es; break;
+		case VCPU_SREG_DS: save = &vmx->rmode.ds; break;
+		case VCPU_SREG_FS: save = &vmx->rmode.fs; break;
+		case VCPU_SREG_GS: save = &vmx->rmode.gs; break;
+		default: BUG();
+		}
+		var->selector = save->selector;
+		var->base = save->base;
+		var->limit = save->limit;
+		ar = save->ar;
+		if (seg == VCPU_SREG_TR
+		    || var->selector == vmcs_read16(sf->selector))
+			goto use_saved_rmode_seg;
+	}
 	var->base = vmcs_readl(sf->base);
 	var->limit = vmcs_read32(sf->limit);
 	var->selector = vmcs_read16(sf->selector);
 	ar = vmcs_read32(sf->ar_bytes);
+use_saved_rmode_seg:
 	if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
 		ar = 0;
 	var->type = ar & 15;
@@ -2060,6 +2096,18 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 	var->unusable = (ar >> 16) & 1;
 }
 
+static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+	struct kvm_segment s;
+
+	if (to_vmx(vcpu)->rmode.vm86_active) {
+		vmx_get_segment(vcpu, &s, seg);
+		return s.base;
+	}
+	return vmcs_readl(sf->base);
+}
+
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
 	if (!is_protmode(vcpu))
@@ -2101,6 +2149,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 	u32 ar;
 
 	if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
+		vmcs_write16(sf->selector, var->selector);
 		vmx->rmode.tr.selector = var->selector;
 		vmx->rmode.tr.base = var->base;
 		vmx->rmode.tr.limit = var->limit;
@@ -2361,11 +2410,12 @@ static bool guest_state_valid(struct kvm_vcpu *vcpu)
 
 static int init_rmode_tss(struct kvm *kvm)
 {
-	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
+	gfn_t fn;
 	u16 data = 0;
-	int ret = 0;
-	int r;
+	int r, idx, ret = 0;
 
+	idx = srcu_read_lock(&kvm->srcu);
+	fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
 	if (r < 0)
 		goto out;
@@ -2389,12 +2439,13 @@ static int init_rmode_tss(struct kvm *kvm)
 
 	ret = 1;
 out:
+	srcu_read_unlock(&kvm->srcu, idx);
 	return ret;
 }
 
 static int init_rmode_identity_map(struct kvm *kvm)
 {
-	int i, r, ret;
+	int i, idx, r, ret;
 	pfn_t identity_map_pfn;
 	u32 tmp;
 
@@ -2409,6 +2460,7 @@ static int init_rmode_identity_map(struct kvm *kvm)
 		return 1;
 	ret = 0;
 	identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
+	idx = srcu_read_lock(&kvm->srcu);
 	r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
 	if (r < 0)
 		goto out;
@@ -2424,6 +2476,7 @@ static int init_rmode_identity_map(struct kvm *kvm)
 	kvm->arch.ept_identity_pagetable_done = true;
 	ret = 1;
 out:
+	srcu_read_unlock(&kvm->srcu, idx);
 	return ret;
 }
 
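
With init_rmode() gone (removed below), neither helper can rely on a caller holding kvm->srcu, so each now brackets its own guest-memory accesses. The pattern as a standalone sketch (hypothetical helper):

/* kvm_clear_guest_page() and friends resolve memslots, which are
 * protected by kvm->srcu; the read side must be held locally. */
static int sketch_touch_guest_page(struct kvm *kvm, gfn_t fn)
{
	int idx, r;

	idx = srcu_read_lock(&kvm->srcu);
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	srcu_read_unlock(&kvm->srcu, idx);
	return r;
}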
@@ -2699,22 +2752,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	return 0;
 }
 
-static int init_rmode(struct kvm *kvm)
-{
-	int idx, ret = 0;
-
-	idx = srcu_read_lock(&kvm->srcu);
-	if (!init_rmode_tss(kvm))
-		goto exit;
-	if (!init_rmode_identity_map(kvm))
-		goto exit;
-
-	ret = 1;
-exit:
-	srcu_read_unlock(&kvm->srcu, idx);
-	return ret;
-}
-
 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2722,10 +2759,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	int ret;
 
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-	if (!init_rmode(vmx->vcpu.kvm)) {
-		ret = -ENOMEM;
-		goto out;
-	}
 
 	vmx->rmode.vm86_active = 0;
 
@@ -2805,7 +2838,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
 		if (vm_need_tpr_shadow(vmx->vcpu.kvm))
 			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
-				     page_to_phys(vmx->vcpu.arch.apic->regs_page));
+				     __pa(vmx->vcpu.arch.apic->regs));
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
@@ -2971,6 +3004,9 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	if (ret)
 		return ret;
 	kvm->arch.tss_addr = addr;
+	if (!init_rmode_tss(kvm))
+		return -ENOMEM;
+
 	return 0;
 }
 
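
Net effect: the vm86 TSS is initialized once, when its address is set, rather than on every vcpu reset. The resulting one-time initialization flow, in outline:

/*
 * KVM_SET_TSS_ADDR -> vmx_set_tss_addr() -> init_rmode_tss()
 *                     (or the enter_rmode() fallback above)
 * KVM_CREATE_VCPU  -> vmx_create_vcpu()  -> init_rmode_identity_map()
 *                     (EPT only; a no-op once the map is done)
 *
 * vmx_vcpu_reset() no longer touches guest memory at all.
 */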
@@ -3962,7 +3998,7 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
 #define Q "l"
 #endif
 
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -3991,6 +4027,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	asm(
 		/* Store host registers */
 		"push %%"R"dx; push %%"R"bp;"
+		"push %%"R"cx \n\t" /* placeholder for guest rcx */
 		"push %%"R"cx \n\t"
 		"cmp %%"R"sp, %c[host_rsp](%0) \n\t"
 		"je 1f \n\t"
@@ -4032,10 +4069,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
 		".Lkvm_vmx_return: "
 		/* Save guest registers, load host registers, keep flags */
-		"xchg %0, (%%"R"sp) \n\t"
+		"mov %0, %c[wordsize](%%"R"sp) \n\t"
+		"pop %0 \n\t"
 		"mov %%"R"ax, %c[rax](%0) \n\t"
 		"mov %%"R"bx, %c[rbx](%0) \n\t"
-		"push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
+		"pop"Q" %c[rcx](%0) \n\t"
 		"mov %%"R"dx, %c[rdx](%0) \n\t"
 		"mov %%"R"si, %c[rsi](%0) \n\t"
 		"mov %%"R"di, %c[rdi](%0) \n\t"
@@ -4053,7 +4091,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%cr2, %%"R"ax \n\t"
 		"mov %%"R"ax, %c[cr2](%0) \n\t"
 
-		"pop %%"R"bp; pop %%"R"bp; pop %%"R"dx \n\t"
+		"pop %%"R"bp; pop %%"R"dx \n\t"
 		"setbe %c[fail](%0) \n\t"
 	      : : "c"(vmx), "d"((unsigned long)HOST_RSP),
 		[launched]"i"(offsetof(struct vcpu_vmx, launched)),
@@ -4076,7 +4114,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
 		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
 #endif
-		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
+		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+		[wordsize]"i"(sizeof(ulong))
 	      : "cc", "memory"
 		, R"ax", R"bx", R"di", R"si"
 #ifdef CONFIG_X86_64
@@ -4183,8 +4222,11 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 		if (!kvm->arch.ept_identity_map_addr)
 			kvm->arch.ept_identity_map_addr =
 				VMX_EPT_IDENTITY_PAGETABLE_ADDR;
+		err = -ENOMEM;
 		if (alloc_identity_pagetable(kvm) != 0)
 			goto free_vmcs;
+		if (!init_rmode_identity_map(kvm))
+			goto free_vmcs;
 	}
 
 	return &vmx->vcpu;
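
For reference on what that init writes: the identity pagetable is a single page of large-page PDEs giving a 1:1 map of the low 4G, used to run real-mode guests under EPT when unrestricted guest mode is unavailable. A sketch of the loop body (mirroring init_rmode_identity_map(); the helper itself is hypothetical):

/* Fill one page with 1024 4M identity PDEs covering 0..4G. */
static void sketch_fill_identity_pd(u32 *pd)
{
	int i;

	for (i = 0; i < 1024; i++)
		pd[i] = ((u32)i << 22) | _PAGE_PRESENT | _PAGE_RW |
			_PAGE_USER | _PAGE_PSE;
}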