diff options

 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/mmu.c              | 16 +++++++++++++++-
 arch/x86/kvm/paging_tmpl.h      |  4 ++--
 3 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index bd59b482f1a8..b43686a44877 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -259,6 +259,8 @@ struct kvm_mmu {
 	u64 *lm_root;
 	u64 rsvd_bits_mask[2][4];
 
+	bool nx;
+
 	u64 pdptrs[4]; /* pae */
 };
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dd76765310ce..95cbeed74cf9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2634,6 +2634,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = true;
+	context->nx = false;
 	return 0;
 }
 
@@ -2687,7 +2688,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 	u64 exb_bit_rsvd = 0;
 
-	if (!is_nx(vcpu))
+	if (!context->nx)
 		exb_bit_rsvd = rsvd_bits(63, 63);
 	switch (level) {
 	case PT32_ROOT_LEVEL:
@@ -2746,6 +2747,8 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 					struct kvm_mmu *context,
 					int level)
 {
+	context->nx = is_nx(vcpu);
+
 	reset_rsvds_bits_mask(vcpu, context, level);
 
 	ASSERT(is_pae(vcpu));
@@ -2772,6 +2775,8 @@ static int paging64_init_context(struct kvm_vcpu *vcpu,
 static int paging32_init_context(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu *context)
 {
+	context->nx = false;
+
 	reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
 
 	context->new_cr3 = paging_new_cr3;
@@ -2810,19 +2815,24 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
 	context->get_cr3 = get_cr3;
 	context->inject_page_fault = kvm_inject_page_fault;
+	context->nx = is_nx(vcpu);
 
 	if (!is_paging(vcpu)) {
+		context->nx = false;
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
 		context->root_level = 0;
 	} else if (is_long_mode(vcpu)) {
+		context->nx = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT64_ROOT_LEVEL;
 	} else if (is_pae(vcpu)) {
+		context->nx = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 		context->root_level = PT32E_ROOT_LEVEL;
 	} else {
+		context->nx = false;
 		reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
 		context->gva_to_gpa = paging32_gva_to_gpa;
 		context->root_level = PT32_ROOT_LEVEL;
@@ -2878,17 +2888,21 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	 * functions between mmu and nested_mmu are swapped.
 	 */
 	if (!is_paging(vcpu)) {
+		g_context->nx = false;
 		g_context->root_level = 0;
 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
 	} else if (is_long_mode(vcpu)) {
+		g_context->nx = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
 		g_context->root_level = PT64_ROOT_LEVEL;
 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
 	} else if (is_pae(vcpu)) {
+		g_context->nx = is_nx(vcpu);
 		reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
 		g_context->root_level = PT32E_ROOT_LEVEL;
 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
 	} else {
+		g_context->nx = false;
 		reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
 		g_context->root_level = PT32_ROOT_LEVEL;
 		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a28f09bb76c6..2bdd843ad63f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -105,7 +105,7 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
 
 	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
 #if PTTYPE == 64
-	if (is_nx(vcpu))
+	if (vcpu->arch.mmu.nx)
 		access &= ~(gpte >> PT64_NX_SHIFT);
 #endif
 	return access;
@@ -272,7 +272,7 @@ error:
 	walker->error_code |= PFERR_WRITE_MASK;
 	if (user_fault)
 		walker->error_code |= PFERR_USER_MASK;
-	if (fetch_fault && is_nx(vcpu))
+	if (fetch_fault && mmu->nx)
 		walker->error_code |= PFERR_FETCH_MASK;
 	if (rsvd_fault)
 		walker->error_code |= PFERR_RSVD_MASK;