Diffstat (limited to 'arch/x86/kvm/mmu.c')
 arch/x86/kvm/mmu.c | 25 ++++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 69088a1ba509..ff606f507913 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3322,7 +3322,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 			break;
 
 		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
-						    leaf);
+						    iterator.level);
 	}
 
 	walk_shadow_page_lockless_end(vcpu);
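
The hunk above changes walk_shadow_page_get_mmio_spte() to validate each SPTE it visits against the reserved-bit mask for that entry's own paging level (iterator.level) instead of the final leaf level. A minimal user-space sketch of that per-level check follows; sptes[], rsvd_mask[] and is_present() are invented stand-ins for KVM's shadow-walk iterator and is_shadow_zero_bits_set(), not real KVM interfaces.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ROOT_LEVEL 4

/* Toy "present" bit; KVM's real test is is_shadow_present_pte(). */
static bool is_present(uint64_t spte)
{
	return spte & 1;
}

int main(void)
{
	/* One example SPTE per level, indexed 1..ROOT_LEVEL. */
	uint64_t sptes[ROOT_LEVEL + 1] = {
		0, 0x100001ULL, 0x200001ULL, 0x300001ULL, 0x400001ULL
	};
	/* Per-level reserved-bit masks (toy values: bit 8 at levels 3-4). */
	uint64_t rsvd_mask[ROOT_LEVEL + 1] = { 0, 0, 0, 0x100, 0x100 };
	bool reserved = false;
	int level;

	for (level = ROOT_LEVEL; level >= 1; level--) {
		uint64_t spte = sptes[level];

		if (!is_present(spte))
			break;
		/* The fix: check against the current level's mask. */
		reserved |= (spte & rsvd_mask[level]) != 0;
	}
	printf("reserved bits set: %s\n", reserved ? "yes" : "no");
	return 0;
}
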
@@ -3614,7 +3614,7 @@ static void
 __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 			struct rsvd_bits_validate *rsvd_check,
 			int maxphyaddr, int level, bool nx, bool gbpages,
-			bool pse)
+			bool pse, bool amd)
 {
 	u64 exb_bit_rsvd = 0;
 	u64 gbpages_bit_rsvd = 0;
@@ -3631,7 +3631,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
 	 * leaf entries) on AMD CPUs only.
 	 */
-	if (guest_cpuid_is_amd(vcpu))
+	if (amd)
 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
 
 	switch (level) {
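
rsvd_bits(8, 8) in the hunk above builds a mask covering only bit 8, the bit that non-leaf PML4Es and PDPEs reserve on AMD CPUs. For reference, a stand-alone sketch of the helper's semantics (an inclusive bit-range mask, matching the definition in KVM's mmu header as far as I can tell):

#include <stdint.h>
#include <stdio.h>

/* Same semantics as KVM's rsvd_bits(s, e): mask of bits s..e inclusive. */
static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	/* 0x100: only bit 8, the AMD non-leaf reserved bit. */
	printf("rsvd_bits(8, 8)   = %#llx\n",
	       (unsigned long long)rsvd_bits(8, 8));
	/* A wider range, e.g. bits above a 40-bit MAXPHYADDR. */
	printf("rsvd_bits(40, 51) = %#llx\n",
	       (unsigned long long)rsvd_bits(40, 51));
	return 0;
}
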
@@ -3699,7 +3699,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
 				cpuid_maxphyaddr(vcpu), context->root_level,
 				context->nx, guest_cpuid_has_gbpages(vcpu),
-				is_pse(vcpu));
+				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
 }
 
 static void
@@ -3749,13 +3749,24 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
+	/*
+	 * Passing "true" to the last argument is okay; it adds a check
+	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
+	 */
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
 				context->shadow_root_level, context->nx,
-				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
+				true);
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
 
+static inline bool boot_cpu_is_amd(void)
+{
+	WARN_ON_ONCE(!tdp_enabled);
+	return shadow_x_mask == 0;
+}
+
 /*
  * the direct page table on host, use as much mmu features as
  * possible, however, kvm currently does not do execution-protection.
@@ -3764,11 +3775,11 @@ static void
 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				struct kvm_mmu *context)
 {
-	if (guest_cpuid_is_amd(vcpu))
+	if (boot_cpu_is_amd())
 		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 					boot_cpu_data.x86_phys_bits,
 					context->shadow_root_level, false,
-					cpu_has_gbpages, true);
+					cpu_has_gbpages, true, true);
 	else
 		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
 					    boot_cpu_data.x86_phys_bits,
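
The last two hunks carry the point of the patch: shadow_zero_check describes bits that the host's page-table walker treats as reserved, so for TDP it must be derived from host properties (boot_cpu_data.x86_phys_bits and the new boot_cpu_is_amd(), which infers an AMD/NPT format from shadow_x_mask being left zero) rather than from the guest's CPUID. A simplified stand-alone sketch of building such a host-keyed mask; host_nonleaf_rsvd_mask() and its toy inputs are illustrative only, not KVM's actual rsvd_bits_validate tables.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

/*
 * Toy: reserved bits in a non-leaf entry as the *host* walker sees them.
 * host_phys_bits stands in for boot_cpu_data.x86_phys_bits and host_is_amd
 * for boot_cpu_is_amd(); both are plain parameters here, not KVM APIs.
 */
static uint64_t host_nonleaf_rsvd_mask(int host_phys_bits, bool host_is_amd)
{
	/* Bits between MAXPHYADDR and 51 are always reserved. */
	uint64_t mask = rsvd_bits(host_phys_bits, 51);

	/* Bit 8 is reserved in non-leaf entries on AMD only. */
	if (host_is_amd)
		mask |= rsvd_bits(8, 8);
	return mask;
}

int main(void)
{
	uint64_t spte = 0x0000000123451127ULL;	/* made-up non-leaf SPTE */
	uint64_t amd_mask = host_nonleaf_rsvd_mask(40, true);
	uint64_t intel_mask = host_nonleaf_rsvd_mask(40, false);

	/* Bit 8 is set in the example SPTE, so only the AMD mask flags it. */
	printf("AMD:   %s\n", (spte & amd_mask) ? "reserved bits set" : "clean");
	printf("Intel: %s\n", (spte & intel_mask) ? "reserved bits set" : "clean");
	return 0;
}
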