aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaolo Bonzini <pbonzini@redhat.com>2017-08-23 17:14:38 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2017-08-25 03:28:28 -0400
commitb9dd21e104bcd45e124acfe978a79df71259e59b (patch)
tree5a38e9036b388cee6f72f861df7adf9ddeafd158
parentc469268cd523245cc58255f6696e0c295485cb0b (diff)
KVM: x86: simplify handling of PKRU
Move it to struct kvm_vcpu_arch, replacing guest_pkru_valid with a simple comparison against the host value of the register. The write of PKRU in addition can be skipped if the guest has not enabled the feature. Once we do this, we need not test OSPKE in the host anymore, because guest_CR4.PKE=1 implies host_CR4.PKE=1. The static PKU test is kept to elide the code on older CPUs. Suggested-by: Yang Zhang <zy107165@alibaba-inc.com> Fixes: 1be0e61c1f255faaeab04a390e00c8b9b9042870 Cc: stable@vger.kernel.org Reviewed-by: David Hildenbrand <david@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h5
-rw-r--r--arch/x86/kvm/mmu.h2
-rw-r--r--arch/x86/kvm/svm.c7
-rw-r--r--arch/x86/kvm/vmx.c25
5 files changed, 10 insertions, 30 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 87ac4fba6d8e..f4d120a3e22e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -492,6 +492,7 @@ struct kvm_vcpu_arch {
492 unsigned long cr4; 492 unsigned long cr4;
493 unsigned long cr4_guest_owned_bits; 493 unsigned long cr4_guest_owned_bits;
494 unsigned long cr8; 494 unsigned long cr8;
495 u32 pkru;
495 u32 hflags; 496 u32 hflags;
496 u64 efer; 497 u64 efer;
497 u64 apic_base; 498 u64 apic_base;
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 762cdf2595f9..e1e89ee4af75 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
84 | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32); 84 | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
85} 85}
86 86
87static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
88{
89 return kvm_x86_ops->get_pkru(vcpu);
90}
91
92static inline void enter_guest_mode(struct kvm_vcpu *vcpu) 87static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
93{ 88{
94 vcpu->arch.hflags |= HF_GUEST_MASK; 89 vcpu->arch.hflags |= HF_GUEST_MASK;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d7d248a000dd..4b9a3ae6b725 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -185,7 +185,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
185 * index of the protection domain, so pte_pkey * 2 is 185 * index of the protection domain, so pte_pkey * 2 is
186 * is the index of the first bit for the domain. 186 * is the index of the first bit for the domain.
187 */ 187 */
188 pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3; 188 pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
189 189
190 /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */ 190 /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
191 offset = (pfec & ~1) + 191 offset = (pfec & ~1) +
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 56ba05312759..af256b786a70 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1777,11 +1777,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1777 to_svm(vcpu)->vmcb->save.rflags = rflags; 1777 to_svm(vcpu)->vmcb->save.rflags = rflags;
1778} 1778}
1779 1779
1780static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
1781{
1782 return 0;
1783}
1784
1785static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 1780static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1786{ 1781{
1787 switch (reg) { 1782 switch (reg) {
@@ -5413,8 +5408,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
5413 .get_rflags = svm_get_rflags, 5408 .get_rflags = svm_get_rflags,
5414 .set_rflags = svm_set_rflags, 5409 .set_rflags = svm_set_rflags,
5415 5410
5416 .get_pkru = svm_get_pkru,
5417
5418 .tlb_flush = svm_flush_tlb, 5411 .tlb_flush = svm_flush_tlb,
5419 5412
5420 .run = svm_vcpu_run, 5413 .run = svm_vcpu_run,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9b21b1223035..c6ef2940119b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -636,8 +636,6 @@ struct vcpu_vmx {
636 636
637 u64 current_tsc_ratio; 637 u64 current_tsc_ratio;
638 638
639 bool guest_pkru_valid;
640 u32 guest_pkru;
641 u32 host_pkru; 639 u32 host_pkru;
642 640
643 /* 641 /*
@@ -2383,11 +2381,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2383 to_vmx(vcpu)->emulation_required = emulation_required(vcpu); 2381 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2384} 2382}
2385 2383
2386static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
2387{
2388 return to_vmx(vcpu)->guest_pkru;
2389}
2390
2391static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) 2384static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2392{ 2385{
2393 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 2386 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@@ -9020,8 +9013,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9020 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 9013 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
9021 vmx_set_interrupt_shadow(vcpu, 0); 9014 vmx_set_interrupt_shadow(vcpu, 0);
9022 9015
9023 if (vmx->guest_pkru_valid) 9016 if (static_cpu_has(X86_FEATURE_PKU) &&
9024 __write_pkru(vmx->guest_pkru); 9017 kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
9018 vcpu->arch.pkru != vmx->host_pkru)
9019 __write_pkru(vcpu->arch.pkru);
9025 9020
9026 atomic_switch_perf_msrs(vmx); 9021 atomic_switch_perf_msrs(vmx);
9027 debugctlmsr = get_debugctlmsr(); 9022 debugctlmsr = get_debugctlmsr();
@@ -9169,13 +9164,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9169 * back on host, so it is safe to read guest PKRU from current 9164 * back on host, so it is safe to read guest PKRU from current
9170 * XSAVE. 9165 * XSAVE.
9171 */ 9166 */
9172 if (boot_cpu_has(X86_FEATURE_OSPKE)) { 9167 if (static_cpu_has(X86_FEATURE_PKU) &&
9173 vmx->guest_pkru = __read_pkru(); 9168 kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
9174 if (vmx->guest_pkru != vmx->host_pkru) { 9169 vcpu->arch.pkru = __read_pkru();
9175 vmx->guest_pkru_valid = true; 9170 if (vcpu->arch.pkru != vmx->host_pkru)
9176 __write_pkru(vmx->host_pkru); 9171 __write_pkru(vmx->host_pkru);
9177 } else
9178 vmx->guest_pkru_valid = false;
9179 } 9172 }
9180 9173
9181 /* 9174 /*
@@ -11682,8 +11675,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
11682 .get_rflags = vmx_get_rflags, 11675 .get_rflags = vmx_get_rflags,
11683 .set_rflags = vmx_set_rflags, 11676 .set_rflags = vmx_set_rflags,
11684 11677
11685 .get_pkru = vmx_get_pkru,
11686
11687 .tlb_flush = vmx_flush_tlb, 11678 .tlb_flush = vmx_flush_tlb,
11688 11679
11689 .run = vmx_vcpu_run, 11680 .run = vmx_vcpu_run,