author    Joerg Roedel <joerg.roedel@amd.com>  2010-09-14 11:46:12 -0400
committer Avi Kivity <avi@redhat.com>          2010-10-24 04:52:55 -0400
commit    0959ffacf39b1ae7f56072b0c64429ee528100ca (patch)
tree      4144335ba89e62522430b04f8436f98958834e7d /arch
parent    625831a3f40d330c611fe37cf501d80d611921f9 (diff)
KVM: MMU: Don't track nested fault info in error-code
This patch moves the detection of whether a page fault was nested or not
out of the error code and into a separate variable in the fault struct.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
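To make the new flow concrete, here is a minimal, self-contained C sketch (the names demo_vcpu and propagate_fault are illustrative stand-ins, not the kernel's actual structures or callbacks): the nested-fault indication now lives in a dedicated fault.nested flag that translate_nested_gpa() sets on a failed translation and kvm_propagate_fault() reads and then clears, instead of being folded into bit 31 of the error code via PFERR_NESTED_MASK.

/*
 * Minimal sketch of the new flow (illustration only); stubbed
 * stand-ins, not the kernel's real structures or callbacks.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_vcpu {
	struct {
		unsigned long address;
		unsigned int  error_code;
		bool          nested;	/* replaces the PFERR_NESTED_MASK bit */
	} fault;
	bool mmu_is_nested;		/* stand-in for mmu_is_nested(vcpu) */
};

static void nested_mmu_inject_page_fault(struct demo_vcpu *vcpu)
{
	(void)vcpu;
	puts("inject via nested_mmu");
}

static void mmu_inject_page_fault(struct demo_vcpu *vcpu)
{
	(void)vcpu;
	puts("inject via mmu");
}

/* Mirrors the new kvm_propagate_fault(): the decision reads fault.nested
 * directly, and the flag is cleared after use; error_code is left alone. */
static void propagate_fault(struct demo_vcpu *vcpu)
{
	if (vcpu->mmu_is_nested && !vcpu->fault.nested)
		nested_mmu_inject_page_fault(vcpu);
	else
		mmu_inject_page_fault(vcpu);

	vcpu->fault.nested = false;
}

int main(void)
{
	struct demo_vcpu vcpu = { .mmu_is_nested = true };

	/* As in translate_nested_gpa(): a failed nested translation now
	 * sets the flag instead of OR-ing PFERR_NESTED_MASK into error_code. */
	vcpu.fault.nested = true;
	propagate_fault(&vcpu);		/* prints "inject via mmu" */

	propagate_fault(&vcpu);		/* flag was cleared -> "inject via nested_mmu" */
	return 0;
}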
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  1
-rw-r--r--  arch/x86/kvm/mmu.h              |  1
-rw-r--r--  arch/x86/kvm/x86.c              | 14
3 files changed, 5 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 80224bf5d4f8..519d6f784984 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -322,6 +322,7 @@ struct kvm_vcpu_arch {
 	struct {
 		u64 address;
 		unsigned error_code;
+		bool nested;
 	} fault;
 
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 513abbb5ff46..7086ca85d3e7 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -47,7 +47,6 @@
 #define PFERR_USER_MASK (1U << 2)
 #define PFERR_RSVD_MASK (1U << 3)
 #define PFERR_FETCH_MASK (1U << 4)
-#define PFERR_NESTED_MASK (1U << 31)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a465bd29f381..a51635ee85ec 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -342,18 +342,12 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
 
 void kvm_propagate_fault(struct kvm_vcpu *vcpu)
 {
-	u32 nested, error;
-
-	error = vcpu->arch.fault.error_code;
-	nested = error & PFERR_NESTED_MASK;
-	error = error & ~PFERR_NESTED_MASK;
-
-	vcpu->arch.fault.error_code = error;
-
-	if (mmu_is_nested(vcpu) && !nested)
+	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
 		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
 	else
 		vcpu->arch.mmu.inject_page_fault(vcpu);
+
+	vcpu->arch.fault.nested = false;
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -3524,7 +3518,7 @@ static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 	access |= PFERR_USER_MASK;
 	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
 	if (t_gpa == UNMAPPED_GVA)
-		vcpu->arch.fault.error_code |= PFERR_NESTED_MASK;
+		vcpu->arch.fault.nested = true;
 
 	return t_gpa;
 }