author		Marcelo Tosatti <mtosatti@redhat.com>	2010-10-22 12:18:18 -0400
committer	Avi Kivity <avi@redhat.com>	2011-01-12 04:28:40 -0500
commit		612819c3c6e67bac8fceaa7cc402f13b1b63f7e4 (patch)
tree		3739b8420660fc4de8d37d26004d9992e92acbe3 /arch/x86/kvm/paging_tmpl.h
parent		7905d9a5ad7a83f1c1c00559839857ab90afbdfc (diff)
KVM: propagate fault r/w information to gup(), allow read-only memory

As suggested by Andrea, pass r/w error code to gup(), upgrading read fault
to writable if host pte allows it.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
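The paging_tmpl.h hunks below only consume the new map_writable result; the gup() side that produces it lives in virt/kvm/kvm_main.c and is not shown in this diffstat. The following is a minimal sketch of the read-fault-to-writable upgrade described in the commit message, assuming a hypothetical helper hva_to_pfn_sketch() built on get_user_pages_fast()/__get_user_pages_fast(); it is an illustration, not the committed code.

/*
 * Illustrative sketch only -- not part of the diff below.  The helper name
 * and exact shape are assumptions; error handling is reduced to a stub.
 */
static pfn_t hva_to_pfn_sketch(unsigned long addr, bool write_fault,
			       bool *writable)
{
	struct page *page[1];
	int npages;

	*writable = write_fault;

	/* Fault the page in with the access the guest actually requested. */
	npages = get_user_pages_fast(addr, 1, write_fault, page);
	if (npages != 1)
		return 0;	/* the real code returns a dedicated error pfn */

	/*
	 * On a read fault, opportunistically retry with write access.  This
	 * only succeeds when the host pte already allows writing, in which
	 * case the spte can be mapped writable up front and a later guest
	 * write avoids another fault.
	 */
	if (!write_fault) {
		struct page *wpage[1];

		if (__get_user_pages_fast(addr, 1, 1, wpage) == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}
	}

	return page_to_pfn(page[0]);
}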
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d6b281e989b1..ba00eefa7bcd 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -427,7 +427,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *gw,
 			 int user_fault, int write_fault, int hlevel,
-			 int *ptwrite, pfn_t pfn)
+			 int *ptwrite, pfn_t pfn, bool map_writable)
 {
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *sp = NULL;
@@ -501,7 +501,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
 		     user_fault, write_fault, dirty, ptwrite, it.level,
-		     gw->gfn, pfn, false, true);
+		     gw->gfn, pfn, false, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
 	return it.sptep;
@@ -539,6 +539,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	pfn_t pfn;
 	int level = PT_PAGE_TABLE_LEVEL;
 	unsigned long mmu_seq;
+	bool map_writable;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -569,13 +570,17 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, no_apf, walker.gfn, addr, &pfn))
+	if (try_async_pf(vcpu, no_apf, walker.gfn, addr, &pfn, write_fault,
+			 &map_writable))
 		return 0;
 
 	/* mmio */
 	if (is_error_pfn(pfn))
 		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);
 
+	if (!map_writable)
+		walker.pte_access &= ~ACC_WRITE_MASK;
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
@@ -583,7 +588,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	kvm_mmu_free_some_pages(vcpu);
 	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-			     level, &write_pt, pfn);
+			     level, &write_pt, pfn, map_writable);
 	(void)sptep;
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
 		 sptep, *sptep, write_pt);