author		Marcelo Tosatti <mtosatti@redhat.com>	2010-10-22 12:18:18 -0400
committer	Avi Kivity <avi@redhat.com>		2011-01-12 04:28:40 -0500
commit		612819c3c6e67bac8fceaa7cc402f13b1b63f7e4 (patch)
tree		3739b8420660fc4de8d37d26004d9992e92acbe3 /arch/x86/kvm/mmu.c
parent		7905d9a5ad7a83f1c1c00559839857ab90afbdfc (diff)
KVM: propagate fault r/w information to gup(), allow read-only memory
As suggested by Andrea, pass the r/w error code to gup(), upgrading a read
fault to writable if the host pte allows it.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
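The effect of the change, in miniature: a read fault no longer forces a read-only mapping. The fault handler asks gup() for the page with the fault's own r/w intent and separately learns whether the host pte would permit writing; if it would, the spte can be created writable up front, sparing a second fault when the guest later writes. Below is a minimal standalone sketch of that contract, not KVM code; fault_in_page and host_pte_writable are hypothetical names.

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical model of the gfn_to_pfn_prot() contract this patch
 * introduces: the caller states whether the guest fault was a write,
 * and learns via *writable whether the host mapping permits writes.
 */
static unsigned long fault_in_page(bool write_fault, bool host_pte_writable,
				   bool *writable)
{
	/* A write fault against a read-only host pte must fail. */
	if (write_fault && !host_pte_writable)
		return 0;	/* stands in for an error pfn */

	/*
	 * Even a read fault reports the host pte's write permission,
	 * so the caller may "upgrade" the mapping to writable.
	 */
	*writable = host_pte_writable;
	return 0x1234;		/* dummy pfn */
}

int main(void)
{
	bool map_writable;

	fault_in_page(false /* read fault */, true /* host pte is rw */,
		      &map_writable);
	printf("map_writable=%d\n", map_writable);	/* prints 1 */
	return 0;
}
```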
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	27
1 file changed, 17 insertions, 10 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 99433943170c..53509f5973db 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2216,7 +2216,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int level, gfn_t gfn, pfn_t pfn)
+			int map_writable, int level, gfn_t gfn, pfn_t pfn)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
@@ -2225,9 +2225,13 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
-			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+			unsigned pte_access = ACC_ALL;
+
+			if (!map_writable)
+				pte_access &= ~ACC_WRITE_MASK;
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
 				     0, write, 1, &pt_write,
-				     level, gfn, pfn, false, true);
+				     level, gfn, pfn, false, map_writable);
 			direct_pte_prefetch(vcpu, iterator.sptep);
 			++vcpu->stat.pf_fixed;
 			break;
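The hunk above is where read-only memory actually becomes safe: if the page was not obtained writable, the write bit is stripped from the spte's access mask, so a later guest write re-faults instead of writing through a read-only host mapping. A compilable toy of the masking follows; the ACC_* values are stand-ins, the real constants live in KVM's mmu code.

```c
#include <assert.h>

/* Stand-in values; the real ACC_* constants are defined by KVM's mmu. */
#define ACC_WRITE_MASK	(1u << 1)
#define ACC_ALL		0x7u

int main(void)
{
	int map_writable = 0;		/* host pte was read-only */
	unsigned pte_access = ACC_ALL;

	if (!map_writable)
		pte_access &= ~ACC_WRITE_MASK;	/* drop write permission */

	assert(!(pte_access & ACC_WRITE_MASK));	/* spte stays read-only */
	return 0;
}
```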
@@ -2288,6 +2292,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	int level;
 	pfn_t pfn;
 	unsigned long mmu_seq;
+	bool map_writable;
 
 	level = mapping_level(vcpu, gfn);
 
@@ -2302,7 +2307,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, gfn);
+	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, &map_writable);
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -2312,7 +2317,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, v, write, level, gfn, pfn);
+	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 
@@ -2611,11 +2616,11 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
 }
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
-			 gva_t gva, pfn_t *pfn)
+			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
 {
 	bool async;
 
-	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async);
+	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
 
 	if (!async)
 		return false; /* *pfn has correct page already */
@@ -2632,7 +2637,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
 		return true;
 	}
 
-	*pfn = gfn_to_pfn(vcpu->kvm, gfn);
+	*pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
 
 	return false;
 }
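Note that try_async_pf() has two ways to produce a pfn: the async-capable lookup and the blocking fallback, and the patch threads write/writable through both so neither path loses the fault's r/w intent. A hedged sketch of that calling convention, with hypothetical stub lookups standing in for gfn_to_pfn_async() and gfn_to_pfn_prot():

```c
#include <stdbool.h>

typedef unsigned long pfn_t;

/* Hypothetical stubs for the two lookup paths. */
static pfn_t lookup_async(unsigned long gfn, bool *async, bool write,
			  bool *writable)
{
	(void)write;
	*async = false;		/* pretend the page was already resident */
	*writable = true;
	return gfn;		/* dummy pfn */
}

static pfn_t lookup_blocking(unsigned long gfn, bool write, bool *writable)
{
	(void)write;
	*writable = true;
	return gfn;
}

/*
 * Model of try_async_pf()'s shape after the patch: write/writable ride
 * along on both the fast path and the blocking fallback.
 */
static bool try_fault(unsigned long gfn, pfn_t *pfn, bool write,
		      bool *writable)
{
	bool async;

	*pfn = lookup_async(gfn, &async, write, writable);
	if (!async)
		return false;	/* *pfn and *writable already valid */

	/* ...an async page fault would be queued here when permitted... */

	*pfn = lookup_blocking(gfn, write, writable);
	return false;
}

int main(void)
{
	pfn_t pfn;
	bool writable;

	try_fault(42, &pfn, false /* read fault */, &writable);
	return 0;
}
```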
@@ -2645,6 +2650,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	int level;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	unsigned long mmu_seq;
+	int write = error_code & PFERR_WRITE_MASK;
+	bool map_writable;
 
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
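tdp_page_fault() now decodes the write bit once and reuses it for both the page lookup and __direct_map(). On x86, bit 1 of the #PF error code indicates a write access, which is what PFERR_WRITE_MASK tests; the one-liner below shows the extraction in isolation (the macro is redefined here only for the demo):

```c
#include <assert.h>

/* x86 #PF error code: bit 1 set means the access was a write. */
#define PFERR_WRITE_MASK	(1u << 1)

int main(void)
{
	unsigned int error_code = 0x2;			/* write fault */
	int write = error_code & PFERR_WRITE_MASK;	/* non-zero => write */

	assert(write);
	return 0;
}
```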
@@ -2660,7 +2667,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn))
+	if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn, write, &map_writable))
 		return 0;
 
 	/* mmio */
@@ -2670,7 +2677,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
+	r = __direct_map(vcpu, gpa, write, map_writable,
 			 level, gfn, pfn);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 