Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	21
1 file changed, 7 insertions, 14 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 02c839f40e29..d1986b7dcec7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -217,11 +217,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static bool is_write_protection(struct kvm_vcpu *vcpu)
-{
-	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
 static int is_cpuid_PSE36(void)
 {
 	return 1;
@@ -243,11 +238,6 @@ static int is_large_pte(u64 pte)
 	return pte & PT_PAGE_SIZE_MASK;
 }
 
-static int is_writable_pte(unsigned long pte)
-{
-	return pte & PT_WRITABLE_MASK;
-}
-
 static int is_dirty_gpte(unsigned long pte)
 {
 	return pte & PT_DIRTY_MASK;
@@ -2247,15 +2237,17 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
+			       unsigned access, gfn_t gfn, pfn_t pfn)
 {
 	kvm_release_pfn_clean(pfn);
 	if (is_hwpoison_pfn(pfn)) {
-		kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
+		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
 	} else if (is_fault_pfn(pfn))
 		return -EFAULT;
 
+	vcpu_cache_mmio_info(vcpu, gva, gfn, access);
 	return 1;
 }
 
@@ -2337,7 +2329,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 
 	/* mmio */
 	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+		return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2564,6 +2556,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 
+	vcpu_clear_mmio_info(vcpu, ~0ul);
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -2710,7 +2703,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 
 	/* mmio */
 	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+		return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
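
The two helpers this patch starts calling, vcpu_cache_mmio_info() and vcpu_clear_mmio_info(), are defined outside mmu.c and therefore fall outside this diffstat-limited view. As a rough sketch of the idea only, assuming the cache lives in per-vcpu fields named mmio_gva, mmio_gfn, and access (names not shown in this diff), they could look like:

/*
 * Sketch only: these helpers are added elsewhere in the same series
 * (likely arch/x86/kvm/x86.h) and do not appear in this mmu.c-only
 * view. The field names below are assumptions for illustration.
 */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	/* Remember the last MMIO-looking fault so that a repeat fault on
	 * the same virtual page can skip the guest page-table walk. */
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
}

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	/* gva == ~0ul acts as a wildcard: drop the cache unconditionally. */
	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

Note the asymmetry between the two callers of kvm_handle_bad_page(): nonpaging_map() passes the faulting gva and ACC_ALL, while tdp_page_fault() passes zero for both, presumably because with TDP the fault address is a guest physical address, so there is no guest virtual mapping to cache. Likewise, mmu_sync_roots() clears the cache with the ~0ul wildcard, since any cached translation may be stale once the roots are re-synced.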