Diffstat (limited to 'arch')
 arch/x86/include/asm/kvm_emulate.h |  2
 arch/x86/include/asm/kvm_host.h    | 17
 arch/x86/kvm/mmu.c                 |  5
 arch/x86/kvm/paging_tmpl.h         |  6
 arch/x86/kvm/svm.c                 |  7
 arch/x86/kvm/x86.c                 | 44
 6 files changed, 40 insertions(+), 41 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 87d017e276f4..bf70ecea3974 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -19,6 +19,8 @@ struct x86_exception {
 	u8 vector;
 	bool error_code_valid;
 	u16 error_code;
+	bool nested_page_fault;
+	u64 address; /* cr2 or nested page fault gpa */
 };
 
 /*
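
For reference, struct x86_exception as it stands after this hunk (a sketch reconstructed from the context lines above, assuming no other fields precede the ones shown):

	struct x86_exception {
		u8 vector;
		bool error_code_valid;
		u16 error_code;
		bool nested_page_fault;
		u64 address; /* cr2 or nested page fault gpa */
	};

One struct now carries everything the injection paths below need: the vector, the error code, the faulting address, and whether the fault was a nested page fault.
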
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9980a2484624..0c0941db31c4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -242,7 +242,8 @@ struct kvm_mmu {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, bool no_apf);
-	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
 			    struct x86_exception *exception);
@@ -318,16 +319,6 @@ struct kvm_vcpu_arch {
 	 */
 	struct kvm_mmu *walk_mmu;
 
-	/*
-	 * This struct is filled with the necessary information to propagate a
-	 * page fault into the guest
-	 */
-	struct {
-		u64 address;
-		unsigned error_code;
-		bool nested;
-	} fault;
-
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -686,11 +677,11 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
-void kvm_propagate_fault(struct kvm_vcpu *vcpu);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
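
With the per-vcpu scratch struct gone, every assignment to the inject_page_fault callback must now name a function of the two-argument type. A sketch of the wiring this relies on (assumed from the surrounding tree, not shown in this diff; the actual assignment sites live in mmu.c and svm.c):

	/* Assumed wiring, for orientation only. Common case: the generic
	 * injector queues a #PF into the guest. */
	vcpu->arch.mmu.inject_page_fault = kvm_inject_page_fault;
	/* Nested SVM with NPT: override so the fault becomes a synthetic
	 * NPF vmexit into the L1 hypervisor instead. */
	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
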
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9ce041469a8e..d35950087e3d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2736,9 +2736,10 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 	return vcpu->arch.cr3;
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu)
+static void inject_page_fault(struct kvm_vcpu *vcpu,
+			      struct x86_exception *fault)
 {
-	vcpu->arch.mmu.inject_page_fault(vcpu);
+	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ad5a5a28b96e..d5a0a11d33a1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -279,8 +279,8 @@ error:
 	if (rsvd_fault)
 		walker->fault.error_code |= PFERR_RSVD_MASK;
 
-	vcpu->arch.fault.address = addr;
-	vcpu->arch.fault.error_code = walker->fault.error_code;
+	walker->fault.address = addr;
+	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
 
 	trace_kvm_mmu_walker_error(walker->fault.error_code);
 	return 0;
@@ -568,7 +568,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		inject_page_fault(vcpu);
+		inject_page_fault(vcpu, &walker.fault);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 		return 0;
 	}
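
A brief gloss on the walker change above (a sketch; "mmu" is the MMU this particular walk ran on):

	/* The walk was a second-stage (nested) translation exactly when it
	 * ran on an MMU other than the one used to walk guest-virtual
	 * addresses, so record that distinction in the fault itself. */
	walker->fault.nested_page_fault = (mmu != vcpu->arch.walk_mmu);
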
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 28274cf307ba..b985cb81a573 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1647,14 +1647,15 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
 	force_new_asid(vcpu);
 }
 
-static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
+static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+				       struct x86_exception *fault)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.exit_code = SVM_EXIT_NPF;
 	svm->vmcb->control.exit_code_hi = 0;
-	svm->vmcb->control.exit_info_1 = vcpu->arch.fault.error_code;
-	svm->vmcb->control.exit_info_2 = vcpu->arch.fault.address;
+	svm->vmcb->control.exit_info_1 = fault->error_code;
+	svm->vmcb->control.exit_info_2 = fault->address;
 
 	nested_svm_vmexit(svm);
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a7a7decba43f..47e5a41cc40e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -334,23 +334,19 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-	unsigned error_code = vcpu->arch.fault.error_code;
-
 	++vcpu->stat.pf_guest;
-	vcpu->arch.cr2 = vcpu->arch.fault.address;
-	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+	vcpu->arch.cr2 = fault->address;
+	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
 }
 
-void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
-		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
-		vcpu->arch.mmu.inject_page_fault(vcpu);
-
-	vcpu->arch.fault.nested = false;
+		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
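
Routing note on kvm_propagate_fault(): an ordinary #PF raised while running nested (fault->nested_page_fault clear) is reflected through the nested MMU into the guest, while a true nested page fault goes through arch.mmu, whose injector turns it into an NPF exit (see nested_svm_inject_npf_exit above). The fault no longer needs the old "nested = false" reset, since it is no longer global state. The helper in the condition is, assuming its usual definition elsewhere in this tree:

	static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
	}

This pairs with the paging_tmpl.h change, which computes nested_page_fault as mmu != vcpu->arch.walk_mmu at walk time.
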
@@ -3610,8 +3606,6 @@ static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
 	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
-	if (t_gpa == UNMAPPED_GVA)
-		vcpu->arch.fault.nested = true;
 
 	return t_gpa;
 }
@@ -4259,7 +4253,7 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	if (ctxt->exception.vector == PF_VECTOR)
-		kvm_propagate_fault(vcpu);
+		kvm_propagate_fault(vcpu, &ctxt->exception);
 	else if (ctxt->exception.error_code_valid)
 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
 				      ctxt->exception.error_code);
@@ -6264,6 +6258,8 @@ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work)
 {
+	struct x86_exception fault;
+
 	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
 
@@ -6272,15 +6268,20 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 	    kvm_x86_ops->get_cpl(vcpu) == 0))
 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
 	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
-		vcpu->arch.fault.error_code = 0;
-		vcpu->arch.fault.address = work->arch.token;
-		kvm_inject_page_fault(vcpu);
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
 	}
 }
 
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				 struct kvm_async_pf *work)
 {
+	struct x86_exception fault;
+
 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 	if (is_error_page(work->page))
 		work->arch.token = ~0; /* broadcast wakeup */
@@ -6289,9 +6290,12 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 
 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
-		vcpu->arch.fault.error_code = 0;
-		vcpu->arch.fault.address = work->arch.token;
-		kvm_inject_page_fault(vcpu);
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
 	}
 	vcpu->arch.apf.halted = false;
 }
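
Because the fault descriptor in the two async-pf paths above is an uninitialized stack variable, every field consumed downstream is set explicitly, including vector and error_code_valid, which the old vcpu->arch.fault scratch area never carried. An equivalent, more compact shape (illustrative only; the patch writes the fields one by one):

	struct x86_exception fault = {
		.vector            = PF_VECTOR,
		.error_code_valid  = true,
		.error_code        = 0,
		.nested_page_fault = false,
		.address           = work->arch.token,
	};
	kvm_inject_page_fault(vcpu, &fault);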