author	Gleb Natapov <gleb@redhat.com>	2010-10-14 05:22:52 -0400
committer	Avi Kivity <avi@redhat.com>	2011-01-12 04:23:16 -0500
commit	631bc4878220932fe67fc46fc7cf7cccdb1ec597 (patch)
tree	ac588182d02308a004d45a9c3ae6834d096e263d /arch/x86/kvm
parent	fd10cde9294f73eeccbc16f3fec1ae6cde7b800c (diff)
KVM: Handle async PF in a guest.
When the async PF capability is detected, hook up a special page fault handler that handles async page fault events and passes all other page faults through to the regular page fault handler. Also add async PF handling to the nested SVM emulation: an async PF always generates an exit to L1, where the vcpu thread will be scheduled out until the page is available.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
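The guest-side hook-up itself is not part of this svm.c diff (the guest handler lives in arch/x86/kernel/kvm.c). As a rough sketch only, assuming a handler named do_async_page_fault() wired into the #PF vector and reusing the reason codes and helpers that appear in the hunks below, the dispatch would look roughly like this:

/*
 * Illustrative sketch of the guest-side async PF dispatch; not taken
 * from this svm.c patch.  The per-CPU reason word is read and cleared,
 * then either the regular #PF path runs or the task is slept/woken.
 */
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		/* Not an async PF: hand off to the normal page fault handler. */
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* Host is still paging the memory in: sleep until PAGE_READY. */
		kvm_async_pf_task_wait((u32)read_cr2());
		break;
	case KVM_PV_REASON_PAGE_READY:
		/* Host finished paging in: wake whoever waited on this token. */
		kvm_async_pf_task_wake((u32)read_cr2());
		break;
	}
}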
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/svm.c	45
1 file changed, 36 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b81a9b7c2ca4..93e8120b8021 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -31,6 +31,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
+#include <asm/kvm_para.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -133,6 +134,7 @@ struct vcpu_svm {
 
 	unsigned int3_injected;
 	unsigned long int3_rip;
+	u32 apf_reason;
 };
 
 #define MSR_INVALID			0xffffffffU
@@ -1383,16 +1385,33 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
 
 static int pf_interception(struct vcpu_svm *svm)
 {
-	u64 fault_address;
+	u64 fault_address = svm->vmcb->control.exit_info_2;
 	u32 error_code;
+	int r = 1;
 
-	fault_address = svm->vmcb->control.exit_info_2;
-	error_code = svm->vmcb->control.exit_info_1;
+	switch (svm->apf_reason) {
+	default:
+		error_code = svm->vmcb->control.exit_info_1;
 
-	trace_kvm_page_fault(fault_address, error_code);
-	if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
-		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
-	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+		trace_kvm_page_fault(fault_address, error_code);
+		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
+			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
+		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+		break;
+	case KVM_PV_REASON_PAGE_NOT_PRESENT:
+		svm->apf_reason = 0;
+		local_irq_disable();
+		kvm_async_pf_task_wait(fault_address);
+		local_irq_enable();
+		break;
+	case KVM_PV_REASON_PAGE_READY:
+		svm->apf_reason = 0;
+		local_irq_disable();
+		kvm_async_pf_task_wake(fault_address);
+		local_irq_enable();
+		break;
+	}
+	return r;
 }
 
 static int db_interception(struct vcpu_svm *svm)
@@ -1836,8 +1855,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
 			return NESTED_EXIT_HOST;
 		break;
 	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
-		/* When we're shadowing, trap PFs */
-		if (!npt_enabled)
+		/* When we're shadowing, trap PFs, but not async PF */
+		if (!npt_enabled && svm->apf_reason == 0)
 			return NESTED_EXIT_HOST;
 		break;
 	case SVM_EXIT_EXCP_BASE + NM_VECTOR:
@@ -1893,6 +1912,10 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
 		if (svm->nested.intercept_exceptions & excp_bits)
 			vmexit = NESTED_EXIT_DONE;
+		/* async page fault always cause vmexit */
+		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
+			 svm->apf_reason != 0)
+			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
 	case SVM_EXIT_ERR: {
@@ -3414,6 +3437,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	svm->next_rip = 0;
 
+	/* if exit due to PF check for async PF */
+	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
+		svm->apf_reason = kvm_read_and_reset_pf_reason();
+
 	if (npt_enabled) {
 		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
 		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);