author		Sheng Yang <sheng.yang@intel.com>	2007-10-28 21:40:42 -0400
committer	Avi Kivity <avi@qumranet.com>		2008-01-30 10:52:58 -0500
commit		f78e0e2ee498e8f847500b565792c7d7634dcf54 (patch)
tree		dfd8f35883b40939a1ec013e27e6303af06d3e77
parent		a03490ed29d2771c675d4d9c0ffe22e19a1757f3 (diff)
KVM: VMX: Enable memory mapped TPR shadow (FlexPriority)
This patch builds on the CR8/TPR patch and enables the TPR shadow (FlexPriority)
for 32-bit Windows guests. Since the TPR is accessed very frequently by 32-bit
Windows, especially in SMP guests, we saw a significant performance gain with
FlexPriority enabled.
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
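
With FlexPriority, the CPU satisfies most guest reads and writes of the
memory-mapped TPR (offset 0x80 of the APIC page at 0xfee00000) from the
virtual-APIC page without a vmexit; accesses to other APIC-page offsets are
reported as APIC-access exits and emulated, which is what the new
handle_apic_access() path below does. As a rough sketch of the capability
check the patch wires up (it reuses the patch's own identifiers; the wrapper
function name is illustrative only, not part of the patch):

	/* Illustrative sketch, not part of the patch. */
	static int example_can_use_flexpriority(struct kvm *kvm)
	{
		/* Secondary processor-based controls must be available... */
		if (!(vmcs_config.cpu_based_exec_ctrl &
		      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
			return 0;
		/* ...and must offer "virtualize APIC accesses" (FlexPriority). */
		if (!(vmcs_config.cpu_based_2nd_exec_ctrl &
		      SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
			return 0;
		/* The in-kernel irqchip (local APIC emulation) must be in use. */
		return irqchip_in_kernel(kvm);
	}

This is the same condition the patch expresses in vmx.c via
cpu_has_secondary_exec_ctrls(), cpu_has_vmx_virtualize_apic_accesses() and
vm_need_virtualize_apic_accesses().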
-rw-r--r--	drivers/kvm/kvm.h	|   4
-rw-r--r--	drivers/kvm/kvm_main.c	|  56
-rw-r--r--	drivers/kvm/vmx.c	| 117
-rw-r--r--	drivers/kvm/vmx.h	|   5
4 files changed, 152 insertions, 30 deletions
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 516f79ffd126..22317d6f66ae 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -383,6 +383,7 @@ struct kvm {
 	struct kvm_ioapic *vioapic;
 	int round_robin_prev_vcpu;
 	unsigned int tss_addr;
+	struct page *apic_access_page;
 };
 
 static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
@@ -522,6 +523,9 @@ int is_error_page(struct page *page);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
 			  int user_alloc);
+int __kvm_set_memory_region(struct kvm *kvm,
+			    struct kvm_userspace_memory_region *mem,
+			    int user_alloc);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page(struct page *page);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8f7125710d02..ac5ed00e9065 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -362,10 +362,12 @@ EXPORT_SYMBOL_GPL(fx_init);
  * space.
  *
  * Discontiguous memory is allowed, mostly for framebuffers.
+ *
+ * Must be called holding kvm->lock.
  */
-int kvm_set_memory_region(struct kvm *kvm,
+int __kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
 			  int user_alloc)
 {
 	int r;
 	gfn_t base_gfn;
@@ -392,8 +394,6 @@ int kvm_set_memory_region(struct kvm *kvm,
 	if (!npages)
 		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
-	mutex_lock(&kvm->lock);
-
 	new = old = *memslot;
 
 	new.base_gfn = base_gfn;
@@ -403,7 +403,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 	/* Disallow changing a memory slot's size. */
 	r = -EINVAL;
 	if (npages && old.npages && npages != old.npages)
-		goto out_unlock;
+		goto out_free;
 
 	/* Check for overlaps */
 	r = -EEXIST;
@@ -414,7 +414,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 			continue;
 		if (!((base_gfn + npages <= s->base_gfn) ||
 		      (base_gfn >= s->base_gfn + s->npages)))
-			goto out_unlock;
+			goto out_free;
 	}
 
 	/* Free page dirty bitmap if unneeded */
@@ -428,7 +428,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 		new.rmap = vmalloc(npages * sizeof(struct page *));
 
 		if (!new.rmap)
-			goto out_unlock;
+			goto out_free;
 
 		memset(new.rmap, 0, npages * sizeof(*new.rmap));
 
@@ -445,7 +445,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 			up_write(&current->mm->mmap_sem);
 
 			if (IS_ERR((void *)new.userspace_addr))
-				goto out_unlock;
+				goto out_free;
 		}
 	} else {
 		if (!old.user_alloc && old.rmap) {
@@ -468,7 +468,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
-			goto out_unlock;
+			goto out_free;
 		memset(new.dirty_bitmap, 0, dirty_bytes);
 	}
 
@@ -498,18 +498,28 @@ int kvm_set_memory_region(struct kvm *kvm,
 		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 	kvm_flush_remote_tlbs(kvm);
 
-	mutex_unlock(&kvm->lock);
-
 	kvm_free_physmem_slot(&old, &new);
 	return 0;
 
-out_unlock:
-	mutex_unlock(&kvm->lock);
+out_free:
 	kvm_free_physmem_slot(&new, &old);
 out:
 	return r;
 
 }
+EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
+
+int kvm_set_memory_region(struct kvm *kvm,
+			  struct kvm_userspace_memory_region *mem,
+			  int user_alloc)
+{
+	int r;
+
+	mutex_lock(&kvm->lock);
+	r = __kvm_set_memory_region(kvm, mem, user_alloc);
+	mutex_unlock(&kvm->lock);
+	return r;
+}
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
@@ -888,14 +898,21 @@ static int emulator_read_emulated(unsigned long addr,
 		memcpy(val, vcpu->mmio_data, bytes);
 		vcpu->mmio_read_completed = 0;
 		return X86EMUL_CONTINUE;
-	} else if (emulator_read_std(addr, val, bytes, vcpu)
-		   == X86EMUL_CONTINUE)
-		return X86EMUL_CONTINUE;
+	}
 
 	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+
+	/* For APIC access vmexit */
+	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+		goto mmio;
+
+	if (emulator_read_std(addr, val, bytes, vcpu)
+			== X86EMUL_CONTINUE)
+		return X86EMUL_CONTINUE;
 	if (gpa == UNMAPPED_GVA)
 		return X86EMUL_PROPAGATE_FAULT;
 
+mmio:
 	/*
 	 * Is this MMIO handled locally?
 	 */
@@ -938,9 +955,14 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
+	/* For APIC access vmexit */
+	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+		goto mmio;
+
 	if (emulator_write_phys(vcpu, gpa, val, bytes))
 		return X86EMUL_CONTINUE;
 
+mmio:
 	/*
 	 * Is this MMIO handled locally?
 	 */
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 7fe834cb0d81..eca422e9506d 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -86,6 +86,7 @@ static struct vmcs_config {
 	u32 revision_id;
 	u32 pin_based_exec_ctrl;
 	u32 cpu_based_exec_ctrl;
+	u32 cpu_based_2nd_exec_ctrl;
 	u32 vmexit_ctrl;
 	u32 vmentry_ctrl;
 } vmcs_config;
@@ -179,6 +180,29 @@ static inline int vm_need_tpr_shadow(struct kvm *kvm)
 	return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
 }
 
+static inline int cpu_has_secondary_exec_ctrls(void)
+{
+	return (vmcs_config.cpu_based_exec_ctrl &
+		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
+}
+
+static inline int vm_need_secondary_exec_ctrls(struct kvm *kvm)
+{
+	return ((cpu_has_secondary_exec_ctrls()) && (irqchip_in_kernel(kvm)));
+}
+
+static inline int cpu_has_vmx_virtualize_apic_accesses(void)
+{
+	return (vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+}
+
+static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
+{
+	return ((cpu_has_vmx_virtualize_apic_accesses()) &&
+		(irqchip_in_kernel(kvm)));
+}
+
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
@@ -918,6 +942,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	u32 min, opt;
 	u32 _pin_based_exec_control = 0;
 	u32 _cpu_based_exec_control = 0;
+	u32 _cpu_based_2nd_exec_control = 0;
 	u32 _vmexit_control = 0;
 	u32 _vmentry_control = 0;
 
@@ -935,11 +960,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	      CPU_BASED_USE_IO_BITMAPS |
 	      CPU_BASED_MOV_DR_EXITING |
 	      CPU_BASED_USE_TSC_OFFSETING;
-#ifdef CONFIG_X86_64
-	opt = CPU_BASED_TPR_SHADOW;
-#else
-	opt = 0;
-#endif
+	opt = CPU_BASED_TPR_SHADOW |
+	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
 				&_cpu_based_exec_control) < 0)
 		return -EIO;
@@ -948,6 +970,18 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
 				   ~CPU_BASED_CR8_STORE_EXITING;
 #endif
+	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
+		min = 0;
+		opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
+					&_cpu_based_2nd_exec_control) < 0)
+			return -EIO;
+	}
+#ifndef CONFIG_X86_64
+	if (!(_cpu_based_2nd_exec_control &
+		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
+#endif
 
 	min = 0;
 #ifdef CONFIG_X86_64
@@ -985,6 +1019,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 
 	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
 	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
+	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
 	vmcs_conf->vmexit_ctrl = _vmexit_control;
 	vmcs_conf->vmentry_ctrl = _vmentry_control;
 
@@ -1427,6 +1462,27 @@ static void seg_setup(int seg)
 	vmcs_write32(sf->ar_bytes, 0x93);
 }
 
+static int alloc_apic_access_page(struct kvm *kvm)
+{
+	struct kvm_userspace_memory_region kvm_userspace_mem;
+	int r = 0;
+
+	mutex_lock(&kvm->lock);
+	if (kvm->apic_access_page)
+		goto out;
+	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
+	kvm_userspace_mem.flags = 0;
+	kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
+	kvm_userspace_mem.memory_size = PAGE_SIZE;
+	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+	if (r)
+		goto out;
+	kvm->apic_access_page = gfn_to_page(kvm, 0xfee00);
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
@@ -1458,8 +1514,14 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 				CPU_BASED_CR8_LOAD_EXITING;
 #endif
 	}
+	if (!vm_need_secondary_exec_ctrls(vmx->vcpu.kvm))
+		exec_control &= ~CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
 
+	if (vm_need_secondary_exec_ctrls(vmx->vcpu.kvm))
+		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+			     vmcs_config.cpu_based_2nd_exec_ctrl);
+
 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
 	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
 	vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
@@ -1528,6 +1590,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
+	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+		if (alloc_apic_access_page(vmx->vcpu.kvm) != 0)
+			return -ENOMEM;
+
 	return 0;
 }
 
@@ -1616,13 +1682,17 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
 
-#ifdef CONFIG_X86_64
+	if (cpu_has_vmx_tpr_shadow()) {
 	vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
 	if (vm_need_tpr_shadow(vmx->vcpu.kvm))
 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
 			     page_to_phys(vmx->vcpu.apic->regs_page));
 	vmcs_write32(TPR_THRESHOLD, 0);
-#endif
+	}
+
+	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+		vmcs_write64(APIC_ACCESS_ADDR,
+			     page_to_phys(vmx->vcpu.kvm->apic_access_page));
 
 	vmx->vcpu.cr0 = 0x60000010;
 	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
@@ -2094,6 +2164,26 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	u64 exit_qualification;
+	enum emulation_result er;
+	unsigned long offset;
+
+	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+	offset = exit_qualification & 0xffful;
+
+	er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+
+	if (er != EMULATE_DONE) {
+		printk(KERN_ERR
+		       "Fail to handle apic access vmexit! Offset is 0x%lx\n",
+		       offset);
+		return -ENOTSUPP;
+	}
+	return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -2113,7 +2203,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
 	[EXIT_REASON_HLT] = handle_halt,
 	[EXIT_REASON_VMCALL] = handle_vmcall,
-	[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold
+	[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
+	[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
 };
 
 static const int kvm_vmx_max_exit_handlers =
diff --git a/drivers/kvm/vmx.h b/drivers/kvm/vmx.h
index 270d477a2aa6..c84bd3733153 100644
--- a/drivers/kvm/vmx.h
+++ b/drivers/kvm/vmx.h
@@ -89,6 +89,8 @@ enum vmcs_field {
 	TSC_OFFSET_HIGH = 0x00002011,
 	VIRTUAL_APIC_PAGE_ADDR = 0x00002012,
 	VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013,
+	APIC_ACCESS_ADDR = 0x00002014,
+	APIC_ACCESS_ADDR_HIGH = 0x00002015,
 	VMCS_LINK_POINTER = 0x00002800,
 	VMCS_LINK_POINTER_HIGH = 0x00002801,
 	GUEST_IA32_DEBUGCTL = 0x00002802,
@@ -214,6 +216,7 @@ enum vmcs_field {
 #define EXIT_REASON_MSR_WRITE 32
 #define EXIT_REASON_MWAIT_INSTRUCTION 36
 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
+#define EXIT_REASON_APIC_ACCESS 44
 
 /*
  * Interruption-information format
@@ -307,4 +310,6 @@ enum vmcs_field {
 #define MSR_IA32_FEATURE_CONTROL_LOCKED 0x1
 #define MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED 0x4
 
+#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT 9
+
 #endif