Diffstat (limited to 'arch/x86/kvm/x86.c')
 arch/x86/kvm/x86.c | 148 ++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 110 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8f1e22d3b286..5430e4b0af29 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -246,7 +246,7 @@ void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 }
 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 
-static void drop_user_return_notifiers(void *ignore)
+static void drop_user_return_notifiers(void)
 {
         unsigned int cpu = smp_processor_id();
         struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
@@ -408,12 +408,14 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 }
 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
 
-void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
         if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
                 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
         else
                 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+
+        return fault->nested_page_fault;
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -457,11 +459,12 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                             gfn_t ngfn, void *data, int offset, int len,
                             u32 access)
 {
+        struct x86_exception exception;
         gfn_t real_gfn;
         gpa_t ngpa;
 
         ngpa     = gfn_to_gpa(ngfn);
-        real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
+        real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
         if (real_gfn == UNMAPPED_GVA)
                 return -EFAULT;
 
@@ -726,7 +729,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
         if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
                 kvm_mmu_sync_roots(vcpu);
-                kvm_mmu_flush_tlb(vcpu);
+                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                 return 0;
         }
 
@@ -1518,7 +1521,7 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
         pvclock_update_vm_gtod_copy(kvm);
 
         kvm_for_each_vcpu(i, vcpu, kvm)
-                set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
         /* guest entries allowed */
         kvm_for_each_vcpu(i, vcpu, kvm)
@@ -1661,7 +1664,7 @@ static void kvmclock_update_fn(struct work_struct *work)
         struct kvm_vcpu *vcpu;
 
         kvm_for_each_vcpu(i, vcpu, kvm) {
-                set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                 kvm_vcpu_kick(vcpu);
         }
 }
@@ -1670,7 +1673,7 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
 {
         struct kvm *kvm = v->kvm;
 
-        set_bit(KVM_REQ_CLOCK_UPDATE, &v->requests);
+        kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
         schedule_delayed_work(&kvm->arch.kvmclock_update_work,
                                         KVMCLOCK_UPDATE_DELAY);
 }
@@ -1723,9 +1726,10 @@ static bool valid_mtrr_type(unsigned t)
         return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
 }
 
-static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
         int i;
+        u64 mask;
 
         if (!msr_mtrr_valid(msr))
                 return false;
@@ -1747,14 +1751,31 @@ static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         }
 
         /* variable MTRRs */
-        return valid_mtrr_type(data & 0xff);
+        WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
+
+        mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+        if ((msr & 1) == 0) {
+                /* MTRR base */
+                if (!valid_mtrr_type(data & 0xff))
+                        return false;
+                mask |= 0xf00;
+        } else
+                /* MTRR mask */
+                mask |= 0x7ff;
+        if (data & mask) {
+                kvm_inject_gp(vcpu, 0);
+                return false;
+        }
+
+        return true;
 }
+EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
 
 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
 
-        if (!mtrr_valid(vcpu, msr, data))
+        if (!kvm_mtrr_valid(vcpu, msr, data))
                 return 1;
 
         if (msr == MSR_MTRRdefType) {
@@ -1805,7 +1826,7 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                 break;
         default:
                 if (msr >= MSR_IA32_MC0_CTL &&
-                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
+                    msr < MSR_IA32_MCx_CTL(bank_num)) {
                         u32 offset = msr - MSR_IA32_MC0_CTL;
                         /* only 0 or all 1s can be written to IA32_MCi_CTL
                          * some Linux kernels though clear bit 10 in bank 4 to
@@ -2164,7 +2185,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
         case MSR_IA32_MCG_CTL:
         case MSR_IA32_MCG_STATUS:
-        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
+        case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
                 return set_msr_mce(vcpu, msr, data);
 
         /* Performance counters are not protected by a CPUID bit,
@@ -2330,7 +2351,7 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                 break;
         default:
                 if (msr >= MSR_IA32_MC0_CTL &&
-                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
+                    msr < MSR_IA32_MCx_CTL(bank_num)) {
                         u32 offset = msr - MSR_IA32_MC0_CTL;
                         data = vcpu->arch.mce_banks[offset];
                         break;
@@ -2419,7 +2440,13 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
         case MSR_K7_HWCR:
         case MSR_VM_HSAVE_PA:
         case MSR_K7_EVNTSEL0:
+        case MSR_K7_EVNTSEL1:
+        case MSR_K7_EVNTSEL2:
+        case MSR_K7_EVNTSEL3:
         case MSR_K7_PERFCTR0:
+        case MSR_K7_PERFCTR1:
+        case MSR_K7_PERFCTR2:
+        case MSR_K7_PERFCTR3:
         case MSR_K8_INT_PENDING_MSG:
         case MSR_AMD64_NB_CFG:
         case MSR_FAM10H_MMIO_CONF_BASE:
@@ -2505,7 +2532,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
         case MSR_IA32_MCG_CAP:
         case MSR_IA32_MCG_CTL:
         case MSR_IA32_MCG_STATUS:
-        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
+        case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
                 return get_msr_mce(vcpu, msr, pdata);
         case MSR_K7_CLK_CTL:
                 /*
@@ -2823,7 +2850,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
                 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
                 vcpu->arch.tsc_offset_adjustment = 0;
-                set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
         }
 
         if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
@@ -4040,16 +4067,16 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
         kvm_x86_ops->get_segment(vcpu, var, seg);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+                           struct x86_exception *exception)
 {
         gpa_t t_gpa;
-        struct x86_exception exception;
 
         BUG_ON(!mmu_is_nested(vcpu));
 
         /* NPT walks are always user-walks */
         access |= PFERR_USER_MASK;
-        t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
+        t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
 
         return t_gpa;
 }
@@ -4906,16 +4933,18 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
         }
 }
 
-static void inject_emulated_exception(struct kvm_vcpu *vcpu)
+static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
         if (ctxt->exception.vector == PF_VECTOR)
-                kvm_propagate_fault(vcpu, &ctxt->exception);
-        else if (ctxt->exception.error_code_valid)
+                return kvm_propagate_fault(vcpu, &ctxt->exception);
+
+        if (ctxt->exception.error_code_valid)
                 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
                                       ctxt->exception.error_code);
         else
                 kvm_queue_exception(vcpu, ctxt->exception.vector);
+        return false;
 }
 
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
@@ -4972,7 +5001,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 
         ++vcpu->stat.insn_emulation_fail;
         trace_kvm_emulate_insn_failed(vcpu);
-        if (!is_guest_mode(vcpu)) {
+        if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                 vcpu->run->internal.ndata = 0;
@@ -5224,6 +5253,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 
                 ctxt->interruptibility = 0;
                 ctxt->have_exception = false;
+                ctxt->exception.vector = -1;
                 ctxt->perm_ok = false;
 
                 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
@@ -5276,8 +5306,9 @@ restart:
         }
 
         if (ctxt->have_exception) {
-                inject_emulated_exception(vcpu);
                 r = EMULATE_DONE;
+                if (inject_emulated_exception(vcpu))
+                        return r;
         } else if (vcpu->arch.pio.count) {
                 if (!vcpu->arch.pio.in) {
                         /* FIXME: return into emulator if single-stepping. */
@@ -5545,7 +5576,7 @@ static void kvm_set_mmio_spte_mask(void)
          * entry to generate page fault with PFER.RSV = 1.
          */
          /* Mask the reserved physical address bits. */
-        mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr;
+        mask = rsvd_bits(maxphyaddr, 51);
 
         /* Bit 62 is always reserved for 32bit host. */
         mask |= 0x3ull << 62;
@@ -5576,7 +5607,7 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
         spin_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list)
                 kvm_for_each_vcpu(i, vcpu, kvm)
-                        set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
+                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
         atomic_set(&kvm_guest_has_master_clock, 0);
         spin_unlock(&kvm_lock);
 }
@@ -5989,6 +6020,44 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
         kvm_apic_update_tmr(vcpu, tmr);
 }
 
+static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
+{
+        ++vcpu->stat.tlb_flush;
+        kvm_x86_ops->tlb_flush(vcpu);
+}
+
+void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
+{
+        struct page *page = NULL;
+
+        if (!irqchip_in_kernel(vcpu->kvm))
+                return;
+
+        if (!kvm_x86_ops->set_apic_access_page_addr)
+                return;
+
+        page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+        kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
+
+        /*
+         * Do not pin apic access page in memory, the MMU notifier
+         * will call us again if it is migrated or swapped out.
+         */
+        put_page(page);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
+
+void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
+                                           unsigned long address)
+{
+        /*
+         * The physical address of apic access page is stored in the VMCS.
+         * Update it when it becomes invalid.
+         */
+        if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
+                kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+}
+
 /*
  * Returns 1 to let __vcpu_run() continue the guest execution loop without
  * exiting to the userspace. Otherwise, the value will be returned to the
@@ -6018,7 +6087,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
                         kvm_mmu_sync_roots(vcpu);
                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
-                        kvm_x86_ops->tlb_flush(vcpu);
+                        kvm_vcpu_flush_tlb(vcpu);
                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
                         r = 0;
@@ -6049,6 +6118,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                         kvm_deliver_pmi(vcpu);
                 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
                         vcpu_scan_ioapic(vcpu);
+                if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
+                        kvm_vcpu_reload_apic_access_page(vcpu);
         }
 
         if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -6934,7 +7005,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
         kvm_rip_write(vcpu, 0);
 }
 
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
 {
         struct kvm *kvm;
         struct kvm_vcpu *vcpu;
@@ -6945,7 +7016,7 @@ int kvm_arch_hardware_enable(void *garbage)
         bool stable, backwards_tsc = false;
 
         kvm_shared_msr_cpu_online();
-        ret = kvm_x86_ops->hardware_enable(garbage);
+        ret = kvm_x86_ops->hardware_enable();
         if (ret != 0)
                 return ret;
 
@@ -6954,7 +7025,7 @@ int kvm_arch_hardware_enable(void *garbage)
         list_for_each_entry(kvm, &vm_list, vm_list) {
                 kvm_for_each_vcpu(i, vcpu, kvm) {
                         if (!stable && vcpu->cpu == smp_processor_id())
-                                set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+                                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                         if (stable && vcpu->arch.last_host_tsc > local_tsc) {
                                 backwards_tsc = true;
                                 if (vcpu->arch.last_host_tsc > max_tsc)
@@ -7008,8 +7079,7 @@ int kvm_arch_hardware_enable(void *garbage)
                 kvm_for_each_vcpu(i, vcpu, kvm) {
                         vcpu->arch.tsc_offset_adjustment += delta_cyc;
                         vcpu->arch.last_host_tsc = local_tsc;
-                        set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
-                                &vcpu->requests);
+                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
                 }
 
                 /*
@@ -7026,10 +7096,10 @@ int kvm_arch_hardware_enable(void *garbage)
         return 0;
 }
 
-void kvm_arch_hardware_disable(void *garbage)
+void kvm_arch_hardware_disable(void)
 {
-        kvm_x86_ops->hardware_disable(garbage);
-        drop_user_return_notifiers(garbage);
+        kvm_x86_ops->hardware_disable();
+        drop_user_return_notifiers();
 }
 
 int kvm_arch_hardware_setup(void)
@@ -7146,6 +7216,11 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
         static_key_slow_dec(&kvm_no_apic_vcpu);
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+        kvm_x86_ops->sched_in(vcpu, cpu);
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
         if (type)
@@ -7237,10 +7312,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
         kfree(kvm->arch.vpic);
         kfree(kvm->arch.vioapic);
         kvm_free_vcpus(kvm);
-        if (kvm->arch.apic_access_page)
-                put_page(kvm->arch.apic_access_page);
-        if (kvm->arch.ept_identity_pagetable)
-                put_page(kvm->arch.ept_identity_pagetable);
         kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 }
 
@@ -7643,3 +7714,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);