Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	157
1 file changed, 110 insertions(+), 47 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 32bf19ef3115..2b2dd030ea3b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -801,6 +801,17 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
+static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+		for (i = 0; i < KVM_NR_DB_REGS; i++)
+			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+	}
+}
+
 static void kvm_update_dr6(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
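The new helper pairs with the KVM_DEBUGREG_RELOAD consumer added to vcpu_enter_guest() later in this diff. A condensed sketch of the intended flow (not itself part of the patch; unrelated statements elided into comments):

	/* Writing DR0-DR3 wholesale (ioctl or reset) resyncs eff_db[]: */
	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
	kvm_update_dr0123(vcpu);	/* db[] -> eff_db[], sets KVM_DEBUGREG_RELOAD */

	/* The next vcpu_enter_guest() then rewrites the hardware registers: */
	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(vcpu->arch.eff_db[0], 0);	/* ... through eff_db[3] */
		set_debugreg(vcpu->arch.dr6, 6);
		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
	}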
@@ -3149,6 +3160,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+	kvm_update_dr0123(vcpu);
 	vcpu->arch.dr6 = dbgregs->dr6;
 	kvm_update_dr6(vcpu);
 	vcpu->arch.dr7 = dbgregs->dr7;
@@ -4114,8 +4126,8 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
 	do {
 		n = min(len, 8);
 		if (!(vcpu->arch.apic &&
-		      !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
-		    && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
+		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
+		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
 			break;
 		handled += n;
 		addr += n;
@@ -4134,8 +4146,9 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
 	do {
 		n = min(len, 8);
 		if (!(vcpu->arch.apic &&
-		      !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
-		    && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
+		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
+					 addr, n, v))
+		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
 			break;
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
 		handled += n;
@@ -4475,7 +4488,8 @@ mmio:
 	return X86EMUL_CONTINUE;
 }
 
-int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
+static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
+			unsigned long addr,
 			void *val, unsigned int bytes,
 			struct x86_exception *exception,
 			const struct read_write_emulator_ops *ops)
@@ -4538,7 +4552,7 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
 				   exception, &read_emultor);
 }
 
-int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
+static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
 			    unsigned long addr,
 			    const void *val,
 			    unsigned int bytes,
@@ -4629,10 +4643,10 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 	int r;
 
 	if (vcpu->arch.pio.in)
-		r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
+		r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
 				    vcpu->arch.pio.size, pd);
 	else
-		r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+		r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
 				     vcpu->arch.pio.port, vcpu->arch.pio.size,
 				     pd);
 	return r;
@@ -4705,7 +4719,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
 }
 
-int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 {
 	if (!need_emulate_wbinvd(vcpu))
 		return X86EMUL_CONTINUE;
@@ -4722,19 +4736,29 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 	wbinvd();
 	return X86EMUL_CONTINUE;
 }
+
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	return kvm_emulate_wbinvd_noskip(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
 
+
+
 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
 {
-	kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
+	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
 }
 
-int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
+static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
+			   unsigned long *dest)
 {
 	return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
 }
 
-int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
+static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
+			   unsigned long value)
 {
 
 	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
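The rename plus wrapper splits RIP handling out of the emulation proper: callers that retire the instruction themselves, such as the x86 emulator via emulator_wbinvd() above, use the _noskip variant, while a vendor exit handler keeps the old one-call behavior. A hypothetical handler, as a sketch of the intended usage (the name is assumed, not from this patch):

	static int handle_wbinvd_exit(struct kvm_vcpu *vcpu)
	{
		kvm_emulate_wbinvd(vcpu);	/* advances RIP, then emulates WBINVD */
		return 1;			/* 1 == stay in the guest run loop */
	}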
@@ -5816,7 +5840,7 @@ void kvm_arch_exit(void)
 	free_percpu(shared_msrs);
 }
 
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.halt_exits;
 	if (irqchip_in_kernel(vcpu->kvm)) {
@@ -5827,6 +5851,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	return kvm_vcpu_halt(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
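Same pattern as the wbinvd change above: kvm_vcpu_halt() is the raw state transition for callers that have already advanced RIP (or have no instruction to skip), while kvm_emulate_halt() keeps the skip-then-halt behavior for HLT exit handlers. Schematically, under the same naming assumptions as before:

	/* Hypothetical HLT exit handler: */
	static int handle_halt_exit(struct kvm_vcpu *vcpu)
	{
		return kvm_emulate_halt(vcpu);	/* skip the HLT, then go HALTED */
	}

A path that has already moved RIP past the instruction would call kvm_vcpu_halt() directly instead.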
5832 | int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | 5863 | int kvm_hv_hypercall(struct kvm_vcpu *vcpu) |
@@ -5903,7 +5934,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) | |||
5903 | lapic_irq.dest_id = apicid; | 5934 | lapic_irq.dest_id = apicid; |
5904 | 5935 | ||
5905 | lapic_irq.delivery_mode = APIC_DM_REMRD; | 5936 | lapic_irq.delivery_mode = APIC_DM_REMRD; |
5906 | kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL); | 5937 | kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); |
5907 | } | 5938 | } |
5908 | 5939 | ||
5909 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | 5940 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) |
@@ -5911,6 +5942,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
5911 | unsigned long nr, a0, a1, a2, a3, ret; | 5942 | unsigned long nr, a0, a1, a2, a3, ret; |
5912 | int op_64_bit, r = 1; | 5943 | int op_64_bit, r = 1; |
5913 | 5944 | ||
5945 | kvm_x86_ops->skip_emulated_instruction(vcpu); | ||
5946 | |||
5914 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) | 5947 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) |
5915 | return kvm_hv_hypercall(vcpu); | 5948 | return kvm_hv_hypercall(vcpu); |
5916 | 5949 | ||
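Hoisting the skip to the top of kvm_emulate_hypercall() means RIP is advanced exactly once, before any dispatch happens, so the Hyper-V branch and the error paths presumably no longer need skip calls of their own. The resulting order, condensed:

	kvm_x86_ops->skip_emulated_instruction(vcpu);	/* RIP -> past VMCALL */
	if (kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);	/* reached with RIP already advanced */
	/* ... native KVM hypercall dispatch follows ... */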
@@ -6164,7 +6197,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 }
 
 /*
- * Returns 1 to let __vcpu_run() continue the guest execution loop without
+ * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace. Otherwise, the value will be returned to the
  * userspace.
  */
@@ -6301,6 +6334,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		set_debugreg(vcpu->arch.eff_db[2], 2);
 		set_debugreg(vcpu->arch.eff_db[3], 3);
 		set_debugreg(vcpu->arch.dr6, 6);
+		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
 	}
 
 	trace_kvm_entry(vcpu->vcpu_id);
@@ -6382,42 +6416,47 @@ out:
 	return r;
 }
 
+static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+{
+	if (!kvm_arch_vcpu_runnable(vcpu)) {
+		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+		kvm_vcpu_block(vcpu);
+		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
+			return 1;
+	}
+
+	kvm_apic_accept_events(vcpu);
+	switch(vcpu->arch.mp_state) {
+	case KVM_MP_STATE_HALTED:
+		vcpu->arch.pv.pv_unhalted = false;
+		vcpu->arch.mp_state =
+			KVM_MP_STATE_RUNNABLE;
+	case KVM_MP_STATE_RUNNABLE:
+		vcpu->arch.apf.halted = false;
+		break;
+	case KVM_MP_STATE_INIT_RECEIVED:
+		break;
+	default:
+		return -EINTR;
+		break;
+	}
+	return 1;
+}
 
-static int __vcpu_run(struct kvm_vcpu *vcpu)
+static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
 	struct kvm *kvm = vcpu->kvm;
 
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
-	r = 1;
-	while (r > 0) {
+	for (;;) {
 		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
 		    !vcpu->arch.apf.halted)
 			r = vcpu_enter_guest(vcpu);
-		else {
-			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-			kvm_vcpu_block(vcpu);
-			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-			if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
-				kvm_apic_accept_events(vcpu);
-				switch(vcpu->arch.mp_state) {
-				case KVM_MP_STATE_HALTED:
-					vcpu->arch.pv.pv_unhalted = false;
-					vcpu->arch.mp_state =
-						KVM_MP_STATE_RUNNABLE;
-				case KVM_MP_STATE_RUNNABLE:
-					vcpu->arch.apf.halted = false;
-					break;
-				case KVM_MP_STATE_INIT_RECEIVED:
-					break;
-				default:
-					r = -EINTR;
-					break;
-				}
-			}
-		}
-
+		else
+			r = vcpu_block(kvm, vcpu);
 		if (r <= 0)
 			break;
 
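The refactor leaves vcpu_run() with a flat skeleton; roughly (a sketch, with the between-iteration bookkeeping elided into a comment):

	for (;;) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		    !vcpu->arch.apf.halted)
			r = vcpu_enter_guest(vcpu);	/* run guest code */
		else
			r = vcpu_block(kvm, vcpu);	/* sleep until runnable */
		if (r <= 0)
			break;		/* zero or negative: exit to userspace */
		/* ... pending-event, signal, and resched checks follow ... */
	}

Note that the fall-through from KVM_MP_STATE_HALTED into KVM_MP_STATE_RUNNABLE in vcpu_block() is deliberate, carried over unchanged from the old switch.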
@@ -6429,6 +6468,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			r = -EINTR;
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.request_irq_exits;
+			break;
 		}
 
 		kvm_check_async_pf_completion(vcpu);
@@ -6437,6 +6477,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			r = -EINTR;
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.signal_exits;
+			break;
 		}
 		if (need_resched()) {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
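Both added break statements, here and in the previous hunk, compensate for the loop change above: the old while (r > 0) re-tested r at the top of every iteration, so setting r = -EINTR was enough to terminate; for (;;) has no such test, so each -EINTR site must leave the loop explicitly:

	if (signal_pending(current)) {
		r = -EINTR;
		vcpu->run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		break;	/* for (;;) never re-tests r, so break out here */
	}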
@@ -6568,7 +6609,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	} else
 		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
 
-	r = __vcpu_run(vcpu);
+	r = vcpu_run(vcpu);
 
 out:
 	post_kvm_run_save(vcpu);
@@ -7075,11 +7116,14 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_clear_exception_queue(vcpu);
 
 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
+	kvm_update_dr0123(vcpu);
 	vcpu->arch.dr6 = DR6_INIT;
 	kvm_update_dr6(vcpu);
 	vcpu->arch.dr7 = DR7_FIXED_1;
 	kvm_update_dr7(vcpu);
 
+	vcpu->arch.cr2 = 0;
+
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	vcpu->arch.apf.msr_val = 0;
 	vcpu->arch.st.msr_val = 0;
@@ -7240,7 +7284,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.pv.pv_unhalted = false;
 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
-	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
+	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	else
 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -7288,6 +7332,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.guest_supported_xcr0 = 0;
 	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
 
+	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
 
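Caching the CPUID-derived MAXPHYADDR once at vcpu init lets hot paths read a field instead of re-walking the guest CPUID entries each time; presumably cpuid_query_maxphyaddr() does the actual walk and the existing accessor reduces to a field read, roughly:

	/* Assumed shape of the cached accessor (arch/x86/kvm/cpuid.h): */
	static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.maxphyaddr;	/* filled in by kvm_arch_vcpu_init() */
	}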
@@ -7428,7 +7474,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 
 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
 		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
-			kvm_kvfree(free->arch.rmap[i]);
+			kvfree(free->arch.rmap[i]);
 			free->arch.rmap[i] = NULL;
 		}
 		if (i == 0)
@@ -7436,7 +7482,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 
 		if (!dont || free->arch.lpage_info[i - 1] !=
 			     dont->arch.lpage_info[i - 1]) {
-			kvm_kvfree(free->arch.lpage_info[i - 1]);
+			kvfree(free->arch.lpage_info[i - 1]);
 			free->arch.lpage_info[i - 1] = NULL;
 		}
 	}
@@ -7490,12 +7536,12 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 
 out_free:
 	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
-		kvm_kvfree(slot->arch.rmap[i]);
+		kvfree(slot->arch.rmap[i]);
 		slot->arch.rmap[i] = NULL;
 		if (i == 0)
 			continue;
 
-		kvm_kvfree(slot->arch.lpage_info[i - 1]);
+		kvfree(slot->arch.lpage_info[i - 1]);
 		slot->arch.lpage_info[i - 1] = NULL;
 	}
 	return -ENOMEM;
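kvm_kvfree() was KVM's private pick-the-right-free wrapper; the generic kvfree() in mm/util.c implements the same dispatch, so the local copy can be dropped. For reference, the generic version is essentially:

	void kvfree(const void *addr)
	{
		if (is_vmalloc_addr(addr))
			vfree(addr);	/* buffer came from vmalloc */
		else
			kfree(addr);	/* buffer came from kmalloc */
	}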
@@ -7618,6 +7664,23 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	new = id_to_memslot(kvm->memslots, mem->slot);
 
 	/*
+	 * Dirty logging tracks sptes in 4k granularity, meaning that large
+	 * sptes have to be split.  If live migration is successful, the guest
+	 * in the source machine will be destroyed and large sptes will be
+	 * created in the destination. However, if the guest continues to run
+	 * in the source machine (for example if live migration fails), small
+	 * sptes will remain around and cause bad performance.
+	 *
+	 * Scan sptes if dirty logging has been stopped, dropping those
+	 * which can be collapsed into a single large-page spte.  Later
+	 * page faults will create the large-page sptes.
+	 */
+	if ((change != KVM_MR_DELETE) &&
+		(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+		!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+		kvm_mmu_zap_collapsible_sptes(kvm, new);
+
+	/*
 	 * Set up write protection and/or dirty logging for the new slot.
 	 *
 	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have