Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c | 209
1 file changed, 127 insertions(+), 82 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c259814200bd..bd7a70be41b3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -108,6 +108,10 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
 static u32 tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
+/* lapic timer advance (tscdeadline mode only) in nanoseconds */
+unsigned int lapic_timer_advance_ns = 0;
+module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
+
 static bool backwards_tsc_observed = false;
 
 #define KVM_NR_SHARED_MSRS 16
@@ -141,6 +145,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "irq_window", VCPU_STAT(irq_window_exits) },
 	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
 	{ "halt_exits", VCPU_STAT(halt_exits) },
+	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "hypercalls", VCPU_STAT(hypercalls) },
 	{ "request_irq", VCPU_STAT(request_irq_exits) },
@@ -492,7 +497,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
 
-int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 			       void *data, int offset, int len, u32 access)
 {
 	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
@@ -643,7 +648,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 	}
 }
 
-int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
 	u64 xcr0 = xcr;
 	u64 old_xcr0 = vcpu->arch.xcr0;
@@ -1083,6 +1088,15 @@ static void update_pvclock_gtod(struct timekeeper *tk)
 }
 #endif
 
+void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
+	 * vcpu_enter_guest. This function is only called from
+	 * the physical CPU that is running vcpu.
+	 */
+	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+}
 
 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 {
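Note: the request set here is consumed on the vcpu thread itself; the
matching check sits in the run loop later in this file and looks roughly
like this (sketch):

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		kvm_inject_pending_timer_irqs(vcpu);

Because kvm_set_pending_timer() is only called from the physical CPU that
is running the vcpu, the request bit is guaranteed to be observed before
the next guest entry and no remote kick is needed.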
@@ -1180,7 +1194,7 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 #endif
 
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
-unsigned long max_tsc_khz;
+static unsigned long max_tsc_khz;
 
 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 {
@@ -1234,7 +1248,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 	return tsc;
 }
 
-void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
+static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
 	bool vcpus_matched;
@@ -1529,7 +1543,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
 					&ka->master_cycle_now);
 
 	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
-				&& !backwards_tsc_observed;
+				&& !backwards_tsc_observed
+				&& !ka->boot_vcpu_runs_old_kvmclock;
 
 	if (ka->use_master_clock)
 		atomic_set(&kvm_guest_has_master_clock, 1);
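Note: boot_vcpu_runs_old_kvmclock is a new field; the companion hunk in
arch/x86/include/asm/kvm_host.h (not part of this file's diff) adds it to
struct kvm_arch, roughly:

	struct kvm_arch {
		/* ... existing fields elided ... */
		bool boot_vcpu_runs_old_kvmclock;
	};

The intent, as far as this series goes, is that a boot vCPU still using the
legacy MSR_KVM_SYSTEM_TIME interface indicates an old guest kvmclock
implementation that copes badly with the larger tsc_timestamp deltas
masterclock mode can produce, so the master clock stays disabled for it.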
@@ -2161,8 +2176,20 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_KVM_SYSTEM_TIME_NEW:
 	case MSR_KVM_SYSTEM_TIME: {
 		u64 gpa_offset;
+		struct kvm_arch *ka = &vcpu->kvm->arch;
+
 		kvmclock_reset(vcpu);
 
+		if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
+			bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
+
+			if (ka->boot_vcpu_runs_old_kvmclock != tmp)
+				set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
+					&vcpu->requests);
+
+			ka->boot_vcpu_runs_old_kvmclock = tmp;
+		}
+
 		vcpu->arch.time = data;
 		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
@@ -2324,6 +2351,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
 	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
 }
+EXPORT_SYMBOL_GPL(kvm_get_msr);
 
 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
@@ -2738,6 +2766,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_READONLY_MEM:
 	case KVM_CAP_HYPERV_TIME:
 	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
+	case KVM_CAP_TSC_DEADLINE_TIMER:
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 	case KVM_CAP_ASSIGN_DEV_IRQ:
 	case KVM_CAP_PCI_2_3:
@@ -2776,9 +2805,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_TSC_CONTROL:
 		r = kvm_has_tsc_control;
 		break;
-	case KVM_CAP_TSC_DEADLINE_TIMER:
-		r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
-		break;
 	default:
 		r = 0;
 		break;
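Note: with the host-feature check deleted here, KVM_CAP_TSC_DEADLINE_TIMER
moves to the unconditional list in the previous hunk, presumably because
KVM emulates the deadline timer via hrtimers regardless of whether the host
CPU has X86_FEATURE_TSC_DEADLINE_TIMER. Userspace probes it the usual way;
a minimal, illustrative check against /dev/kvm:

	#include <fcntl.h>
	#include <linux/kvm.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		if (kvm < 0)
			return 1;
		/* Now returns 1 on any host running this kernel. */
		int r = ioctl(kvm, KVM_CHECK_EXTENSION,
			      KVM_CAP_TSC_DEADLINE_TIMER);
		printf("KVM_CAP_TSC_DEADLINE_TIMER: %d\n", r);
		return 0;
	}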
@@ -3734,83 +3760,43 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  * @kvm: kvm instance
  * @log: slot id and address to which we copy the log
  *
- * We need to keep it in mind that VCPU threads can write to the bitmap
- * concurrently. So, to avoid losing data, we keep the following order for
- * each bit:
+ * Steps 1-4 below provide general overview of dirty page logging. See
+ * kvm_get_dirty_log_protect() function description for additional details.
+ *
+ * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
+ * always flush the TLB (step 4) even if previous step failed and the dirty
+ * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
+ * does not preclude user space subsequent dirty log read. Flushing TLB ensures
+ * writes will be marked dirty for next log read.
  *
  * 1. Take a snapshot of the bit and clear it if needed.
  * 2. Write protect the corresponding page.
- * 3. Flush TLB's if needed.
- * 4. Copy the snapshot to the userspace.
- *
- * Between 2 and 3, the guest may write to the page using the remaining TLB
- * entry. This is not a problem because the page will be reported dirty at
- * step 4 using the snapshot taken before and step 3 ensures that successive
- * writes will be logged for the next call.
+ * 3. Copy the snapshot to the userspace.
+ * 4. Flush TLB's if needed.
  */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
-	int r;
-	struct kvm_memory_slot *memslot;
-	unsigned long n, i;
-	unsigned long *dirty_bitmap;
-	unsigned long *dirty_bitmap_buffer;
 	bool is_dirty = false;
+	int r;
 
 	mutex_lock(&kvm->slots_lock);
 
-	r = -EINVAL;
-	if (log->slot >= KVM_USER_MEM_SLOTS)
-		goto out;
-
-	memslot = id_to_memslot(kvm->memslots, log->slot);
-
-	dirty_bitmap = memslot->dirty_bitmap;
-	r = -ENOENT;
-	if (!dirty_bitmap)
-		goto out;
-
-	n = kvm_dirty_bitmap_bytes(memslot);
-
-	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
-	memset(dirty_bitmap_buffer, 0, n);
-
-	spin_lock(&kvm->mmu_lock);
-
-	for (i = 0; i < n / sizeof(long); i++) {
-		unsigned long mask;
-		gfn_t offset;
-
-		if (!dirty_bitmap[i])
-			continue;
-
-		is_dirty = true;
-
-		mask = xchg(&dirty_bitmap[i], 0);
-		dirty_bitmap_buffer[i] = mask;
-
-		offset = i * BITS_PER_LONG;
-		kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
-	}
-
-	spin_unlock(&kvm->mmu_lock);
-
-	/* See the comments in kvm_mmu_slot_remove_write_access(). */
-	lockdep_assert_held(&kvm->slots_lock);
+	/*
+	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
+	 */
+	if (kvm_x86_ops->flush_log_dirty)
+		kvm_x86_ops->flush_log_dirty(kvm);
+
+	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
 
 	/*
	 * All the TLBs can be flushed out of mmu lock, see the comments in
	 * kvm_mmu_slot_remove_write_access().
	 */
+	lockdep_assert_held(&kvm->slots_lock);
 	if (is_dirty)
 		kvm_flush_remote_tlbs(kvm);
 
-	r = -EFAULT;
-	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
-		goto out;
-
-	r = 0;
-out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
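Note: kvm_get_dirty_log_protect() is the arch-neutral helper (added to
virt/kvm/kvm_main.c by the same series) that now owns steps 1-3 from the
comment above. A condensed, paraphrased sketch of its core:

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		/* Step 1: atomically snapshot and clear the dirty bits. */
		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		/* Step 2: arch hook; write protect or clear D-bits. */
		offset = i * BITS_PER_LONG;
		kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
							offset, mask);
	}
	spin_unlock(&kvm->mmu_lock);

	/* Step 3: hand the snapshot to userspace. */
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		return -EFAULT;

This is essentially the loop deleted above, with the x86-only
kvm_mmu_write_protect_pt_masked() call generalized into the
kvm_arch_mmu_enable_log_dirty_pt_masked() hook so PML-capable hardware can
clear D-bits instead of write-protecting.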
@@ -4516,6 +4502,8 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		addr += now;
+		if (ctxt->mode != X86EMUL_MODE_PROT64)
+			addr = (u32)addr;
 		val += now;
 		bytes -= now;
 	}
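Note: the added truncation handles linear-address wraparound for MMIO
accesses the emulator splits at a page boundary. Outside 64-bit mode the
linear address space is 32 bits wide, so the second chunk of an access
starting near 4 GiB must wrap to 0 rather than spill into 1ULL << 32. A
standalone illustration of the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0xfffffffc;	/* 8-byte access, 32-bit mode */
		addr += 4;			/* advance past the first chunk */
		/* Without the cast the emulator would touch 0x100000000,
		 * an address that cannot exist in 32-bit mode: */
		printf("%#llx -> %#x\n",
		       (unsigned long long)addr, (uint32_t)addr);
		return 0;
	}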
@@ -4984,6 +4972,11 @@ static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
 	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
 }
 
+static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
+{
+	kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
+}
+
 static const struct x86_emulate_ops emulate_ops = {
 	.read_gpr = emulator_read_gpr,
 	.write_gpr = emulator_write_gpr,
@@ -5019,6 +5012,7 @@ static const struct x86_emulate_ops emulate_ops = {
 	.put_fpu = emulator_put_fpu,
 	.intercept = emulator_intercept,
 	.get_cpuid = emulator_get_cpuid,
+	.set_nmi_mask = emulator_set_nmi_mask,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
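Note: the new .set_nmi_mask callback gives the instruction emulator a path
to the vendor code that tracks NMI blocking; the intended consumer in this
series is IRET emulation, which must unmask NMIs just as a hardware IRET
would. A plausible sketch of the use in arch/x86/kvm/emulate.c (not part of
this file's diff):

	/* At the end of successful IRET emulation: */
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->set_nmi_mask(ctxt, false);	/* IRET ends NMI blocking */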
@@ -6311,6 +6305,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	}
 
 	trace_kvm_entry(vcpu->vcpu_id);
+	wait_lapic_expire(vcpu);
 	kvm_x86_ops->run(vcpu);
 
 	/*
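Note: wait_lapic_expire() (implemented in arch/x86/kvm/lapic.c by this
series) pairs with the lapic_timer_advance_ns parameter from the first
hunk: the LAPIC code arms the tscdeadline hrtimer to fire that many
nanoseconds early, and this call busy-waits the residue right before VM
entry so the injected timer interrupt lands closer to the deadline the
guest programmed. Per the module_param() flags above, the value can be set
at load time (kvm.lapic_timer_advance_ns=N) or at runtime under
/sys/module/kvm/parameters/. A paraphrased sketch of the helper:

	void wait_lapic_expire(struct kvm_vcpu *vcpu)
	{
		struct kvm_lapic *apic = vcpu->arch.apic;
		u64 guest_tsc, tsc_deadline;

		if (!kvm_vcpu_has_lapic(vcpu) ||
		    apic->lapic_timer.expired_tscdeadline == 0)
			return;

		tsc_deadline = apic->lapic_timer.expired_tscdeadline;
		apic->lapic_timer.expired_tscdeadline = 0;
		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());

		/* Busy-wait until the guest TSC reaches the deadline. */
		if (guest_tsc < tsc_deadline)
			__delay(tsc_deadline - guest_tsc);
	}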
@@ -7041,15 +7036,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return r;
 }
 
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	int r;
 	struct msr_data msr;
 	struct kvm *kvm = vcpu->kvm;
 
-	r = vcpu_load(vcpu);
-	if (r)
-		return r;
+	if (vcpu_load(vcpu))
+		return;
 	msr.data = 0x0;
 	msr.index = MSR_IA32_TSC;
 	msr.host_initiated = true;
@@ -7058,8 +7051,6 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
 					KVMCLOCK_SYNC_PERIOD);
-
-	return r;
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -7549,12 +7540,62 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
+				     struct kvm_memory_slot *new)
+{
+	/* Still write protect RO slot */
+	if (new->flags & KVM_MEM_READONLY) {
+		kvm_mmu_slot_remove_write_access(kvm, new);
+		return;
+	}
+
+	/*
+	 * Call kvm_x86_ops dirty logging hooks when they are valid.
+	 *
+	 * kvm_x86_ops->slot_disable_log_dirty is called when:
+	 *
+	 *  - KVM_MR_CREATE with dirty logging is disabled
+	 *  - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
+	 *
+	 * The reason is, in case of PML, we need to set D-bit for any slots
+	 * with dirty logging disabled in order to eliminate unnecessary GPA
+	 * logging in PML buffer (and potential PML buffer full VMEXIT). This
+	 * guarantees leaving PML enabled during guest's lifetime won't have
+	 * any additional overhead from PML when guest is running with dirty
+	 * logging disabled for memory slots.
+	 *
+	 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
+	 * to dirty logging mode.
+	 *
+	 * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
+	 *
+	 * In case of write protect:
+	 *
+	 * Write protect all pages for dirty logging.
+	 *
+	 * All the sptes including the large sptes which point to this
+	 * slot are set to readonly. We can not create any new large
+	 * spte on this slot until the end of the logging.
+	 *
+	 * See the comments in fast_page_fault().
+	 */
+	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+		if (kvm_x86_ops->slot_enable_log_dirty)
+			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
+		else
+			kvm_mmu_slot_remove_write_access(kvm, new);
+	} else {
+		if (kvm_x86_ops->slot_disable_log_dirty)
+			kvm_x86_ops->slot_disable_log_dirty(kvm, new);
+	}
+}
+
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   const struct kvm_memory_slot *old,
 				   enum kvm_mr_change change)
 {
-
+	struct kvm_memory_slot *new;
 	int nr_mmu_pages = 0;
 
 	if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
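Note: for reference, the PML-capable implementations these hooks dispatch
to live in vmx.c (same series). A rough sketch, using helper names as
introduced by that series:

	static void vmx_slot_enable_log_dirty(struct kvm *kvm,
					      struct kvm_memory_slot *slot)
	{
		/* Clear D-bits on leaf sptes; the first write to each page
		 * then logs its GPA to the PML buffer instead of faulting. */
		kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
		/* Large pages are still write-protected so they can be
		 * split and tracked at 4K granularity. */
		kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
	}

	static void vmx_slot_disable_log_dirty(struct kvm *kvm,
					       struct kvm_memory_slot *slot)
	{
		/* Set D-bits again so PML stays quiet for this slot. */
		kvm_mmu_slot_set_dirty(kvm, slot);
	}

On hardware without PML the hooks are left NULL and
kvm_mmu_slot_apply_flags() falls back to plain write protection.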
@@ -7573,17 +7614,20 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
 	if (nr_mmu_pages)
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+
+	/* It's OK to get 'new' slot here as it has already been installed */
+	new = id_to_memslot(kvm->memslots, mem->slot);
+
 	/*
-	 * Write protect all pages for dirty logging.
+	 * Set up write protection and/or dirty logging for the new slot.
 	 *
-	 * All the sptes including the large sptes which point to this
-	 * slot are set to readonly. We can not create any new large
-	 * spte on this slot until the end of the logging.
-	 *
-	 * See the comments in fast_page_fault().
+	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old slot
+	 * have been zapped so no dirty logging work is needed for the old
+	 * slot. For KVM_MR_FLAGS_ONLY, the old slot is essentially the same
+	 * one as the new and it's also covered when dealing with the new slot.
 	 */
-	if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
-		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	if (change != KVM_MR_DELETE)
+		kvm_mmu_slot_apply_flags(kvm, new);
 }
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -7837,3 +7881,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);