Diffstat (limited to 'arch/x86/kvm/x86.c')
 -rw-r--r--  arch/x86/kvm/x86.c | 226
 1 file changed, 159 insertions(+), 67 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0033df32a745..c259814200bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -27,6 +27,7 @@
 #include "kvm_cache_regs.h"
 #include "x86.h"
 #include "cpuid.h"
+#include "assigned-dev.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -353,6 +354,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 
 	if (!vcpu->arch.exception.pending) {
 	queue:
+		if (has_error && !is_protmode(vcpu))
+			has_error = false;
 		vcpu->arch.exception.pending = true;
 		vcpu->arch.exception.has_error_code = has_error;
 		vcpu->arch.exception.nr = nr;
@@ -455,6 +458,16 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
 }
 EXPORT_SYMBOL_GPL(kvm_require_cpl);
 
+bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
+{
+	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+		return true;
+
+	kvm_queue_exception(vcpu, UD_VECTOR);
+	return false;
+}
+EXPORT_SYMBOL_GPL(kvm_require_dr);
+
 /*
  * This function will be used to read from the physical memory of the currently
  * running guest. The difference to kvm_read_guest_page is that this function
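The new kvm_require_dr() centralizes an architectural rule: when CR4.DE is set, accesses to DR4/DR5 raise #UD rather than aliasing DR6/DR7. A minimal sketch of the caller pattern it enables (handle_dr_access() is hypothetical, not a function in this diff):

	/* Hypothetical caller: the helper queues the #UD itself on failure. */
	static int handle_dr_access(struct kvm_vcpu *vcpu, int dr)
	{
		if (!kvm_require_dr(vcpu, dr))
			return 1;	/* #UD already queued; re-enter the guest */
		/* DR4/DR5 now legitimately alias DR6/DR7 */
		return 0;
	}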
@@ -656,6 +669,12 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 	if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
 		return 1;
 
+	if (xcr0 & XSTATE_AVX512) {
+		if (!(xcr0 & XSTATE_YMM))
+			return 1;
+		if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
+			return 1;
+	}
 	kvm_put_guest_xcr0(vcpu);
 	vcpu->arch.xcr0 = xcr0;
 
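XSTATE_AVX512 is a composite mask covering the three AVX-512 state components (opmask, ZMM_Hi256, Hi16_ZMM), so the second check rejects any attempt to enable only a subset. The same all-or-nothing rule in isolation, as a sketch (bit positions per the SDM's XCR0 layout; the helper name is illustrative):

	#define XSTATE_YMM	(1ULL << 2)
	#define XSTATE_AVX512	(7ULL << 5)	/* opmask | ZMM_Hi256 | Hi16_ZMM */

	/* AVX-512 state is valid only with YMM and with all three components. */
	static int xcr0_avx512_valid(u64 xcr0)
	{
		if (!(xcr0 & XSTATE_AVX512))
			return 1;
		return (xcr0 & XSTATE_YMM) &&
		       (xcr0 & XSTATE_AVX512) == XSTATE_AVX512;
	}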
@@ -732,6 +751,10 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
+#ifdef CONFIG_X86_64
+	cr3 &= ~CR3_PCID_INVD;
+#endif
+
 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
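Bit 63 of the MOV-to-CR3 source operand is not an address bit: with PCID enabled it only tells the CPU whether to skip invalidating cached translations for that PCID. Masking it up front keeps the kvm_read_cr3() fast-path comparison from spuriously failing. The constant this hunk relies on, per the SDM bit position (shown for reference; defined elsewhere in the tree):

	#define CR3_PCID_INVD	BIT_64(63)	/* "no invalidate" hint, not part of the base address */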
@@ -811,8 +834,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 		vcpu->arch.eff_db[dr] = val;
 		break;
 	case 4:
-		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-			return 1; /* #UD */
 		/* fall through */
 	case 6:
 		if (val & 0xffffffff00000000ULL)
@@ -821,8 +842,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 		kvm_update_dr6(vcpu);
 		break;
 	case 5:
-		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-			return 1; /* #UD */
 		/* fall through */
 	default: /* 7 */
 		if (val & 0xffffffff00000000ULL)
@@ -837,27 +856,21 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 {
-	int res;
-
-	res = __kvm_set_dr(vcpu, dr, val);
-	if (res > 0)
-		kvm_queue_exception(vcpu, UD_VECTOR);
-	else if (res < 0)
+	if (__kvm_set_dr(vcpu, dr, val)) {
 		kvm_inject_gp(vcpu, 0);
-
-	return res;
+		return 1;
+	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_dr);
 
-static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 {
 	switch (dr) {
 	case 0 ... 3:
 		*val = vcpu->arch.db[dr];
 		break;
 	case 4:
-		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-			return 1;
 		/* fall through */
 	case 6:
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
@@ -866,23 +879,11 @@ static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 		*val = kvm_x86_ops->get_dr6(vcpu);
 		break;
 	case 5:
-		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
-			return 1;
 		/* fall through */
 	default: /* 7 */
 		*val = vcpu->arch.dr7;
 		break;
 	}
-
-	return 0;
-}
-
-int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
-{
-	if (_kvm_get_dr(vcpu, dr, val)) {
-		kvm_queue_exception(vcpu, UD_VECTOR);
-		return 1;
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_get_dr);
@@ -1237,21 +1238,22 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
 	bool vcpus_matched;
-	bool do_request = false;
 	struct kvm_arch *ka = &vcpu->kvm->arch;
 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
 
 	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
 			 atomic_read(&vcpu->kvm->online_vcpus));
 
-	if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
-		if (!ka->use_master_clock)
-			do_request = 1;
-
-	if (!vcpus_matched && ka->use_master_clock)
-		do_request = 1;
-
-	if (do_request)
+	/*
+	 * Once the masterclock is enabled, always perform request in
+	 * order to update it.
+	 *
+	 * In order to enable masterclock, the host clocksource must be TSC
+	 * and the vcpus need to have matched TSCs. When that happens,
+	 * perform request to enable masterclock.
+	 */
+	if (ka->use_master_clock ||
+	    (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
 		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 
 	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
@@ -1637,16 +1639,16 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
 	vcpu->last_guest_tsc = tsc_timestamp;
 
+	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+		&guest_hv_clock, sizeof(guest_hv_clock))))
+		return 0;
+
 	/*
 	 * The interface expects us to write an even number signaling that the
 	 * update is finished. Since the guest won't see the intermediate
 	 * state, we just increase by 2 at the end.
 	 */
-	vcpu->hv_clock.version += 2;
-
-	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
-		&guest_hv_clock, sizeof(guest_hv_clock))))
-		return 0;
+	vcpu->hv_clock.version = guest_hv_clock.version + 2;
 
 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
 	pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
@@ -1662,6 +1664,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	vcpu->hv_clock.flags = pvclock_flags;
 
+	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
+
 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
 				&vcpu->hv_clock,
 				sizeof(vcpu->hv_clock));
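Two things are going on in this pair of hunks: guest_hv_clock is now read before the version bump, so the version the guest last saw is carried forward (version = guest_hv_clock.version + 2 rather than a blind += 2, which could lose continuity across save/restore), and the even/odd version field works like a seqcount, with even meaning "consistent". A guest-side reader under the standard pvclock protocol, sketched for illustration (field names follow struct pvclock_vcpu_time_info; the loop is not code from this diff):

	/* Hypothetical guest reader: retry while an update is in flight. */
	u32 version;
	u64 system_time, tsc_timestamp;

	do {
		version = time->version;	/* time = mapped pvclock page */
		rmb();				/* read version before payload */
		system_time = time->system_time;
		tsc_timestamp = time->tsc_timestamp;
		rmb();				/* read payload before re-check */
	} while ((version & 1) || version != time->version);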
@@ -2140,7 +2144,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_TSC_ADJUST:
 		if (guest_cpuid_has_tsc_adjust(vcpu)) {
 			if (!msr_info->host_initiated) {
-				u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
 				kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
 			}
 			vcpu->arch.ia32_tsc_adjust_msr = data;
@@ -3106,7 +3110,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 	unsigned long val;
 
 	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
-	_kvm_get_dr(vcpu, 6, &val);
+	kvm_get_dr(vcpu, 6, &val);
 	dbgregs->dr6 = val;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
@@ -3128,15 +3132,89 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
+
+static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+{
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+	u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
+	u64 valid;
+
+	/*
+	 * Copy legacy XSAVE area, to avoid complications with CPUID
+	 * leaves 0 and 1 in the loop below.
+	 */
+	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
+
+	/* Set XSTATE_BV */
+	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
+
+	/*
+	 * Copy each region from the possibly compacted offset to the
+	 * non-compacted offset.
+	 */
+	valid = xstate_bv & ~XSTATE_FPSSE;
+	while (valid) {
+		u64 feature = valid & -valid;
+		int index = fls64(feature) - 1;
+		void *src = get_xsave_addr(xsave, feature);
+
+		if (src) {
+			u32 size, offset, ecx, edx;
+			cpuid_count(XSTATE_CPUID, index,
+				    &size, &offset, &ecx, &edx);
+			memcpy(dest + offset, src, size);
+		}
+
+		valid -= feature;
+	}
+}
+
+static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
+{
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
+	u64 valid;
+
+	/*
+	 * Copy legacy XSAVE area, to avoid complications with CPUID
+	 * leaves 0 and 1 in the loop below.
+	 */
+	memcpy(xsave, src, XSAVE_HDR_OFFSET);
+
+	/* Set XSTATE_BV and possibly XCOMP_BV. */
+	xsave->xsave_hdr.xstate_bv = xstate_bv;
+	if (cpu_has_xsaves)
+		xsave->xsave_hdr.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
+
+	/*
+	 * Copy each region from the non-compacted offset to the
+	 * possibly compacted offset.
+	 */
+	valid = xstate_bv & ~XSTATE_FPSSE;
+	while (valid) {
+		u64 feature = valid & -valid;
+		int index = fls64(feature) - 1;
+		void *dest = get_xsave_addr(xsave, feature);
+
+		if (dest) {
+			u32 size, offset, ecx, edx;
+			cpuid_count(XSTATE_CPUID, index,
+				    &size, &offset, &ecx, &edx);
+			memcpy(dest, src + offset, size);
+		} else
+			WARN_ON_ONCE(1);
+
+		valid -= feature;
+	}
+}
+
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 					 struct kvm_xsave *guest_xsave)
 {
 	if (cpu_has_xsave) {
-		memcpy(guest_xsave->region,
-			&vcpu->arch.guest_fpu.state->xsave,
-			vcpu->arch.guest_xstate_size);
-		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &=
-			vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE;
+		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
+		fill_xsave((u8 *) guest_xsave->region, vcpu);
 	} else {
 		memcpy(guest_xsave->region,
 			&vcpu->arch.guest_fpu.state->fxsave,
@@ -3160,8 +3238,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 		 */
 		if (xstate_bv & ~kvm_supported_xcr0())
 			return -EINVAL;
-		memcpy(&vcpu->arch.guest_fpu.state->xsave,
-			guest_xsave->region, vcpu->arch.guest_xstate_size);
+		load_xsave(vcpu, (u8 *)guest_xsave->region);
 	} else {
 		if (xstate_bv & ~XSTATE_FPSSE)
 			return -EINVAL;
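fill_xsave() and load_xsave() translate between the compacted layout that XSAVES uses internally and the non-compacted layout the KVM_GET/SET_XSAVE ABI exposes, walking the enabled features with the lowest-set-bit idiom. The iteration skeleton in isolation, restating the loops above (the CPUID convention is real: leaf 0xD, sub-leaf index returns the component's size in EAX and its non-compacted offset in EBX):

	u64 valid = xstate_bv & ~XSTATE_FPSSE;	/* FP/SSE live in the legacy area */

	while (valid) {
		u64 feature = valid & -valid;	/* isolate the lowest set bit */
		int index = fls64(feature) - 1;	/* its bit number */

		/*
		 * CPUID.(EAX=0xD, ECX=index) gives this component's size and
		 * standard-format offset; get_xsave_addr() finds the possibly
		 * compacted in-kernel address for the same component.
		 */

		valid -= feature;		/* clear the bit, continue */
	}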
@@ -4004,7 +4081,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 
 	default:
-		;
+		r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
 	}
 out:
 	return r;
@@ -4667,7 +4744,7 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
-	return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
+	return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
 }
 
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
@@ -5211,21 +5288,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
 
 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 {
-	struct kvm_run *kvm_run = vcpu->run;
-	unsigned long eip = vcpu->arch.emulate_ctxt.eip;
-	u32 dr6 = 0;
-
 	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
 	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
-		dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+		struct kvm_run *kvm_run = vcpu->run;
+		unsigned long eip = kvm_get_linear_rip(vcpu);
+		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
 					   vcpu->arch.guest_debug_dr7,
 					   vcpu->arch.eff_db);
 
 		if (dr6 != 0) {
 			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
-			kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
-				get_segment_base(vcpu, VCPU_SREG_CS);
-
+			kvm_run->debug.arch.pc = eip;
 			kvm_run->debug.arch.exception = DB_VECTOR;
 			kvm_run->exit_reason = KVM_EXIT_DEBUG;
 			*r = EMULATE_USER_EXIT;
@@ -5235,7 +5308,8 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 
 	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
 	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
-		dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+		unsigned long eip = kvm_get_linear_rip(vcpu);
+		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
 					   vcpu->arch.dr7,
 					   vcpu->arch.db);
 
@@ -5365,7 +5439,9 @@ restart:
 		kvm_rip_write(vcpu, ctxt->eip);
 		if (r == EMULATE_DONE)
 			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
-		__kvm_set_rflags(vcpu, ctxt->eflags);
+		if (!ctxt->have_exception ||
+		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+			__kvm_set_rflags(vcpu, ctxt->eflags);
 
 		/*
 		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
@@ -5965,6 +6041,12 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
 					     X86_EFLAGS_RF);
 
+		if (vcpu->arch.exception.nr == DB_VECTOR &&
+		    (vcpu->arch.dr7 & DR7_GD)) {
+			vcpu->arch.dr7 &= ~DR7_GD;
+			kvm_update_dr7(vcpu);
+		}
+
 		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
 					  vcpu->arch.exception.has_error_code,
 					  vcpu->arch.exception.error_code,
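This mirrors hardware behavior for DR7.GD (general detect): when the #DB that GD guards is delivered, the CPU clears GD so the handler can touch the debug registers without immediately re-faulting, so the emulated dr7 must be updated in step via kvm_update_dr7(). For reference (bit position per the SDM):

	#define DR7_GD	(1 << 13)	/* general detect: #DB on any debug-register access */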
@@ -6873,6 +6955,9 @@ int fx_init(struct kvm_vcpu *vcpu)
 		return err;
 
 	fpu_finit(&vcpu->arch.guest_fpu);
+	if (cpu_has_xsaves)
+		vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv =
+			host_xcr0 | XSTATE_COMPACTION_ENABLED;
 
 	/*
 	 * Ensure guest xcr0 is valid for loading
@@ -7024,7 +7109,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->vcpu_reset(vcpu);
 }
 
-void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 {
 	struct kvm_segment cs;
 
@@ -7256,6 +7341,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (type)
 		return -EINVAL;
 
+	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
@@ -7536,12 +7622,18 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
-bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
 {
-	unsigned long current_rip = kvm_rip_read(vcpu) +
-		get_segment_base(vcpu, VCPU_SREG_CS);
+	if (is_64_bit_mode(vcpu))
+		return kvm_rip_read(vcpu);
+	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
+		     kvm_rip_read(vcpu));
+}
+EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
 
-	return current_rip == linear_rip;
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+{
+	return kvm_get_linear_rip(vcpu) == linear_rip;
 }
 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
 
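With this refactor the linear RIP is computed mode-aware (CS.base is ignored in 64-bit mode; otherwise the sum truncates to 32 bits), and kvm_is_linear_rip() reduces to a comparison. A usage sketch of the save-and-compare pattern, in the spirit of the single-step tracking that calls kvm_is_linear_rip() (illustrative only; handle_same_rip() is hypothetical):

	/* Remember where the guest was... */
	unsigned long rip = kvm_get_linear_rip(vcpu);

	/* ...run or emulate, then test whether it is still there. */
	if (kvm_is_linear_rip(vcpu, rip))
		handle_same_rip(vcpu);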