-rw-r--r--	arch/x86/kvm/x86.c	90
1 file changed, 83 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 08b5657e57ed..c259814200bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3132,15 +3132,89 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
+
+static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+{
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+	u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
+	u64 valid;
+
+	/*
+	 * Copy legacy XSAVE area, to avoid complications with CPUID
+	 * leaves 0 and 1 in the loop below.
+	 */
+	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
+
+	/* Set XSTATE_BV */
+	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
+
+	/*
+	 * Copy each region from the possibly compacted offset to the
+	 * non-compacted offset.
+	 */
+	valid = xstate_bv & ~XSTATE_FPSSE;
+	while (valid) {
+		u64 feature = valid & -valid;
+		int index = fls64(feature) - 1;
+		void *src = get_xsave_addr(xsave, feature);
+
+		if (src) {
+			u32 size, offset, ecx, edx;
+			cpuid_count(XSTATE_CPUID, index,
+				    &size, &offset, &ecx, &edx);
+			memcpy(dest + offset, src, size);
+		}
+
+		valid -= feature;
+	}
+}
+
+static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
+{
+	struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
+	u64 valid;
+
+	/*
+	 * Copy legacy XSAVE area, to avoid complications with CPUID
+	 * leaves 0 and 1 in the loop below.
+	 */
+	memcpy(xsave, src, XSAVE_HDR_OFFSET);
+
+	/* Set XSTATE_BV and possibly XCOMP_BV. */
+	xsave->xsave_hdr.xstate_bv = xstate_bv;
+	if (cpu_has_xsaves)
+		xsave->xsave_hdr.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
+
+	/*
+	 * Copy each region from the non-compacted offset to the
+	 * possibly compacted offset.
+	 */
+	valid = xstate_bv & ~XSTATE_FPSSE;
+	while (valid) {
+		u64 feature = valid & -valid;
+		int index = fls64(feature) - 1;
+		void *dest = get_xsave_addr(xsave, feature);
+
+		if (dest) {
+			u32 size, offset, ecx, edx;
+			cpuid_count(XSTATE_CPUID, index,
+				    &size, &offset, &ecx, &edx);
+			memcpy(dest, src + offset, size);
+		} else
+			WARN_ON_ONCE(1);
+
+		valid -= feature;
+	}
+}
+
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 					 struct kvm_xsave *guest_xsave)
 {
 	if (cpu_has_xsave) {
-		memcpy(guest_xsave->region,
-			&vcpu->arch.guest_fpu.state->xsave,
-			vcpu->arch.guest_xstate_size);
-		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &=
-			vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE;
+		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
+		fill_xsave((u8 *) guest_xsave->region, vcpu);
 	} else {
 		memcpy(guest_xsave->region,
 			&vcpu->arch.guest_fpu.state->fxsave,
@@ -3164,8 +3238,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 		 */
 		if (xstate_bv & ~kvm_supported_xcr0())
 			return -EINVAL;
-		memcpy(&vcpu->arch.guest_fpu.state->xsave,
-			guest_xsave->region, vcpu->arch.guest_xstate_size);
+		load_xsave(vcpu, (u8 *)guest_xsave->region);
 	} else {
 		if (xstate_bv & ~XSTATE_FPSSE)
 			return -EINVAL;
@@ -6882,6 +6955,9 @@ int fx_init(struct kvm_vcpu *vcpu)
 		return err;
 
 	fpu_finit(&vcpu->arch.guest_fpu);
+	if (cpu_has_xsaves)
+		vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv =
+			host_xcr0 | XSTATE_COMPACTION_ENABLED;
 
 	/*
 	 * Ensure guest xcr0 is valid for loading
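
The conversion implemented by fill_xsave() and load_xsave() rests on two idioms: valid & -valid isolates the lowest set bit of the xstate feature mask, and CPUID leaf 0DH, sub-leaf i, reports each feature's save-area size (EAX) and its offset in the standard, non-compacted format (EBX). The following standalone userspace sketch walks the same layout; it is an illustration of the technique, not part of the patch, and it assumes an XSAVE-capable x86 host compiled with GCC or clang, whose <cpuid.h> provides the __cpuid_count macro.

/*
 * Sketch: enumerate XSAVE features the way fill_xsave()/load_xsave()
 * do, but from userspace.  Assumes the CPU supports CPUID leaf 0DH.
 */
#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

#define XSTATE_CPUID	0x0000000d
#define XSTATE_FPSSE	0x3		/* x87 (bit 0) + SSE (bit 1) */

int main(void)
{
	uint32_t eax, ebx, ecx, edx;
	uint64_t xstate_bv, valid;

	/* Sub-leaf 0: EDX:EAX is the mask of supported xstate_bv bits. */
	__cpuid_count(XSTATE_CPUID, 0, eax, ebx, ecx, edx);
	xstate_bv = ((uint64_t)edx << 32) | eax;

	/* x87/SSE live at fixed offsets in the legacy area; skip them. */
	valid = xstate_bv & ~(uint64_t)XSTATE_FPSSE;
	while (valid) {
		uint64_t feature = valid & -valid;	/* lowest set bit */
		int index = __builtin_ctzll(feature);	/* its bit number */

		/* Sub-leaf index: EAX = size, EBX = non-compacted offset. */
		__cpuid_count(XSTATE_CPUID, index, eax, ebx, ecx, edx);
		printf("xstate %2d: size %4u, standard offset %4u\n",
		       index, eax, ebx);

		valid -= feature;
	}
	return 0;
}

The sketch uses __builtin_ctzll(feature) where the kernel uses fls64(feature) - 1; for a value with exactly one bit set the two agree. On XSAVES-capable hardware the compacted offsets differ from the standard ones printed here, which is why the patch must copy the state region by region rather than with a single flat memcpy.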