Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--  arch/x86/kvm/svm.c  |  90
1 file changed, 70 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b3e488a74828..be9c839e2c89 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -178,6 +179,8 @@ struct vcpu_svm {
 	uint64_t sysenter_eip;
 	uint64_t tsc_aux;
 
+	u64 msr_decfg;
+
 	u64 next_rip;
 
 	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
@@ -300,6 +303,8 @@ module_param(vgif, int, 0444);
 static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
 module_param(sev, int, 0444);
 
+static u8 rsm_ins_bytes[] = "\x0f\xaa";
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -1383,6 +1388,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	set_intercept(svm, INTERCEPT_SKINIT);
 	set_intercept(svm, INTERCEPT_WBINVD);
 	set_intercept(svm, INTERCEPT_XSETBV);
+	set_intercept(svm, INTERCEPT_RSM);
 
 	if (!kvm_mwait_in_guest()) {
 		set_intercept(svm, INTERCEPT_MONITOR);
@@ -1902,6 +1908,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	u32 dummy;
 	u32 eax = 1;
 
+	vcpu->arch.microcode_version = 0x01000065;
 	svm->spec_ctrl = 0;
 
 	if (!init_event) {
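The microcode revision reported to the guest is no longer a constant buried in svm_get_msr() (see the removal further down); it becomes per-vCPU state in vcpu->arch, seeded here at reset with the same 0x01000065 value. Since it now flows through the common MSR paths, host userspace can read it (and, with the companion common-code change in this series, override it). A minimal userspace sketch, assuming an already-created vcpu_fd and eliding error handling:

#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#define MSR_IA32_UCODE_REV 0x0000008b

static unsigned long long read_ucode_rev(int vcpu_fd)
{
        /* KVM_GET_MSRS takes a kvm_msrs header followed by entries. */
        struct { struct kvm_msrs hdr; struct kvm_msr_entry e; } buf = {
                .hdr.nmsrs = 1,
                .e.index   = MSR_IA32_UCODE_REV,
        };

        if (ioctl(vcpu_fd, KVM_GET_MSRS, &buf.hdr) != 1) {
                perror("KVM_GET_MSRS");
                exit(1);
        }
        return buf.e.data;      /* 0x01000065 after reset, per this patch */
}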
@@ -3699,6 +3706,12 @@ static int emulate_on_interception(struct vcpu_svm *svm)
 	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
+static int rsm_interception(struct vcpu_svm *svm)
+{
+	return x86_emulate_instruction(&svm->vcpu, 0, 0,
+				       rsm_ins_bytes, 2) == EMULATE_DONE;
+}
+
 static int rdpmc_interception(struct vcpu_svm *svm)
 {
 	int err;
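RSM is now intercepted (INTERCEPT_RSM above) and handed to the x86 emulator with a fixed two-byte buffer rather than being fetched from guest memory; feeding the known opcode 0f aa to x86_emulate_instruction() matters for SEV guests, whose encrypted memory the host cannot decode. One easy-to-miss detail is the string-literal initializer of rsm_ins_bytes; a standalone illustration (not kernel code):

#include <assert.h>

static unsigned char rsm_ins_bytes[] = "\x0f\xaa";

int main(void)
{
        /* The string literal appends a NUL, so the array is 3 bytes long,
         * while the emulator above is handed exactly 2 bytes: RSM = 0F AA. */
        assert(sizeof(rsm_ins_bytes) == 3);
        assert(rsm_ins_bytes[0] == 0x0f);
        assert(rsm_ins_bytes[1] == 0xaa);
        return 0;
}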
@@ -3860,6 +3873,22 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 	return 0;
 }
 
+static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+{
+	msr->data = 0;
+
+	switch (msr->index) {
+	case MSR_F10H_DECFG:
+		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+			msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
+		break;
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
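svm_get_msr_feature() implements the new get_msr_feature hook (wired into svm_x86_ops at the bottom of this patch): it reports what may be exposed for a feature MSR independent of any vCPU. The only entry so far is MSR_F10H_DECFG, whose LFENCE-serializing bit is advertised when the host CPU has it. Feature MSRs are queried on the /dev/kvm system fd; a sketch of the uAPI that accompanies this hook, assuming KVM_GET_MSR_FEATURE_INDEX_LIST and system-fd KVM_GET_MSRS from the same series (error handling trimmed, list capped at 64 entries for brevity):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

#define MSR_F10H_DECFG 0xc0011029

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        struct { struct kvm_msr_list hdr; __u32 idx[64]; } list = {
                .hdr.nmsrs = 64,
        };
        struct { struct kvm_msrs hdr; struct kvm_msr_entry e; } msrs = {
                .hdr.nmsrs = 1,
                .e.index   = MSR_F10H_DECFG,
        };

        /* Enumerate the feature MSRs KVM knows about... */
        ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, &list.hdr);
        for (__u32 i = 0; i < list.hdr.nmsrs; i++)
                printf("feature MSR: 0x%x\n", list.idx[i]);

        /* ...then read the supported DECFG bits via the system fd. */
        if (ioctl(kvm, KVM_GET_MSRS, &msrs.hdr) == 1)
                printf("DECFG supported bits: 0x%llx\n",
                       (unsigned long long)msrs.e.data);
        return 0;
}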
@@ -3935,9 +3964,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		msr_info->data = svm->spec_ctrl;
 		break;
-	case MSR_IA32_UCODE_REV:
-		msr_info->data = 0x01000065;
-		break;
 	case MSR_F15H_IC_CFG: {
 
 		int family, model;
@@ -3955,6 +3981,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			msr_info->data = 0x1E;
 		}
 		break;
+	case MSR_F10H_DECFG:
+		msr_info->data = svm->msr_decfg;
+		break;
 	default:
 		return kvm_get_msr_common(vcpu, msr_info);
 	}
@@ -4133,6 +4162,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_VM_IGNNE:
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
+	case MSR_F10H_DECFG: {
+		struct kvm_msr_entry msr_entry;
+
+		msr_entry.index = msr->index;
+		if (svm_get_msr_feature(&msr_entry))
+			return 1;
+
+		/* Check the supported bits */
+		if (data & ~msr_entry.data)
+			return 1;
+
+		/* Don't allow the guest to change a bit, #GP */
+		if (!msr->host_initiated && (data ^ msr_entry.data))
+			return 1;
+
+		svm->msr_decfg = data;
+		break;
+	}
 	case MSR_IA32_APICBASE:
 		if (kvm_vcpu_apicv_active(vcpu))
 			avic_update_vapic_bar(to_svm(vcpu), data);
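The write side only accepts values consistent with svm_get_msr_feature(): setting any unsupported bit fails, and a guest (unlike the host, which may restore any supported value) also fails with #GP if it flips a supported bit away from the advertised value. A standalone sketch of the acceptance rule, using the kernel's MSR_F10H_DECFG_LFENCE_SERIALIZE bit (bit 1) as the one supported bit; decfg_write_ok() is a hypothetical mirror of the checks above, not a kernel function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DECFG_LFENCE_SERIALIZE (1ULL << 1)

static int decfg_write_ok(uint64_t data, uint64_t supported, bool host_initiated)
{
        if (data & ~supported)                  /* unsupported bit set -> #GP */
                return 0;
        if (!host_initiated && (data ^ supported))      /* guest changed a bit */
                return 0;
        return 1;
}

int main(void)
{
        uint64_t sup = DECFG_LFENCE_SERIALIZE;

        printf("%d\n", decfg_write_ok(sup, sup, false));        /* 1: unchanged */
        printf("%d\n", decfg_write_ok(0, sup, false));          /* 0: guest clears a bit */
        printf("%d\n", decfg_write_ok(0, sup, true));           /* 1: host may override */
        printf("%d\n", decfg_write_ok(1ULL << 5, sup, false));  /* 0: unsupported bit */
        return 0;
}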
@@ -4541,7 +4588,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_MWAIT]			= mwait_interception,
 	[SVM_EXIT_XSETBV]			= xsetbv_interception,
 	[SVM_EXIT_NPF]				= npf_interception,
-	[SVM_EXIT_RSM]				= emulate_on_interception,
+	[SVM_EXIT_RSM]				= rsm_interception,
 	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
 	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
 };
@@ -5355,7 +5402,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5464,11 +5511,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
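Together with the earlier hunk, SPEC_CTRL is now accessed with the native_ MSR helpers. The paravirt wrmsrl()/rdmsrl() wrappers can resolve to an indirect call, which is exactly the kind of branch that must not execute speculatively between guest exit and the restore of the host's SPEC_CTRL value; the native_ variants compile down to the bare instruction. Roughly, as an illustrative approximation rather than the kernel's exact definition:

/* The raw WRMSR instruction: MSR index in ECX, 64-bit value split
 * across EDX:EAX.  The kernel's real native_wrmsrl() layers exception
 * handling and tracing hooks on top of this. */
static inline void wrmsr_raw(unsigned int msr, unsigned long long val)
{
        asm volatile("wrmsr"
                     : /* no outputs */
                     : "c" (msr), "a" ((unsigned int)val),
                       "d" ((unsigned int)(val >> 32))
                     : "memory");
}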
@@ -6236,16 +6283,18 @@ e_free:
 
 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
+	void __user *measure = (void __user *)(uintptr_t)argp->data;
 	struct kvm_sev_info *sev = &kvm->arch.sev_info;
 	struct sev_data_launch_measure *data;
 	struct kvm_sev_launch_measure params;
+	void __user *p = NULL;
 	void *blob = NULL;
 	int ret;
 
 	if (!sev_guest(kvm))
 		return -ENOTTY;
 
-	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+	if (copy_from_user(&params, measure, sizeof(params)))
 		return -EFAULT;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -6256,17 +6305,13 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	if (!params.len)
 		goto cmd;
 
-	if (params.uaddr) {
+	p = (void __user *)(uintptr_t)params.uaddr;
+	if (p) {
 		if (params.len > SEV_FW_BLOB_MAX_SIZE) {
 			ret = -EINVAL;
 			goto e_free;
 		}
 
-		if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
-			ret = -EFAULT;
-			goto e_free;
-		}
-
 		ret = -ENOMEM;
 		blob = kmalloc(params.len, GFP_KERNEL);
 		if (!blob)
@@ -6290,13 +6335,13 @@ cmd:
 		goto e_free_blob;
 
 	if (blob) {
-		if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
+		if (copy_to_user(p, blob, params.len))
 			ret = -EFAULT;
 	}
 
 done:
 	params.len = data->len;
-	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
+	if (copy_to_user(measure, &params, sizeof(params)))
 		ret = -EFAULT;
 e_free_blob:
 	kfree(blob);
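Caching argp->data in measure and params.uaddr in p tidies the repeated __user casts, and the explicit access_ok() is dropped because copy_to_user() performs the same access validation and already yields -EFAULT at the copy site. The ioctl keeps its two-call convention: invoked with len == 0 it falls through to done: and only reports the required blob size back. A userspace sketch of that pattern, assuming an SEV launch in progress on vm_fd and eliding error handling:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static void get_measurement(int vm_fd, int sev_fd)
{
        struct kvm_sev_launch_measure params = { 0 };
        struct kvm_sev_cmd cmd = {
                .id     = KVM_SEV_LAUNCH_MEASURE,
                .data   = (uintptr_t)&params,
                .sev_fd = sev_fd,
        };
        void *blob;

        /* First call: uaddr == 0, len == 0 -> kernel fills params.len. */
        ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

        /* Second call: provide a buffer of the reported size. */
        blob = malloc(params.len);
        params.uaddr = (uintptr_t)blob;
        ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}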
@@ -6597,7 +6642,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	struct page **pages;
 	void *blob, *hdr;
 	unsigned long n;
-	int ret;
+	int ret, offset;
 
 	if (!sev_guest(kvm))
 		return -ENOTTY;
@@ -6623,6 +6668,10 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	if (!data)
 		goto e_unpin_memory;
 
+	offset = params.guest_uaddr & (PAGE_SIZE - 1);
+	data->guest_address = __sme_page_pa(pages[0]) + offset;
+	data->guest_len = params.guest_len;
+
 	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
 	if (IS_ERR(blob)) {
 		ret = PTR_ERR(blob);
@@ -6637,8 +6686,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
 		ret = PTR_ERR(hdr);
 		goto e_free_blob;
 	}
-	data->trans_address = __psp_pa(blob);
-	data->trans_len = params.trans_len;
+	data->hdr_address = __psp_pa(hdr);
+	data->hdr_len = params.hdr_len;
 
 	data->handle = sev->handle;
 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
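Two fixes in sev_launch_secret(): the header blob is now recorded in hdr_address/hdr_len, where these lines previously re-set trans_address/trans_len from the transport blob and left the header fields unset; and the guest address handed to firmware keeps the sub-page offset of guest_uaddr, since pinning yields page-aligned physical addresses. Illustrative arithmetic with made-up values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
        uint64_t guest_uaddr = 0x7f12345678ULL;   /* not page aligned */
        uint64_t page_pa     = 0x1a2b000ULL;      /* pa of the pinned page */
        uint64_t offset      = guest_uaddr & (PAGE_SIZE - 1);

        /* The byte offset within the first page is re-applied to the
         * physical address, as data->guest_address does above. */
        printf("offset  = 0x%llx\n", (unsigned long long)offset);   /* 0x678 */
        printf("fw addr = 0x%llx\n", (unsigned long long)(page_pa + offset));
        return 0;
}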
@@ -6821,6 +6870,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.vcpu_unblocking = svm_vcpu_unblocking,
 
 	.update_bp_intercept = update_bp_intercept,
+	.get_msr_feature = svm_get_msr_feature,
 	.get_msr = svm_get_msr,
 	.set_msr = svm_set_msr,
 	.get_segment_base = svm_get_segment_base,