diff options
author | Alexander Graf <agraf@suse.de> | 2008-11-25 14:17:06 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-03-24 05:02:47 -0400 |
commit | 5542675baa7e62ca4d18278c8758b6a4ec410639 (patch) | |
tree | ca7b8a45f32701ebeafd1f982f39934efa178fdf /arch/x86/kvm/svm.c | |
parent | b286d5d8b0836e76832dafcc5a18b0e8e5a3bc5e (diff) |
KVM: SVM: Add VMLOAD and VMSAVE handlers
This implements the VMLOAD and VMSAVE instructions, which usually surround
the VMRUN instruction. Both instructions load / restore the same elements,
so we only need to implement them once.
v2 fixes CPL checking and replaces memcpy with assignments
v3 makes use of the new permission checking
Acked-by: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r-- | arch/x86/kvm/svm.c | 60 |
1 files changed, 58 insertions, 2 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 59aaff1c9597..a83c94eb5771 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1243,6 +1243,62 @@ static int nested_svm_do(struct vcpu_svm *svm, | |||
1243 | return retval; | 1243 | return retval; |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) | ||
1247 | { | ||
1248 | to_vmcb->save.fs = from_vmcb->save.fs; | ||
1249 | to_vmcb->save.gs = from_vmcb->save.gs; | ||
1250 | to_vmcb->save.tr = from_vmcb->save.tr; | ||
1251 | to_vmcb->save.ldtr = from_vmcb->save.ldtr; | ||
1252 | to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base; | ||
1253 | to_vmcb->save.star = from_vmcb->save.star; | ||
1254 | to_vmcb->save.lstar = from_vmcb->save.lstar; | ||
1255 | to_vmcb->save.cstar = from_vmcb->save.cstar; | ||
1256 | to_vmcb->save.sfmask = from_vmcb->save.sfmask; | ||
1257 | to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs; | ||
1258 | to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp; | ||
1259 | to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; | ||
1260 | |||
1261 | return 1; | ||
1262 | } | ||
1263 | |||
1264 | static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb, | ||
1265 | void *arg2, void *opaque) | ||
1266 | { | ||
1267 | return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb); | ||
1268 | } | ||
1269 | |||
1270 | static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb, | ||
1271 | void *arg2, void *opaque) | ||
1272 | { | ||
1273 | return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb); | ||
1274 | } | ||
1275 | |||
1276 | static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1277 | { | ||
1278 | if (nested_svm_check_permissions(svm)) | ||
1279 | return 1; | ||
1280 | |||
1281 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; | ||
1282 | skip_emulated_instruction(&svm->vcpu); | ||
1283 | |||
1284 | nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload); | ||
1285 | |||
1286 | return 1; | ||
1287 | } | ||
1288 | |||
1289 | static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | ||
1290 | { | ||
1291 | if (nested_svm_check_permissions(svm)) | ||
1292 | return 1; | ||
1293 | |||
1294 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; | ||
1295 | skip_emulated_instruction(&svm->vcpu); | ||
1296 | |||
1297 | nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave); | ||
1298 | |||
1299 | return 1; | ||
1300 | } | ||
1301 | |||
1246 | static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | 1302 | static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) |
1247 | { | 1303 | { |
1248 | if (nested_svm_check_permissions(svm)) | 1304 | if (nested_svm_check_permissions(svm)) |
@@ -1578,8 +1634,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm, | |||
1578 | [SVM_EXIT_SHUTDOWN] = shutdown_interception, | 1634 | [SVM_EXIT_SHUTDOWN] = shutdown_interception, |
1579 | [SVM_EXIT_VMRUN] = invalid_op_interception, | 1635 | [SVM_EXIT_VMRUN] = invalid_op_interception, |
1580 | [SVM_EXIT_VMMCALL] = vmmcall_interception, | 1636 | [SVM_EXIT_VMMCALL] = vmmcall_interception, |
1581 | [SVM_EXIT_VMLOAD] = invalid_op_interception, | 1637 | [SVM_EXIT_VMLOAD] = vmload_interception, |
1582 | [SVM_EXIT_VMSAVE] = invalid_op_interception, | 1638 | [SVM_EXIT_VMSAVE] = vmsave_interception, |
1583 | [SVM_EXIT_STGI] = stgi_interception, | 1639 | [SVM_EXIT_STGI] = stgi_interception, |
1584 | [SVM_EXIT_CLGI] = clgi_interception, | 1640 | [SVM_EXIT_CLGI] = clgi_interception, |
1585 | [SVM_EXIT_SKINIT] = invalid_op_interception, | 1641 | [SVM_EXIT_SKINIT] = invalid_op_interception, |