author		Dongxiao Xu <dongxiao.xu@intel.com>	2010-05-11 06:29:38 -0400
committer	Avi Kivity <avi@redhat.com>		2010-08-01 03:35:41 -0400
commit		7725b89414836df492d6222b1d3cacb0ca576d77
tree		1e96bbd57950801e6f8396752cb8bff2fc2a8ffa
parent		6859762e8ae32ec258a671faf5c9fef07b25b83f
KVM: VMX: Define new functions to wrap direct calls to asm code

Define vmcs_load() and kvm_cpu_vmxon() to avoid direct calls to asm
code. Also move the VMXE bit operation out of kvm_cpu_vmxoff().
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--	arch/x86/kvm/vmx.c | 36
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a82cfa1e2a40..823288821444 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -453,6 +453,19 @@ static void vmcs_clear(struct vmcs *vmcs)
 		       vmcs, phys_addr);
 }
 
+static void vmcs_load(struct vmcs *vmcs)
+{
+	u64 phys_addr = __pa(vmcs);
+	u8 error;
+
+	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
+			: "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+			: "cc", "memory");
+	if (error)
+		printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
+		       vmcs, phys_addr);
+}
+
 static void __vcpu_clear(void *arg)
 {
 	struct vcpu_vmx *vmx = arg;
@@ -830,7 +843,6 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 phys_addr = __pa(vmx->vmcs);
 	u64 tsc_this, delta, new_offset;
 
 	if (vcpu->cpu != cpu) {
@@ -844,15 +856,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
-		u8 error;
-
 		per_cpu(current_vmcs, cpu) = vmx->vmcs;
-		asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
-			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
-			      : "cc");
-		if (error)
-			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
-			       vmx->vmcs, phys_addr);
+		vmcs_load(vmx->vmcs);
 	}
 
 	if (vcpu->cpu != cpu) {
@@ -1288,6 +1293,13 @@ static __init int vmx_disabled_by_bios(void)
 	/* locked but not enabled */
 }
 
+static void kvm_cpu_vmxon(u64 addr)
+{
+	asm volatile (ASM_VMX_VMXON_RAX
+			: : "a"(&addr), "m"(addr)
+			: "memory", "cc");
+}
+
 static int hardware_enable(void *garbage)
 {
 	int cpu = raw_smp_processor_id();
@@ -1310,9 +1322,7 @@ static int hardware_enable(void *garbage)
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
 	}
 	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
-	asm volatile (ASM_VMX_VMXON_RAX
-		      : : "a"(&phys_addr), "m"(phys_addr)
-		      : "memory", "cc");
+	kvm_cpu_vmxon(phys_addr);
 
 	ept_sync_global();
 
@@ -1336,13 +1346,13 @@ static void vmclear_local_vcpus(void)
 static void kvm_cpu_vmxoff(void)
 {
 	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
-	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static void hardware_disable(void *garbage)
 {
 	vmclear_local_vcpus();
 	kvm_cpu_vmxoff();
+	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
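
Taken together, the patch routes the VMPTRLD/VMXON/VMXOFF instructions through the new wrappers and leaves CR4.VMXE management to hardware_enable()/hardware_disable(). The sketch below is a condensed, illustrative view of the resulting enable/disable paths, not the literal post-patch source: the IA32_FEATURE_CONTROL checks and error handling are elided, and the per-cpu vmxarea lookup for phys_addr is assumed from the surrounding vmx.c code rather than shown in this diff.

/*
 * Condensed view of the post-patch paths; see the hunks above for the
 * exact code. MSR setup and error handling are omitted.
 */
static int hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); /* this CPU's VMXON region (assumed) */

	/* ... IA32_FEATURE_CONTROL checks elided ... */
	write_cr4(read_cr4() | X86_CR4_VMXE);	/* caller sets VMXE ...              */
	kvm_cpu_vmxon(phys_addr);		/* ... then the wrapper runs VMXON   */
	ept_sync_global();
	return 0;
}

static void hardware_disable(void *garbage)
{
	vmclear_local_vcpus();			/* VMCLEAR any loaded VMCSs          */
	kvm_cpu_vmxoff();			/* wrapper now runs VMXOFF only ...  */
	write_cr4(read_cr4() & ~X86_CR4_VMXE);	/* ... VMXE clearing moved out here  */
}

The symmetry is the point of moving the VMXE clear: CR4 is set and cleared in the same functions that call the vmxon/vmxoff wrappers, which keeps the raw-instruction wrappers free of CR4 side effects and lets callers manage the VMXE bit independently. Likewise, vmx_vcpu_load() now switches the current VMCS through vmcs_load(vmx->vmcs) instead of an open-coded VMPTRLD.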