aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorJan Kiszka <jan.kiszka@siemens.com>2014-06-16 07:59:43 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2014-06-19 06:52:13 -0400
commit2996fca0690f03a5220203588f4a0d8c5acba2b0 (patch)
tree4190e1c208c3ab89c585524c91d688feb670de8a /arch
parent560b7ee12ca5e1ebc1675d7eb4008bb22708277a (diff)
KVM: nVMX: Allow to disable VM_{ENTRY_LOAD,EXIT_SAVE}_DEBUG_CONTROLS
Allow L1 to "leak" its debug controls into L2, i.e. permit cleared VM_{ENTRY_LOAD,EXIT_SAVE}_DEBUG_CONTROLS. This requires manually transferring the state of DR7 and IA32_DEBUGCTLMSR from L1 into L2, as both run on different VMCSs. Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kvm/vmx.c44
1 file changed, 38 insertions, 6 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 66365a009cff..b93e2ae2bb62 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -383,6 +383,9 @@ struct nested_vmx {
383 383
384 struct hrtimer preemption_timer; 384 struct hrtimer preemption_timer;
385 bool preemption_timer_expired; 385 bool preemption_timer_expired;
386
387 /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
388 u64 vmcs01_debugctl;
386}; 389};
387 390
388#define POSTED_INTR_ON 0 391#define POSTED_INTR_ON 0
@@ -2243,7 +2246,9 @@ static u32 nested_vmx_true_procbased_ctls_low;
2243static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high; 2246static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
2244static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high; 2247static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
2245static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; 2248static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
2249static u32 nested_vmx_true_exit_ctls_low;
2246static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; 2250static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
2251static u32 nested_vmx_true_entry_ctls_low;
2247static u32 nested_vmx_misc_low, nested_vmx_misc_high; 2252static u32 nested_vmx_misc_low, nested_vmx_misc_high;
2248static u32 nested_vmx_ept_caps; 2253static u32 nested_vmx_ept_caps;
2249static __init void nested_vmx_setup_ctls_msrs(void) 2254static __init void nested_vmx_setup_ctls_msrs(void)
@@ -2289,6 +2294,10 @@ static __init void nested_vmx_setup_ctls_msrs(void)
2289 if (vmx_mpx_supported()) 2294 if (vmx_mpx_supported())
2290 nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; 2295 nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2291 2296
2297 /* We support free control of debug control saving. */
2298 nested_vmx_true_exit_ctls_low = nested_vmx_exit_ctls_low &
2299 ~VM_EXIT_SAVE_DEBUG_CONTROLS;
2300
2292 /* entry controls */ 2301 /* entry controls */
2293 rdmsr(MSR_IA32_VMX_ENTRY_CTLS, 2302 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2294 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); 2303 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
@@ -2303,6 +2312,10 @@ static __init void nested_vmx_setup_ctls_msrs(void)
2303 if (vmx_mpx_supported()) 2312 if (vmx_mpx_supported())
2304 nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; 2313 nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2305 2314
2315 /* We support free control of debug control loading. */
2316 nested_vmx_true_entry_ctls_low = nested_vmx_entry_ctls_low &
2317 ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
2318
2306 /* cpu-based controls */ 2319 /* cpu-based controls */
2307 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, 2320 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2308 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); 2321 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
@@ -2409,11 +2422,17 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2409 nested_vmx_procbased_ctls_high); 2422 nested_vmx_procbased_ctls_high);
2410 break; 2423 break;
2411 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 2424 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2425 *pdata = vmx_control_msr(nested_vmx_true_exit_ctls_low,
2426 nested_vmx_exit_ctls_high);
2427 break;
2412 case MSR_IA32_VMX_EXIT_CTLS: 2428 case MSR_IA32_VMX_EXIT_CTLS:
2413 *pdata = vmx_control_msr(nested_vmx_exit_ctls_low, 2429 *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
2414 nested_vmx_exit_ctls_high); 2430 nested_vmx_exit_ctls_high);
2415 break; 2431 break;
2416 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 2432 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2433 *pdata = vmx_control_msr(nested_vmx_true_entry_ctls_low,
2434 nested_vmx_entry_ctls_high);
2435 break;
2417 case MSR_IA32_VMX_ENTRY_CTLS: 2436 case MSR_IA32_VMX_ENTRY_CTLS:
2418 *pdata = vmx_control_msr(nested_vmx_entry_ctls_low, 2437 *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
2419 nested_vmx_entry_ctls_high); 2438 nested_vmx_entry_ctls_high);
@@ -7836,7 +7855,13 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7836 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 7855 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
7837 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 7856 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
7838 7857
7839 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); 7858 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
7859 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
7860 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
7861 } else {
7862 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
7863 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
7864 }
7840 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 7865 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
7841 vmcs12->vm_entry_intr_info_field); 7866 vmcs12->vm_entry_intr_info_field);
7842 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 7867 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
@@ -7846,7 +7871,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7846 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 7871 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
7847 vmcs12->guest_interruptibility_info); 7872 vmcs12->guest_interruptibility_info);
7848 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 7873 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
7849 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
7850 vmx_set_rflags(vcpu, vmcs12->guest_rflags); 7874 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
7851 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 7875 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
7852 vmcs12->guest_pending_dbg_exceptions); 7876 vmcs12->guest_pending_dbg_exceptions);
@@ -8143,9 +8167,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
8143 !vmx_control_verify(vmcs12->pin_based_vm_exec_control, 8167 !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
8144 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) || 8168 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
8145 !vmx_control_verify(vmcs12->vm_exit_controls, 8169 !vmx_control_verify(vmcs12->vm_exit_controls,
8146 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) || 8170 nested_vmx_true_exit_ctls_low,
8171 nested_vmx_exit_ctls_high) ||
8147 !vmx_control_verify(vmcs12->vm_entry_controls, 8172 !vmx_control_verify(vmcs12->vm_entry_controls,
8148 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high)) 8173 nested_vmx_true_entry_ctls_low,
8174 nested_vmx_entry_ctls_high))
8149 { 8175 {
8150 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 8176 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
8151 return 1; 8177 return 1;
@@ -8222,6 +8248,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
8222 8248
8223 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); 8249 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
8224 8250
8251 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
8252 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
8253
8225 cpu = get_cpu(); 8254 cpu = get_cpu();
8226 vmx->loaded_vmcs = vmcs02; 8255 vmx->loaded_vmcs = vmcs02;
8227 vmx_vcpu_put(vcpu); 8256 vmx_vcpu_put(vcpu);
@@ -8399,7 +8428,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
8399 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 8428 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
8400 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 8429 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
8401 8430
8402 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
8403 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); 8431 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
8404 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); 8432 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
8405 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 8433 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
@@ -8478,9 +8506,13 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
8478 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 8506 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
8479 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); 8507 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
8480 8508
8509 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
8510 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
8511 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
8512 }
8513
8481 /* TODO: These cannot have changed unless we have MSR bitmaps and 8514 /* TODO: These cannot have changed unless we have MSR bitmaps and
8482 * the relevant bit asks not to trap the change */ 8515 * the relevant bit asks not to trap the change */
8483 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
8484 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) 8516 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
8485 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); 8517 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
8486 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) 8518 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)