author		Joerg Roedel <joerg.roedel@amd.com>	2008-02-13 12:58:47 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 04:53:21 -0400
commit		24e09cbf480a72f9c952af4ca77b159503dca44b (patch)
tree		f1f321963c620d26172f8fd0878374d9598c0ad0
parent		f65c229c3e7743c6654c16b9ec6248466b5eef21 (diff)
KVM: SVM: enable LBR virtualization
This patch adds support for the Last Branch Record Virtualization (LBRV) feature of
the AMD Barcelona and Phenom processors to the kvm-amd module. LBRV is only
enabled if the guest turns on last branch recording in the DEBUG_CTL MSR, so
there is no added world-switch overhead when the guest does not use these
MSRs.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Markus Rechberger <markus.rechberger@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--	arch/x86/kvm/svm.c	39
1 file changed, 37 insertions(+), 2 deletions(-)
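As context for the description above (illustrative only, not part of this patch): a guest enables last branch recording by setting bit 0 of the DEBUG_CTL MSR, and that write is what the reworked svm_set_msr() below intercepts to switch LBRV on or off. The guest_toggle_lbr() helper and the DEBUGCTL_LBR_ENABLE name are placeholders; only MSR_IA32_DEBUGCTLMSR and the bit position come from the patch.

/*
 * Sketch of the guest-side write that triggers the new LBRV path.
 * Hypothetical helper; not part of this commit.
 */
#include <linux/types.h>
#include <asm/msr.h>

#define DEBUGCTL_LBR_ENABLE	(1ULL << 0)	/* LBR bit of DEBUG_CTL, as tested by the patch */

static void guest_toggle_lbr(bool enable)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	if (enable)
		debugctl |= DEBUGCTL_LBR_ENABLE;
	else
		debugctl &= ~DEBUGCTL_LBR_ENABLE;
	/* this write is intercepted by kvm-amd and lands in svm_set_msr() */
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}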
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 281a2ffe1224..7d73e935dcc1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -47,6 +47,8 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_DEATURE_SVML (1 << 2)
 
+#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
+
 /* enable NPT for AMD64 and X86 with PAE */
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static bool npt_enabled = true;
@@ -387,6 +389,28 @@ static void svm_vcpu_init_msrpm(u32 *msrpm)
 	set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
 }
 
+static void svm_enable_lbrv(struct vcpu_svm *svm)
+{
+	u32 *msrpm = svm->msrpm;
+
+	svm->vmcb->control.lbr_ctl = 1;
+	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
+	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+}
+
+static void svm_disable_lbrv(struct vcpu_svm *svm)
+{
+	u32 *msrpm = svm->msrpm;
+
+	svm->vmcb->control.lbr_ctl = 0;
+	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
+	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
+	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
+	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
+}
+
 static __init int svm_hardware_setup(void)
 {
 	int cpu;
@@ -1231,8 +1255,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 		svm->vmcb->save.sysenter_esp = data;
 		break;
 	case MSR_IA32_DEBUGCTLMSR:
-		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
-				__FUNCTION__, data);
+		if (!svm_has(SVM_FEATURE_LBRV)) {
+			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
+					__FUNCTION__, data);
+			break;
+		}
+		if (data & DEBUGCTL_RESERVED_BITS)
+			return 1;
+
+		svm->vmcb->save.dbgctl = data;
+		if (data & (1ULL<<0))
+			svm_enable_lbrv(svm);
+		else
+			svm_disable_lbrv(svm);
 		break;
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
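For completeness, the svm_has(SVM_FEATURE_LBRV) guard in the last hunk tests feature bits that svm_hardware_setup() reads from the AMD SVM CPUID leaf. The sketch below shows that detection under the assumption that SVM_CPUID_FUNC (0x8000000a) and svm_features are defined as in svm.c/svm.h of this kernel version; detect_lbrv() and the printk are illustrative only.

/*
 * Sketch of how the LBRV capability bit is obtained; mirrors what
 * svm_hardware_setup() does, with detect_lbrv() as a hypothetical wrapper.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/processor.h>

#define SVM_CPUID_FUNC		0x8000000a	/* AMD SVM feature leaf */
#define SVM_FEATURE_LBRV	(1 << 1)	/* LBR virtualization bit in EDX */

static u32 svm_features;

/* mirrors the svm_has() helper used by svm_set_msr() above */
static int svm_has(u32 feat)
{
	return svm_features & feat;
}

static void detect_lbrv(void)
{
	/* cache the SVM feature bits reported by the processor */
	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (svm_has(SVM_FEATURE_LBRV))
		printk(KERN_INFO "kvm-amd: LBR virtualization supported\n");
}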