author    Paolo Bonzini <pbonzini@redhat.com>  2013-03-28 12:18:35 -0400
committer Gleb Natapov <gleb@redhat.com>       2013-04-02 10:42:44 -0400
commit    afd80d85aefac27e6e2f9dc10f60515357c504d2 (patch)
tree      f9c59ac17a12e113d9eff004e3bdae40ea995f35 /arch/x86
parent    e1e2e605c2ad6791ce6346b22443ce611709fa65 (diff)
pmu: prepare for migration support
In order to migrate the PMU state correctly, we need to restore the values
of MSR_CORE_PERF_GLOBAL_STATUS (a read-only register) and
MSR_CORE_PERF_GLOBAL_OVF_CTRL (which has side effects when written).
We also need to write the full 40-bit value of the performance counter,
which would only be possible with a v3 architectural PMU's full-width
counter MSRs.

To distinguish host-initiated writes from the guest's, pass the
full struct msr_data to kvm_pmu_set_msr.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
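[Editor's note: for context, the host-initiated path is what a userspace VMM
reaches through the KVM_SET_MSRS vcpu ioctl. Below is a minimal sketch of how
a VMM might restore the two problematic MSRs on the destination vCPU during
migration. The helper name restore_pmu_status and the vcpu_fd handle are
illustrative, and the MSR index constants come from the Intel SDM, not from
this patch.]

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Architectural MSR indices (Intel SDM); the kernel names them
 * MSR_CORE_PERF_GLOBAL_STATUS / MSR_CORE_PERF_GLOBAL_OVF_CTRL. */
#define MSR_CORE_PERF_GLOBAL_STATUS   0x38e
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390

/* Restore PMU global status on the destination vCPU. Writes issued
 * through KVM_SET_MSRS are host-initiated, so with this patch they
 * set pmu->global_status directly instead of taking the read-only
 * and clear-on-write paths a guest WRMSR would take. */
static int restore_pmu_status(int vcpu_fd, __u64 status, __u64 ovf_ctrl)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entries[2];
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 2;
	msrs.entries[0].index = MSR_CORE_PERF_GLOBAL_STATUS;
	msrs.entries[0].data  = status;
	msrs.entries[1].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
	msrs.entries[1].data  = ovf_ctrl;

	/* KVM_SET_MSRS returns the number of entries successfully set. */
	return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) == 2 ? 0 : -1;
}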
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h   2
-rw-r--r--  arch/x86/kvm/pmu.c               14
-rw-r--r--  arch/x86/kvm/x86.c                4
3 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b5a64621d5af..3dd84c996d56 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1030,7 +1030,7 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
 bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index cfc258a6bf97..c53e797e7369 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -360,10 +360,12 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 	return 1;
 }
 
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
+	u32 index = msr_info->index;
+	u64 data = msr_info->data;
 
 	switch (index) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -375,6 +377,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_STATUS:
+		if (msr_info->host_initiated) {
+			pmu->global_status = data;
+			return 0;
+		}
 		break; /* RO MSR */
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (pmu->global_ctrl == data)
@@ -386,7 +392,8 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 		break;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
-			pmu->global_status &= ~data;
+			if (!msr_info->host_initiated)
+				pmu->global_status &= ~data;
 			pmu->global_ovf_ctrl = data;
 			return 0;
 		}
@@ -394,7 +401,8 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 	default:
 		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, index))) {
-			data = (s64)(s32)data;
+			if (!msr_info->host_initiated)
+				data = (s64)(s32)data;
 			pmc->counter += data - read_pmc(pmc);
 			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2aaba814f1c8..78c6f90a60cc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2040,7 +2040,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_P6_EVNTSEL0:
 	case MSR_P6_EVNTSEL1:
 		if (kvm_pmu_msr(vcpu, msr))
-			return kvm_pmu_set_msr(vcpu, msr, data);
+			return kvm_pmu_set_msr(vcpu, msr_info);
 
 		if (pr || data != 0)
 			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
@@ -2086,7 +2086,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
 		return xen_hvm_config(vcpu, data);
 	if (kvm_pmu_msr(vcpu, msr))
-		return kvm_pmu_set_msr(vcpu, msr, data);
+		return kvm_pmu_set_msr(vcpu, msr_info);
 	if (!ignore_msrs) {
 		vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
 			    msr, data);
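
[Editor's note: on the (s64)(s32) cast that the pmu.c hunk skips for
host-initiated writes: a guest WRMSR to a legacy counter MSR carries only
32 significant bits, sign-extended from bit 31, so a saved 40-bit count
would be mangled if migration restored it through the guest path. A small
standalone sketch of the difference, illustrative only and not kernel code:]

#include <stdint.h>
#include <stdio.h>

/* Mirrors the sign-extension logic in kvm_pmu_set_msr above,
 * outside the kernel, for demonstration. */
static uint64_t effective_counter_write(uint64_t data, int host_initiated)
{
	/* Guest writes are sign-extended from bit 31, i.e. (s64)(s32)data;
	 * host-initiated writes keep the value as-is. */
	if (!host_initiated)
		data = (uint64_t)(int64_t)(int32_t)data;
	return data;
}

int main(void)
{
	uint64_t count = 0x3FFFFFFFFULL;	/* a full 40-bit counter value */

	printf("guest write: 0x%llx\n",
	       (unsigned long long)effective_counter_write(count, 0));
	printf("host write:  0x%llx\n",
	       (unsigned long long)effective_counter_write(count, 1));
	return 0;
}

[The guest-style write collapses 0x3ffffffff to 0xffffffffffffffff (the low
32 bits are 0xffffffff, i.e. -1, sign-extended), while the host-initiated
write preserves the full 40-bit value.]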