author	Wei Huang <wehuang@redhat.com>	2015-06-19 09:51:47 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-06-19 11:16:30 -0400
commit	e5af058aacd55e578b3c57b1582b90c4290b77f9
tree	43b504afe694df4f34dcae742536ab2ef9c5a2fa
parent	e84cfe4ce0113a6c5e3bdf70e20a21552ad3a28d
KVM: x86/vPMU: reorder PMU functions
Keep called functions closer to their callers, and init/destroy
functions next to each other.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/kvm/pmu.c | 156 ++++++++++++++++++++++----------------------
 1 file changed, 78 insertions(+), 78 deletions(-)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 24d213bd42d4..f38ad84be87e 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -83,12 +83,6 @@ static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
 	return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
 }
 
-void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.apic)
-		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
-}
-
 static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 {
 	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
@@ -324,6 +318,65 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 		reprogram_counter(pmu, bit);
 }
 
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	u64 bitmask;
+	int bit;
+
+	bitmask = pmu->reprogram_pmi;
+
+	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
+
+		if (unlikely(!pmc || !pmc->perf_event)) {
+			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+			continue;
+		}
+
+		reprogram_counter(pmu, bit);
+	}
+}
+
+/* check if idx is a valid index to access PMU */
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	bool fixed = idx & (1u << 30);
+	idx &= ~(3u << 30);
+	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
+		(fixed && idx >= pmu->nr_arch_fixed_counters);
+}
+
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	bool fast_mode = idx & (1u << 31);
+	bool fixed = idx & (1u << 30);
+	struct kvm_pmc *counters;
+	u64 ctr_val;
+
+	idx &= ~(3u << 30);
+	if (!fixed && idx >= pmu->nr_arch_gp_counters)
+		return 1;
+	if (fixed && idx >= pmu->nr_arch_fixed_counters)
+		return 1;
+	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+
+	ctr_val = pmc_read_counter(&counters[idx]);
+	if (fast_mode)
+		ctr_val = (u32)ctr_val;
+
+	*data = ctr_val;
+	return 0;
+}
+
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.apic)
+		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
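
[Note: kvm_pmu_handle_event() in the hunk above walks a local snapshot of the
reprogram_pmi bitmask and visits each pending counter index. A minimal
user-space sketch of that set-bit walk follows; the mask value and printout
are illustrative, not part of the patch.]

#include <stdint.h>
#include <stdio.h>

#define X86_PMC_IDX_MAX 64

int main(void)
{
	/* hypothetical pending bits: GP counter 0 and a fixed counter
	 * at global index 33 (INTEL_PMC_IDX_FIXED + 1) */
	uint64_t reprogram_pmi = (1ull << 0) | (1ull << 33);
	uint64_t bitmask = reprogram_pmi;	/* snapshot, as the kernel code does */
	int bit;

	/* open-coded equivalent of for_each_set_bit() */
	for (bit = 0; bit < X86_PMC_IDX_MAX; bit++)
		if (bitmask & (1ull << bit))
			printf("reprogram counter at global idx %d\n", bit);
	return 0;
}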
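[Note: kvm_pmu_is_valid_msr_idx() and kvm_pmu_rdpmc() above decode the RDPMC
index the same way: bit 30 selects the fixed-counter space, bit 31 requests a
fast (low-32-bit) read, and the remaining low bits pick a counter within the
selected space. A stand-alone sketch of that decoding; the helper name is
hypothetical.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical helper mirroring the masking done in kvm_pmu_rdpmc() */
static void rdpmc_decode(uint32_t ecx, bool *fast, bool *fixed, uint32_t *idx)
{
	*fast  = ecx & (1u << 31);	/* fast mode: return low 32 bits only */
	*fixed = ecx & (1u << 30);	/* fixed vs. general-purpose counters */
	*idx   = ecx & ~(3u << 30);	/* counter number within that space */
}

int main(void)
{
	bool fast, fixed;
	uint32_t idx;

	/* fixed counter 1, full-width read */
	rdpmc_decode((1u << 30) | 1, &fast, &fixed, &idx);
	printf("fast=%d fixed=%d idx=%u\n", fast, fixed, idx);
	return 0;
}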
@@ -433,39 +486,6 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return 1;
 }
 
-/* check if idx is a valid index to access PMU */
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
-{
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fixed = idx & (1u << 30);
-	idx &= ~(3u << 30);
-	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
-		(fixed && idx >= pmu->nr_arch_fixed_counters);
-}
-
-int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
-{
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fast_mode = idx & (1u << 31);
-	bool fixed = idx & (1u << 30);
-	struct kvm_pmc *counters;
-	u64 ctr_val;
-
-	idx &= ~(3u << 30);
-	if (!fixed && idx >= pmu->nr_arch_gp_counters)
-		return 1;
-	if (fixed && idx >= pmu->nr_arch_fixed_counters)
-		return 1;
-	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-
-	ctr_val = pmc_read_counter(&counters[idx]);
-	if (fast_mode)
-		ctr_val = (u32)ctr_val;
-
-	*data = ctr_val;
-	return 0;
-}
-
 /* refresh PMU settings. This function generally is called when underlying
  * settings are changed (such as changes of PMU CPUID by guest VMs), which
  * should rarely happen.
@@ -521,26 +541,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
 }
 
-void kvm_pmu_init(struct kvm_vcpu *vcpu)
-{
-	int i;
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-
-	memset(pmu, 0, sizeof(*pmu));
-	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
-		pmu->gp_counters[i].type = KVM_PMC_GP;
-		pmu->gp_counters[i].vcpu = vcpu;
-		pmu->gp_counters[i].idx = i;
-	}
-	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
-		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
-		pmu->fixed_counters[i].vcpu = vcpu;
-		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
-	}
-	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
-	kvm_pmu_refresh(vcpu);
-}
-
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -560,27 +560,27 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 	pmu->global_ovf_ctrl = 0;
 }
 
-void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_pmu_reset(vcpu);
-}
-
-void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
 {
+	int i;
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	u64 bitmask;
-	int bit;
-
-	bitmask = pmu->reprogram_pmi;
-
-	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
 
-		if (unlikely(!pmc || !pmc->perf_event)) {
-			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
-			continue;
-		}
-
-		reprogram_counter(pmu, bit);
+	memset(pmu, 0, sizeof(*pmu));
+	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
+		pmu->gp_counters[i].type = KVM_PMC_GP;
+		pmu->gp_counters[i].vcpu = vcpu;
+		pmu->gp_counters[i].idx = i;
 	}
+	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
+		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
+		pmu->fixed_counters[i].vcpu = vcpu;
+		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
+	}
+	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
+	kvm_pmu_refresh(vcpu);
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+	kvm_pmu_reset(vcpu);
 }
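
[Note: kvm_pmu_init(), moved but not modified by this patch, assigns each
counter a global index: GP counters at 0..INTEL_PMC_MAX_GENERIC-1 and fixed
counters offset by INTEL_PMC_IDX_FIXED, matching the bit positions that
global_idx_to_pmc() and the reprogram_pmi mask expect. A small sketch of that
layout; the constant values are assumed from the kernel headers of this era.]

#include <stdio.h>

/* assumed values, as in arch/x86/include/asm/perf_event.h */
#define INTEL_PMC_MAX_FIXED	3
#define INTEL_PMC_IDX_FIXED	32

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)	/* first few GP counters */
		printf("gp_counters[%d]    -> global idx %d\n", i, i);
	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
		printf("fixed_counters[%d] -> global idx %d\n",
		       i, i + INTEL_PMC_IDX_FIXED);
	return 0;
}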