author    Andrew Murray <andrew.murray@arm.com>  2019-06-17 15:01:05 -0400
committer Marc Zyngier <marc.zyngier@arm.com>    2019-07-05 08:56:22 -0400
commit    80f393a23be68e2f8a0f74258d6155438c200bbd
tree      c07e0e47ad5c14a8cbfef8cfe9a0763faf153254 /virt
parent    218907cbc2b82419c70180610163c987d4764b27
KVM: arm/arm64: Support chained PMU counters
ARMv8 provides support for chained PMU counters: when an event type of
0x001E is set for an odd-numbered counter, that counter increments by one
for each overflow of the preceding even-numbered counter. Let's emulate
this in KVM by creating a 64 bit perf counter when a user chains two
emulated counters together.

For chained events we only support generating an overflow interrupt on
the high counter. We use the attributes of the low counter to determine
the attributes of the perf event.

Suggested-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/pmu.c	252
1 file changed, 215 insertions(+), 37 deletions(-)
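As background for the diff below: the chaining behaviour this patch emulates is driven entirely by the guest's own PMU programming. The following guest-side sketch is illustrative only and is not part of the patch; it assumes EL1 guest code built with the kernel's read_sysreg()/write_sysreg() helpers, and the counter indices (2 as low/even, 3 as high/odd), the INST_RETIRED event and the guest_read_chained_pair() name are arbitrary choices for the example.

	/*
	 * Illustrative guest-side sketch (not part of this patch): program an
	 * even/odd counter pair as a chained 64-bit counter.
	 */
	#include <linux/types.h>
	#include <linux/bits.h>
	#include <asm/barrier.h>
	#include <asm/sysreg.h>

	#define PMU_EVT_CHAIN		0x1E	/* ARMV8_PMUV3_PERFCTR_CHAIN */
	#define PMU_EVT_INST_RETIRED	0x08	/* example event for the low counter */

	static u64 guest_read_chained_pair(void)
	{
		u64 lo, hi;

		/* Low/even counter counts the event of interest. */
		write_sysreg(2, pmselr_el0);
		write_sysreg(PMU_EVT_INST_RETIRED, pmxevtyper_el0);

		/* High/odd counter counts overflows of the low counter (CHAIN). */
		write_sysreg(3, pmselr_el0);
		write_sysreg(PMU_EVT_CHAIN, pmxevtyper_el0);

		/* Enable the pair; PMCR_EL0.E is assumed to be set already. */
		write_sysreg(BIT(2) | BIT(3), pmcntenset_el0);
		isb();

		/* ... run the workload of interest ... */

		/* Read back the pair and combine it into one 64-bit value. */
		write_sysreg(2, pmselr_el0);
		lo = read_sysreg(pmxevcntr_el0);
		write_sysreg(3, pmselr_el0);
		hi = read_sysreg(pmxevcntr_el0);

		return (hi << 32) | (lo & GENMASK(31, 0));
	}

With this patch applied, KVM backs such a pair with a single 64-bit perf event owned by the low (canonical) counter, and an overflow interrupt is only delivered for the high counter, as the commit message describes.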
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 24c6cf869a16..3dd8238ed246 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -15,6 +15,8 @@
 
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
 
+#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
+
 /**
  * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
  * @vcpu: The vcpu pointer
@@ -26,29 +28,126 @@ static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
 		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
 }
 
+static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu *pmu;
+	struct kvm_vcpu_arch *vcpu_arch;
+
+	pmc -= pmc->idx;
+	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
+	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
+	return container_of(vcpu_arch, struct kvm_vcpu, arch);
+}
+
 /**
- * kvm_pmu_get_counter_value - get PMU counter value
+ * kvm_pmu_pmc_is_chained - determine if the pmc is chained
+ * @pmc: The PMU counter pointer
+ */
+static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
+{
+	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+
+	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+}
+
+/**
+ * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
+ * @select_idx: The counter index
+ */
+static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
+{
+	return select_idx & 0x1;
+}
+
+/**
+ * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
+ * @pmc: The PMU counter pointer
+ *
+ * When a pair of PMCs are chained together we use the low counter (canonical)
+ * to hold the underlying perf event.
+ */
+static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
+{
+	if (kvm_pmu_pmc_is_chained(pmc) &&
+	    kvm_pmu_idx_is_high_counter(pmc->idx))
+		return pmc - 1;
+
+	return pmc;
+}
+
+/**
+ * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
  * @vcpu: The vcpu pointer
  * @select_idx: The counter index
  */
-u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
+static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
 {
-	u64 counter, reg, enabled, running;
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+	u64 eventsel, reg;
 
-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
-	counter = __vcpu_sys_reg(vcpu, reg);
+	select_idx |= 0x1;
+
+	if (select_idx == ARMV8_PMU_CYCLE_IDX)
+		return false;
+
+	reg = PMEVTYPER0_EL0 + select_idx;
+	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;
+
+	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
+}
+
+/**
+ * kvm_pmu_get_pair_counter_value - get PMU counter value
+ * @vcpu: The vcpu pointer
+ * @pmc: The PMU counter pointer
+ */
+static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
+					  struct kvm_pmc *pmc)
+{
+	u64 counter, counter_high, reg, enabled, running;
 
-	/* The real counter value is equal to the value of counter register plus
+	if (kvm_pmu_pmc_is_chained(pmc)) {
+		pmc = kvm_pmu_get_canonical_pmc(pmc);
+		reg = PMEVCNTR0_EL0 + pmc->idx;
+
+		counter = __vcpu_sys_reg(vcpu, reg);
+		counter_high = __vcpu_sys_reg(vcpu, reg + 1);
+
+		counter = lower_32_bits(counter) | (counter_high << 32);
+	} else {
+		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
+		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
+		counter = __vcpu_sys_reg(vcpu, reg);
+	}
+
+	/*
+	 * The real counter value is equal to the value of counter register plus
 	 * the value perf event counts.
 	 */
 	if (pmc->perf_event)
 		counter += perf_event_read_value(pmc->perf_event, &enabled,
 						 &running);
 
-	if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+	return counter;
+}
+
+/**
+ * kvm_pmu_get_counter_value - get PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ */
+u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+	u64 counter;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+
+	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
+
+	if (kvm_pmu_pmc_is_chained(pmc) &&
+	    kvm_pmu_idx_is_high_counter(select_idx))
+		counter = upper_32_bits(counter);
+
+	else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
 		counter = lower_32_bits(counter);
 
 	return counter;
@@ -78,6 +177,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
  */
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
 {
+	pmc = kvm_pmu_get_canonical_pmc(pmc);
 	if (pmc->perf_event) {
 		perf_event_disable(pmc->perf_event);
 		perf_event_release_kernel(pmc->perf_event);
@@ -95,13 +195,23 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
 	u64 counter, reg;
 
-	if (pmc->perf_event) {
-		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
+	pmc = kvm_pmu_get_canonical_pmc(pmc);
+	if (!pmc->perf_event)
+		return;
+
+	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
+
+	if (kvm_pmu_pmc_is_chained(pmc)) {
+		reg = PMEVCNTR0_EL0 + pmc->idx;
+		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+	} else {
 		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
 		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
-		__vcpu_sys_reg(vcpu, reg) = counter;
-		kvm_pmu_release_perf_event(pmc);
+		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
 	}
+
+	kvm_pmu_release_perf_event(pmc);
 }
 
 /**
@@ -118,6 +228,8 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
 		pmu->pmc[i].idx = i;
 	}
+
+	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
 }
 
 /**
@@ -166,6 +278,18 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 			continue;
 
 		pmc = &pmu->pmc[i];
+
+		/*
+		 * For high counters of chained events we must recreate the
+		 * perf event with the long (64bit) attribute set.
+		 */
+		if (kvm_pmu_pmc_is_chained(pmc) &&
+		    kvm_pmu_idx_is_high_counter(i)) {
+			kvm_pmu_create_perf_event(vcpu, i);
+			continue;
+		}
+
+		/* At this point, pmc must be the canonical */
 		if (pmc->perf_event) {
 			perf_event_enable(pmc->perf_event);
 			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
@@ -195,6 +319,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 			continue;
 
 		pmc = &pmu->pmc[i];
+
+		/*
+		 * For high counters of chained events we must recreate the
+		 * perf event with the long (64bit) attribute unset.
+		 */
+		if (kvm_pmu_pmc_is_chained(pmc) &&
+		    kvm_pmu_idx_is_high_counter(i)) {
+			kvm_pmu_create_perf_event(vcpu, i);
+			continue;
+		}
+
+		/* At this point, pmc must be the canonical */
 		if (pmc->perf_event)
 			perf_event_disable(pmc->perf_event);
 	}
@@ -284,17 +420,6 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
 	kvm_pmu_update_state(vcpu);
 }
 
-static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
-{
-	struct kvm_pmu *pmu;
-	struct kvm_vcpu_arch *vcpu_arch;
-
-	pmc -= pmc->idx;
-	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
-	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
-	return container_of(vcpu_arch, struct kvm_vcpu, arch);
-}
-
 /**
  * When the perf event overflows, set the overflow status and inform the vcpu.
  */
@@ -385,13 +510,20 @@ static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+	struct kvm_pmc *pmc;
 	struct perf_event *event;
 	struct perf_event_attr attr;
 	u64 eventsel, counter, reg, data;
 
-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
+	/*
+	 * For chained counters the event type and filtering attributes are
+	 * obtained from the low/even counter. We also use this counter to
+	 * determine if the event is enabled/disabled.
+	 */
+	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
+
+	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
+	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
 	data = __vcpu_sys_reg(vcpu, reg);
 
 	kvm_pmu_stop_counter(vcpu, pmc);
@@ -399,30 +531,47 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 
 	/* Software increment event does't need to be backed by a perf event */
 	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
-	    select_idx != ARMV8_PMU_CYCLE_IDX)
+	    pmc->idx != ARMV8_PMU_CYCLE_IDX)
 		return;
 
 	memset(&attr, 0, sizeof(struct perf_event_attr));
 	attr.type = PERF_TYPE_RAW;
 	attr.size = sizeof(attr);
 	attr.pinned = 1;
-	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
+	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
 	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
 	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
 	attr.exclude_hv = 1; /* Don't count EL2 events */
 	attr.exclude_host = 1; /* Don't count host events */
-	attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
+	attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
 		      ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
 
-	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
-	/* The initial sample period (overflow count) of an event. */
-	if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
-		attr.sample_period = (-counter) & GENMASK(63, 0);
-	else
-		attr.sample_period = (-counter) & GENMASK(31, 0);
+	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
+
+	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+		/**
+		 * The initial sample period (overflow count) of an event. For
+		 * chained counters we only support overflow interrupts on the
+		 * high counter.
+		 */
+		attr.sample_period = (-counter) & GENMASK(63, 0);
+		event = perf_event_create_kernel_counter(&attr, -1, current,
+							 kvm_pmu_perf_overflow,
+							 pmc + 1);
+
+		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+	} else {
+		/* The initial sample period (overflow count) of an event. */
+		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+			attr.sample_period = (-counter) & GENMASK(63, 0);
+		else
+			attr.sample_period = (-counter) & GENMASK(31, 0);
 
-	event = perf_event_create_kernel_counter(&attr, -1, current,
-						 kvm_pmu_perf_overflow, pmc);
+		event = perf_event_create_kernel_counter(&attr, -1, current,
+							 kvm_pmu_perf_overflow, pmc);
+	}
+
 	if (IS_ERR(event)) {
 		pr_err_once("kvm: pmu event creation failed %ld\n",
 			    PTR_ERR(event));
@@ -433,6 +582,33 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 }
 
 /**
+ * kvm_pmu_update_pmc_chained - update chained bitmap
+ * @vcpu: The vcpu pointer
+ * @select_idx: The number of selected counter
+ *
+ * Update the chained bitmap based on the event type written in the
+ * typer register.
+ */
+static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+
+	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+		/*
+		 * During promotion from !chained to chained we must ensure
+		 * the adjacent counter is stopped and its event destroyed
+		 */
+		if (!kvm_pmu_pmc_is_chained(pmc))
+			kvm_pmu_stop_counter(vcpu, pmc);
+
+		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+	} else {
+		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+	}
+}
+
+/**
  * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
  * @vcpu: The vcpu pointer
  * @data: The data guest writes to PMXEVTYPER_EL0
@@ -451,6 +627,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
 
 	__vcpu_sys_reg(vcpu, reg) = event_type;
+
+	kvm_pmu_update_pmc_chained(vcpu, select_idx);
 	kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 