summaryrefslogtreecommitdiffstats
path: root/arch/arm64/kvm
diff options
context:
space:
mode:
authorAndrew Murray <andrew.murray@arm.com>2019-04-09 15:22:16 -0400
committerMarc Zyngier <marc.zyngier@arm.com>2019-04-24 10:46:26 -0400
commit39e3406a090a54e700a7c0820c8258af1196b0c2 (patch)
treedfb938c6a6cd46c7572150d8b94e9b48326b8d5f /arch/arm64/kvm
parent435e53fb5e21ad1820c5c69f208304c0e5623d01 (diff)
arm64: KVM: Avoid isb's by using direct pmxevtyper sysreg
Upon entering or exiting a guest we may modify multiple PMU counters to enable of disable EL0 filtering. We presently do this via the indirect PMXEVTYPER_EL0 system register (where the counter we modify is selected by PMSELR). With this approach it is necessary to order the writes via isb instructions such that we select the correct counter before modifying it. Let's avoid potentially expensive instruction barriers by using the direct PMEVTYPER<n>_EL0 registers instead. As the change to counter type relates only to EL0 filtering we can rely on the implicit instruction barrier which occurs when we transition from EL2 to EL1 on entering the guest. On returning to userspace we can, at the latest, rely on the implicit barrier between EL2 and EL0. We can also depend on the explicit isb in armv8pmu_select_counter to order our write against any other kernel changes by the PMU driver to the type register as a result of preemption. Signed-off-by: Andrew Murray <andrew.murray@arm.com> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--arch/arm64/kvm/pmu.c84
1 file changed, 74 insertions, 10 deletions
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 3f99a095a1ff..cd49db845ef4 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -91,6 +91,74 @@ void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
91 write_sysreg(pmu->events_host, pmcntenset_el0); 91 write_sysreg(pmu->events_host, pmcntenset_el0);
92} 92}
93 93
/*
 * Case-generator helpers: expand a compile-time counter index into the
 * matching direct PMEVTYPER<n>_EL0 system register access. The direct
 * registers let us avoid the PMSELR/PMXEVTYPER indirection (and its
 * required isb ordering).
 */
#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

/* One case per architecturally defined event counter (0..30). */
#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)
135
136/*
137 * Read a value direct from PMEVTYPER<idx>
138 */
139static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
140{
141 switch (idx) {
142 PMEVTYPER_CASES(READ);
143 default:
144 WARN_ON(1);
145 }
146
147 return 0;
148}
149
150/*
151 * Write a value direct to PMEVTYPER<idx>
152 */
153static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
154{
155 switch (idx) {
156 PMEVTYPER_CASES(WRITE);
157 default:
158 WARN_ON(1);
159 }
160}
161
94/* 162/*
95 * Modify ARMv8 PMU events to include EL0 counting 163 * Modify ARMv8 PMU events to include EL0 counting
96 */ 164 */
@@ -100,11 +168,9 @@ static void kvm_vcpu_pmu_enable_el0(unsigned long events)
100 u32 counter; 168 u32 counter;
101 169
102 for_each_set_bit(counter, &events, 32) { 170 for_each_set_bit(counter, &events, 32) {
103 write_sysreg(counter, pmselr_el0); 171 typer = kvm_vcpu_pmu_read_evtype_direct(counter);
104 isb(); 172 typer &= ~ARMV8_PMU_EXCLUDE_EL0;
105 typer = read_sysreg(pmxevtyper_el0) & ~ARMV8_PMU_EXCLUDE_EL0; 173 kvm_vcpu_pmu_write_evtype_direct(counter, typer);
106 write_sysreg(typer, pmxevtyper_el0);
107 isb();
108 } 174 }
109} 175}
110 176
@@ -117,11 +183,9 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
117 u32 counter; 183 u32 counter;
118 184
119 for_each_set_bit(counter, &events, 32) { 185 for_each_set_bit(counter, &events, 32) {
120 write_sysreg(counter, pmselr_el0); 186 typer = kvm_vcpu_pmu_read_evtype_direct(counter);
121 isb(); 187 typer |= ARMV8_PMU_EXCLUDE_EL0;
122 typer = read_sysreg(pmxevtyper_el0) | ARMV8_PMU_EXCLUDE_EL0; 188 kvm_vcpu_pmu_write_evtype_direct(counter, typer);
123 write_sysreg(typer, pmxevtyper_el0);
124 isb();
125 } 189 }
126} 190}
127 191