Diffstat (limited to 'arch/arm64/kvm/sys_regs.c')
-rw-r--r--  arch/arm64/kvm/sys_regs.c  99
1 file changed, 87 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 02e9d09e1d80..03244582bc55 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -27,6 +27,7 @@
 #include <asm/kvm_host.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
 #include <trace/events/kvm.h>
@@ -121,6 +122,48 @@ done:
 }
 
 /*
+ * Generic accessor for VM registers. Only called as long as HCR_TVM
+ * is set.
+ */
+static bool access_vm_reg(struct kvm_vcpu *vcpu,
+			  const struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
+{
+	unsigned long val;
+
+	BUG_ON(!p->is_write);
+
+	val = *vcpu_reg(vcpu, p->Rt);
+	if (!p->is_aarch32) {
+		vcpu_sys_reg(vcpu, r->reg) = val;
+	} else {
+		vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
+		if (!p->is_32bit)
+			vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
+	}
+	return true;
+}
+
+/*
+ * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
+ * guest enables the MMU, we stop trapping the VM sys_regs and leave
+ * it in complete control of the caches.
+ */
+static bool access_sctlr(struct kvm_vcpu *vcpu,
+			 const struct sys_reg_params *p,
+			 const struct sys_reg_desc *r)
+{
+	access_vm_reg(vcpu, p, r);
+
+	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
+		vcpu->arch.hcr_el2 &= ~HCR_TVM;
+		stage2_flush_vm(vcpu->kvm);
+	}
+
+	return true;
+}
+
+/*
  * We could trap ID_DFR0 and tell the guest we don't support performance
  * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
  * NAKed, so it will read the PMCR anyway.
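
Note: vcpu_has_cache_enabled() and stage2_flush_vm() live outside this file (presumably the reason for the new <asm/kvm_mmu.h> include), so they are not shown in the hunk above. As a minimal sketch only, assuming the helper simply tests the guest's SCTLR_EL1 MMU (M, bit 0) and data-cache (C, bit 2) enable bits, it could look like:

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
        /* assumed check: both MMU (M) and D-cache (C) enables must be set */
        return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

With that reading, access_sctlr() keeps forwarding writes through access_vm_reg() while the guest runs uncached; once MMU and caches come on, it flushes the stage-2 mapped pages and clears HCR_TVM so further VM register writes no longer trap.
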
@@ -185,32 +228,32 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  NULL, reset_mpidr, MPIDR_EL1 },
 	/* SCTLR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
-	  NULL, reset_val, SCTLR_EL1, 0x00C50078 },
+	  access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
 	/* CPACR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
 	  NULL, reset_val, CPACR_EL1, 0 },
 	/* TTBR0_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
-	  NULL, reset_unknown, TTBR0_EL1 },
+	  access_vm_reg, reset_unknown, TTBR0_EL1 },
 	/* TTBR1_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
-	  NULL, reset_unknown, TTBR1_EL1 },
+	  access_vm_reg, reset_unknown, TTBR1_EL1 },
 	/* TCR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
-	  NULL, reset_val, TCR_EL1, 0 },
+	  access_vm_reg, reset_val, TCR_EL1, 0 },
 
 	/* AFSR0_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
-	  NULL, reset_unknown, AFSR0_EL1 },
+	  access_vm_reg, reset_unknown, AFSR0_EL1 },
 	/* AFSR1_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
-	  NULL, reset_unknown, AFSR1_EL1 },
+	  access_vm_reg, reset_unknown, AFSR1_EL1 },
 	/* ESR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
-	  NULL, reset_unknown, ESR_EL1 },
+	  access_vm_reg, reset_unknown, ESR_EL1 },
 	/* FAR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
-	  NULL, reset_unknown, FAR_EL1 },
+	  access_vm_reg, reset_unknown, FAR_EL1 },
 	/* PAR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
 	  NULL, reset_unknown, PAR_EL1 },
@@ -224,17 +267,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
 	/* MAIR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
-	  NULL, reset_unknown, MAIR_EL1 },
+	  access_vm_reg, reset_unknown, MAIR_EL1 },
 	/* AMAIR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
-	  NULL, reset_amair_el1, AMAIR_EL1 },
+	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },
 
 	/* VBAR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
 	  NULL, reset_val, VBAR_EL1, 0 },
 	/* CONTEXTIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
-	  NULL, reset_val, CONTEXTIDR_EL1, 0 },
+	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
 	/* TPIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
 	  NULL, reset_unknown, TPIDR_EL1 },
@@ -305,14 +348,32 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  NULL, reset_val, FPEXC32_EL2, 0x70 },
 };
 
-/* Trapped cp15 registers */
+/*
+ * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
+ * depending on the way they are accessed (as a 32bit or a 64bit
+ * register).
+ */
 static const struct sys_reg_desc cp15_regs[] = {
+	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
+	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
+	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
+	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
+	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
+	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
+	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
+	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
+	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
+
 	/*
 	 * DC{C,I,CI}SW operations:
 	 */
 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
+
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
@@ -326,6 +387,14 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
 	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
+
+	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
+	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
+	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
+	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+
+	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
 };
 
 /* Target specific emulation tables */
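
Note: the double TTBR encoding mentioned in the new comment is visible in the table above: the CRn( 2), CRm( 0) entries match the 32-bit MCR/MRC form, while the CRn( 0), CRm( 2) entries (Op1 0 for TTBR0, Op1 1 for TTBR1) match the 64-bit MCRR/MRRC form, and both routes target the same c2_TTBR0/c2_TTBR1 state. A standalone illustration, not part of the patch and using a made-up value, of the split access_vm_reg() performs for the 64-bit case:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical 64-bit TTBR0 value written via MCRR (is_aarch32 && !is_32bit) */
        uint64_t val = 0x0000000480001000ULL;

        uint32_t lo = val & 0xffffffffUL;  /* would land in vcpu_cp15(vcpu, c2_TTBR0)     */
        uint32_t hi = val >> 32;           /* would land in vcpu_cp15(vcpu, c2_TTBR0 + 1) */

        printf("lo=0x%08" PRIx32 " hi=0x%" PRIx32 "\n", lo, hi);  /* lo=0x80001000 hi=0x4 */
        return 0;
}
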
@@ -437,6 +506,8 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
 	int Rt2 = (hsr >> 10) & 0xf;
 
+	params.is_aarch32 = true;
+	params.is_32bit = false;
 	params.CRm = (hsr >> 1) & 0xf;
 	params.Rt = (hsr >> 5) & 0xf;
 	params.is_write = ((hsr & 1) == 0);
@@ -480,6 +551,8 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
 
+	params.is_aarch32 = true;
+	params.is_32bit = true;
 	params.CRm = (hsr >> 1) & 0xf;
 	params.Rt = (hsr >> 5) & 0xf;
 	params.is_write = ((hsr & 1) == 0);
@@ -549,6 +622,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
 
+	params.is_aarch32 = false;
+	params.is_32bit = false;
 	params.Op0 = (esr >> 20) & 3;
 	params.Op1 = (esr >> 14) & 0x7;
 	params.CRn = (esr >> 10) & 0xf;
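
Note: taken together, the three handlers fill in the two new sys_reg_params fields so that access_vm_reg() knows how wide the trapped write is and where to store it. A hedged summary, using an illustrative helper that does not exist in the kernel:

#include <stdbool.h>

/* Illustrative only: how the flag combinations set by the handlers map onto
 * access_vm_reg()'s behaviour. */
enum trap_kind { SYSREG_AARCH64, CP15_64BIT, CP15_32BIT };

static enum trap_kind classify(bool is_aarch32, bool is_32bit)
{
        if (!is_aarch32)
                return SYSREG_AARCH64;  /* kvm_handle_sys_reg(): MSR/MRS, one 64-bit sysreg       */
        if (!is_32bit)
                return CP15_64BIT;      /* kvm_handle_cp15_64(): MCRR/MRRC, two 32-bit cp15 words */
        return CP15_32BIT;              /* kvm_handle_cp15_32(): MCR/MRC, one 32-bit cp15 word    */
}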