-rw-r--r--  arch/arm64/include/asm/kvm_coproc.h |   5
-rw-r--r--  arch/arm64/kvm/handle_exit.c        |   7
-rw-r--r--  arch/arm64/kvm/sys_regs.c           | 181
3 files changed, 186 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
index 9b4477acb554..9a59301cd014 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -32,11 +32,16 @@ struct kvm_sys_reg_table {
 
 struct kvm_sys_reg_target_table {
 	struct kvm_sys_reg_table table64;
+	struct kvm_sys_reg_table table32;
 };
 
 void kvm_register_target_sys_reg_table(unsigned int target,
 				       struct kvm_sys_reg_target_table *table);
 
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
 #define kvm_coproc_table_init kvm_sys_reg_table_init
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 4766b7f3515e..9beaca033437 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -62,6 +62,13 @@ static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_EL2_EC_WFI]	= kvm_handle_wfi,
+	[ESR_EL2_EC_CP15_32]	= kvm_handle_cp15_32,
+	[ESR_EL2_EC_CP15_64]	= kvm_handle_cp15_64,
+	[ESR_EL2_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[ESR_EL2_EC_CP14_LS]	= kvm_handle_cp14_load_store,
+	[ESR_EL2_EC_CP14_64]	= kvm_handle_cp14_access,
+	[ESR_EL2_EC_HVC32]	= handle_hvc,
+	[ESR_EL2_EC_SMC32]	= handle_smc,
 	[ESR_EL2_EC_HVC64]	= handle_hvc,
 	[ESR_EL2_EC_SMC64]	= handle_smc,
 	[ESR_EL2_EC_SYS64]	= kvm_handle_sys_reg,
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 52fff0ae3442..94923609753b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -38,6 +38,10 @@
  * types are different. My gut feeling is that it should be pretty
  * easy to merge, but that would be an ABI breakage -- again. VFP
  * would also need to be abstracted.
+ *
+ * For AArch32, we only take care of what is being trapped. Anything
+ * that has to do with init and userspace access has to go via the
+ * 64bit interface.
  */
 
 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
@@ -166,6 +170,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
 	  access_dcsw },
 
+	/* TEECR32_EL1 */
+	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
+	  NULL, reset_val, TEECR32_EL1, 0 },
+	/* TEEHBR32_EL1 */
+	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
+	  NULL, reset_val, TEEHBR32_EL1, 0 },
+	/* DBGVCR32_EL2 */
+	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
+	  NULL, reset_val, DBGVCR32_EL2, 0 },
+
 	/* MPIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
 	  NULL, reset_mpidr, MPIDR_EL1 },
@@ -276,6 +290,39 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* TPIDRRO_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
 	  NULL, reset_unknown, TPIDRRO_EL0 },
+
+	/* DACR32_EL2 */
+	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
+	  NULL, reset_unknown, DACR32_EL2 },
+	/* IFSR32_EL2 */
+	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
+	  NULL, reset_unknown, IFSR32_EL2 },
+	/* FPEXC32_EL2 */
+	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
+	  NULL, reset_val, FPEXC32_EL2, 0x70 },
+};
+
+/* Trapped cp15 registers */
+static const struct sys_reg_desc cp15_regs[] = {
+	/*
+	 * DC{C,I,CI}SW operations:
+	 */
+	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
+	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
+	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
 };
 
 /* Target specific emulation tables */
@@ -288,13 +335,20 @@ void kvm_register_target_sys_reg_table(unsigned int target,
 }
 
 /* Get specific register table for this target. */
-static const struct sys_reg_desc *get_target_table(unsigned target, size_t *num)
+static const struct sys_reg_desc *get_target_table(unsigned target,
+						   bool mode_is_64,
+						   size_t *num)
 {
 	struct kvm_sys_reg_target_table *table;
 
 	table = target_tables[target];
-	*num = table->table64.num;
-	return table->table64.table;
+	if (mode_is_64) {
+		*num = table->table64.num;
+		return table->table64.table;
+	} else {
+		*num = table->table32.num;
+		return table->table32.table;
+	}
 }
 
 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
@@ -322,13 +376,126 @@ static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
 	return NULL;
 }
 
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static void emulate_cp15(struct kvm_vcpu *vcpu,
+			 const struct sys_reg_params *params)
+{
+	size_t num;
+	const struct sys_reg_desc *table, *r;
+
+	table = get_target_table(vcpu->arch.target, false, &num);
+
+	/* Search target-specific then generic table. */
+	r = find_reg(params, table, num);
+	if (!r)
+		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+	if (likely(r)) {
+		/*
+		 * Not having an accessor means that we have
+		 * configured a trap that we don't know how to
+		 * handle. This certainly qualifies as a gross bug
+		 * that should be fixed right away.
+		 */
+		BUG_ON(!r->access);
+
+		if (likely(r->access(vcpu, params, r))) {
+			/* Skip instruction, since it was emulated */
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			return;
+		}
+		/* If access function fails, it should complain. */
+	}
+
+	kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
+	print_sys_reg_instr(params);
+	kvm_inject_undefined(vcpu);
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct sys_reg_params params;
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt2 = (hsr >> 10) & 0xf;
+
+	params.CRm = (hsr >> 1) & 0xf;
+	params.Rt = (hsr >> 5) & 0xf;
+	params.is_write = ((hsr & 1) == 0);
+
+	params.Op0 = 0;
+	params.Op1 = (hsr >> 16) & 0xf;
+	params.Op2 = 0;
+	params.CRn = 0;
+
+	/*
+	 * Massive hack here. Store Rt2 in the top 32bits so we only
+	 * have one register to deal with. As we use the same trap
+	 * backends between AArch32 and AArch64, we get away with it.
+	 */
+	if (params.is_write) {
+		u64 val = *vcpu_reg(vcpu, params.Rt);
+		val &= 0xffffffff;
+		val |= *vcpu_reg(vcpu, Rt2) << 32;
+		*vcpu_reg(vcpu, params.Rt) = val;
+	}
+
+	emulate_cp15(vcpu, &params);
+
+	/* Do the opposite hack for the read side */
+	if (!params.is_write) {
+		u64 val = *vcpu_reg(vcpu, params.Rt);
+		val >>= 32;
+		*vcpu_reg(vcpu, Rt2) = val;
+	}
+
+	return 1;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct sys_reg_params params;
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	params.CRm = (hsr >> 1) & 0xf;
+	params.Rt = (hsr >> 5) & 0xf;
+	params.is_write = ((hsr & 1) == 0);
+	params.CRn = (hsr >> 10) & 0xf;
+	params.Op0 = 0;
+	params.Op1 = (hsr >> 14) & 0x7;
+	params.Op2 = (hsr >> 17) & 0x7;
+
+	emulate_cp15(vcpu, &params);
+	return 1;
+}
+
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 			   const struct sys_reg_params *params)
 {
 	size_t num;
 	const struct sys_reg_desc *table, *r;
 
-	table = get_target_table(vcpu->arch.target, &num);
+	table = get_target_table(vcpu->arch.target, true, &num);
 
 	/* Search target-specific then generic table. */
 	r = find_reg(params, table, num);
@@ -438,7 +605,7 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
 	if (!index_to_params(id, &params))
 		return NULL;
 
-	table = get_target_table(vcpu->arch.target, &num);
+	table = get_target_table(vcpu->arch.target, true, &num);
 	r = find_reg(&params, table, num);
 	if (!r)
 		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
@@ -762,7 +929,7 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
 	size_t num;
 
 	/* We check for duplicates here, to allow arch-specific overrides. */
-	i1 = get_target_table(vcpu->arch.target, &num);
+	i1 = get_target_table(vcpu->arch.target, true, &num);
 	end1 = i1 + num;
 	i2 = sys_reg_descs;
 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
@@ -874,7 +1041,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 	/* Generic chip reset first (so target could override). */
 	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 
-	table = get_target_table(vcpu->arch.target, &num);
+	table = get_target_table(vcpu->arch.target, true, &num);
 	reset_sys_reg_descs(vcpu, table, num);
 
 	for (num = 1; num < NR_SYS_REGS; num++)
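
A minimal standalone sketch of the Rt/Rt2 packing trick that kvm_handle_cp15_64() above relies on: the 32-bit Rt value is kept in the low half of a 64-bit quantity and Rt2 in the high half, so the shared AArch64 trap backends only see a single register. The helper names (pack_rt_rt2/unpack_rt_rt2) are illustrative assumptions, not part of the kernel code.

/* Sketch only -- mirrors the hi/lo packing, not KVM code. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_rt_rt2(uint32_t rt_val, uint32_t rt2_val)
{
	/* Rt in bits [31:0], Rt2 in bits [63:32] */
	return (uint64_t)rt_val | ((uint64_t)rt2_val << 32);
}

static void unpack_rt_rt2(uint64_t val, uint32_t *rt_val, uint32_t *rt2_val)
{
	*rt_val = (uint32_t)val;
	*rt2_val = (uint32_t)(val >> 32);
}

int main(void)
{
	uint32_t lo, hi;
	uint64_t v = pack_rt_rt2(0x11223344, 0xaabbccdd);

	unpack_rt_rt2(v, &lo, &hi);
	printf("packed=%016llx lo=%08x hi=%08x\n",
	       (unsigned long long)v, lo, hi);
	return 0;
}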