author		Christoffer Dall <c.dall@virtualopensystems.com>	2013-01-20 18:28:09 -0500
committer	Christoffer Dall <c.dall@virtualopensystems.com>	2013-01-23 13:29:13 -0500
commit		5b3e5e5bf230f56309706dfc05fc0cb173cc83aa (patch)
tree		da2ed7c2553526469c802c2a7903b1595b8be3ce /arch/arm
parent		f7ed45be3ba524e06a6d933f0517dc7ad2d06703 (diff)
KVM: ARM: Emulation framework and CP15 emulation
Adds a new important function in the main KVM/ARM code called handle_exit(),
which is called from kvm_arch_vcpu_ioctl_run() on returns from guest
execution. This function examines the Hyp-Syndrome-Register (HSR), which
contains information telling KVM what caused the exit from the guest.

Some of the reasons for an exit are CP15 accesses, which are not allowed
from the guest; this commit handles these exits by emulating the intended
operation in software and skipping the trapped guest instruction.

Minor notes about the coproc register reset:
1) We reserve a value of 0 as an invalid cp15 offset, to catch bugs in our
   table, at a cost of 4 bytes per vcpu.
2) Added comments on the table indicating how we handle each register, for
   simplicity of understanding.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
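Before reading the diff, the core dispatch idea can be seen in isolation. The sketch below is not the patch code itself: it is a minimal, standalone C program (the syndrome value is made up) showing how the exception class in HSR[31:26] is extracted, which is what handle_exit() in arm.c below does before indexing its arm_exit_handlers table.

    #include <stdint.h>
    #include <assert.h>

    #define HSR_EC_SHIFT 26          /* exception class lives in HSR[31:26] */
    #define HSR_EC_DABT  0x24        /* data abort from the guest */

    int main(void)
    {
            /* Hypothetical syndrome for a guest data abort. */
            uint32_t hsr = (0x24u << HSR_EC_SHIFT) | 0x46;
            uint32_t hsr_ec = hsr >> HSR_EC_SHIFT;

            /* handle_exit() would dispatch this to kvm_handle_guest_abort. */
            assert(hsr_ec == HSR_EC_DABT);
            return 0;
    }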
Diffstat (limited to 'arch/arm')
 -rw-r--r--  arch/arm/include/asm/kvm_arm.h     |  11
 -rw-r--r--  arch/arm/include/asm/kvm_coproc.h  |  14
 -rw-r--r--  arch/arm/include/asm/kvm_emulate.h |   6
 -rw-r--r--  arch/arm/include/asm/kvm_host.h    |   4
 -rw-r--r--  arch/arm/kvm/Makefile              |   2
 -rw-r--r--  arch/arm/kvm/arm.c                 | 169
 -rw-r--r--  arch/arm/kvm/coproc.c              | 360
 -rw-r--r--  arch/arm/kvm/coproc.h              | 153
 -rw-r--r--  arch/arm/kvm/coproc_a15.c          | 162
 -rw-r--r--  arch/arm/kvm/emulate.c             | 218
 -rw-r--r--  arch/arm/kvm/trace.h               |  65
 11 files changed, 1160 insertions(+), 4 deletions(-)
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index c69936b1fc53..9a34c20d41ec 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -70,6 +70,11 @@
 			HCR_SWIO | HCR_TIDCP)
 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
 
+/* System Control Register (SCTLR) bits */
+#define SCTLR_TE	(1 << 30)
+#define SCTLR_EE	(1 << 25)
+#define SCTLR_V		(1 << 13)
+
 /* Hyp System Control Register (HSCTLR) bits */
 #define HSCTLR_TE	(1 << 30)
 #define HSCTLR_EE	(1 << 25)
@@ -171,6 +176,10 @@
 #define HSR_FSC		(0x3f)
 #define HSR_FSC_TYPE	(0x3c)
 #define HSR_WNR		(1 << 6)
+#define HSR_CV_SHIFT	(24)
+#define HSR_CV		(1U << HSR_CV_SHIFT)
+#define HSR_COND_SHIFT	(20)
+#define HSR_COND	(0xfU << HSR_COND_SHIFT)
 
 #define FSC_FAULT	(0x04)
 #define FSC_PERM	(0x0c)
@@ -197,4 +206,6 @@
 #define HSR_EC_DABT	(0x24)
 #define HSR_EC_DABT_HYP	(0x25)
 
+#define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
+
 #endif /* __ARM_KVM_ARM_H__ */
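As a quick worked example of the two condition fields added above (the syndrome value is hypothetical), this standalone program builds an HSR with a valid condition field of 0xe (AL) and pulls it back out with the new masks:

    #include <stdint.h>
    #include <assert.h>

    #define HSR_CV_SHIFT   (24)
    #define HSR_CV         (1U << HSR_CV_SHIFT)
    #define HSR_COND_SHIFT (20)
    #define HSR_COND       (0xfU << HSR_COND_SHIFT)

    int main(void)
    {
            uint32_t hsr = HSR_CV | (0xeU << HSR_COND_SHIFT); /* made-up value */

            assert(hsr & HSR_CV);                             /* COND is valid */
            assert(((hsr & HSR_COND) >> HSR_COND_SHIFT) == 0xe);
            return 0;
    }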
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
index b6d023deb426..bd1ace030495 100644
--- a/arch/arm/include/asm/kvm_coproc.h
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -21,4 +21,18 @@
 
 void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
 
+struct kvm_coproc_target_table {
+	unsigned target;
+	const struct coproc_reg *table;
+	size_t num;
+};
+void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_coproc_table_init(void);
 #endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 17dad674b90f..01a755b80632 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -25,6 +25,12 @@
 u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
 
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
 static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
 {
 	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 7a121089c733..e1d4168d4f19 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -94,6 +94,10 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
+	/* dcache set/way operation pending */
+	int last_pcpu;
+	cpumask_t require_dcache_flush;
+
 	/* Interrupt related fields */
 	u32 irq_lines;		/* IRQ and FIQ levels */
 
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index dfc293f277b3..88edce6c97d4 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -18,4 +18,4 @@ kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o
+obj-y += coproc.o coproc_a15.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9e9fa4477884..be06c5de51e3 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -36,11 +36,14 @@
 #include <asm/mman.h>
 #include <asm/cputype.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 #include <asm/virt.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/opcodes.h>
 
 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension	virt");
@@ -294,6 +297,15 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	vcpu->cpu = cpu;
 	vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);
+
+	/*
+	 * Check whether this vcpu requires the cache to be flushed on
+	 * this physical CPU. This is a consequence of doing dcache
+	 * operations by set/way on this vcpu. We do it here to be in
+	 * a non-preemptible section.
+	 */
+	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
+		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -319,9 +331,16 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
+/**
+ * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
+ * @v:		The VCPU pointer
+ *
+ * If the guest CPU is not waiting for interrupts, or an interrupt line is
+ * asserted, the CPU is by definition runnable.
+ */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-	return 0;
+	return !!v->arch.irq_lines;
 }
 
 /* Just ensure a guest exit from a particular CPU */
@@ -411,6 +430,110 @@ static void update_vttbr(struct kvm *kvm)
 	spin_unlock(&kvm_vmid_lock);
 }
 
+static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* SVC called from Hyp mode should never get here */
+	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
+	BUG();
+	return -EINVAL; /* Squash warning */
+}
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+		      vcpu->arch.hsr & HSR_HVC_IMM_MASK);
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* We don't support SMC; don't do that. */
+	kvm_debug("smc: at %08x", *vcpu_pc(vcpu));
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* The hypervisor should never cause aborts */
+	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
+		vcpu->arch.hxfar, vcpu->arch.hsr);
+	return -EFAULT;
+}
+
+static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* This is either an error in the world-switch code or an external abort */
+	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
+		vcpu->arch.hxfar, vcpu->arch.hsr);
+	return -EFAULT;
+}
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+static exit_handle_fn arm_exit_handlers[] = {
+	[HSR_EC_WFI]		= kvm_handle_wfi,
+	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
+	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
+	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
+	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
+	[HSR_EC_HVC]		= handle_hvc,
+	[HSR_EC_SMC]		= handle_smc,
+	[HSR_EC_IABT]		= kvm_handle_guest_abort,
+	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
+	[HSR_EC_DABT]		= kvm_handle_guest_abort,
+	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
+};
+
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed. So let's re-implement the hardware, in
+ * software!
+ */
+static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
+{
+	unsigned long cpsr, cond, insn;
+
+	/*
+	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+	 * catch undefined instructions, and then we won't get past
+	 * the arm_exit_handlers test anyway.
+	 */
+	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
+
+	/* Top two bits non-zero? Unconditional. */
+	if (vcpu->arch.hsr >> 30)
+		return true;
+
+	cpsr = *vcpu_cpsr(vcpu);
+
+	/* Is condition field valid? */
+	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
+		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
+	else {
+		/* This can happen in Thumb mode: examine IT state. */
+		unsigned long it;
+
+		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+		/* it == 0 => unconditional. */
+		if (it == 0)
+			return true;
+
+		/* The cond for this insn works out as the top 4 bits. */
+		cond = (it >> 4);
+	}
+
+	/* Shift makes it look like an ARM-mode instruction */
+	insn = cond << 28;
+	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
+}
+
 /*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to QEMU.
@@ -418,8 +541,46 @@ static void update_vttbr(struct kvm *kvm)
 static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
-	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-	return 0;
+	unsigned long hsr_ec;
+
+	switch (exception_index) {
+	case ARM_EXCEPTION_IRQ:
+		return 1;
+	case ARM_EXCEPTION_UNDEFINED:
+		kvm_err("Undefined exception in Hyp mode at: %#08x\n",
+			vcpu->arch.hyp_pc);
+		BUG();
+		panic("KVM: Hypervisor undefined exception!\n");
+	case ARM_EXCEPTION_DATA_ABORT:
+	case ARM_EXCEPTION_PREF_ABORT:
+	case ARM_EXCEPTION_HVC:
+		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
+
+		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
+		    || !arm_exit_handlers[hsr_ec]) {
+			kvm_err("Unknown exception class: %#08lx, "
+				"hsr: %#08x\n", hsr_ec,
+				(unsigned int)vcpu->arch.hsr);
+			BUG();
+		}
+
+		/*
+		 * See ARM ARM B1.14.1: "Hyp traps on instructions
+		 * that fail their condition code check"
+		 */
+		if (!kvm_condition_valid(vcpu)) {
+			bool is_wide = vcpu->arch.hsr & HSR_IL;
+			kvm_skip_instr(vcpu, is_wide);
+			return 1;
+		}
+
+		return arm_exit_handlers[hsr_ec](vcpu, run);
+	default:
+		kvm_pr_unimpl("Unsupported exception type: %d",
+			      exception_index);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		return 0;
+	}
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
@@ -493,6 +654,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
+		vcpu->arch.last_pcpu = smp_processor_id();
 		kvm_guest_exit();
 		trace_kvm_exit(*vcpu_pc(vcpu));
 		/*
@@ -801,6 +963,7 @@ int kvm_arch_init(void *opaque)
 	if (err)
 		goto out_err;
 
+	kvm_coproc_table_init();
 	return 0;
 out_err:
 	return err;
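The least obvious part of kvm_condition_valid() above is the reassembly of the Thumb ITSTATE from its two homes in the CPSR (IT[7:2] in CPSR[15:10], IT[1:0] in CPSR[26:25]). This standalone sketch round-trips a made-up ITSTATE value through that layout using the same shifts as the patch:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            uint32_t it_full = 0xE5; /* made-up ITSTATE value */
            uint32_t cpsr = ((it_full & 0xFC) << 8)    /* IT[7:2] -> CPSR[15:10] */
                          | ((it_full & 0x03) << 25);  /* IT[1:0] -> CPSR[26:25] */

            uint32_t it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

            assert(it == 0xE5);
            assert((it >> 4) == 0xE); /* current instruction's condition code */
            return 0;
    }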
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 0c433558591c..722efe3b1675 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -16,8 +16,368 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
+#include <linux/mm.h>
 #include <linux/kvm_host.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <trace/events/kvm.h>
 
+#include "trace.h"
+#include "coproc.h"
+
+
+/******************************************************************************
+ * Co-processor emulation
+ *****************************************************************************/
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/*
+	 * We can get here if the host has been built without VFPv3 support,
+	 * but the guest attempted a floating point operation.
+	 */
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+/* See note at ARM ARM B1.14.4 */
+static bool access_dcsw(struct kvm_vcpu *vcpu,
+			const struct coproc_params *p,
+			const struct coproc_reg *r)
+{
+	u32 val;
+	int cpu;
+
+	if (!p->is_write)
+		return read_from_write_only(vcpu, p);
+
+	cpu = get_cpu();
+
+	cpumask_setall(&vcpu->arch.require_dcache_flush);
+	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
+
+	/* If we were already preempted, take the long way around */
+	if (cpu != vcpu->arch.last_pcpu) {
+		flush_cache_all();
+		goto done;
+	}
+
+	val = *vcpu_reg(vcpu, p->Rt1);
+
+	switch (p->CRm) {
+	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
+	case 14:		/* DCCISW */
+		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
+		break;
+
+	case 10:		/* DCCSW */
+		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
+		break;
+	}
+
+done:
+	put_cpu();
+
+	return true;
+}
+
+/*
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+ *
+ * Therefore we tell the guest we have 0 counters. Unfortunately, we
+ * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
+ * all PM registers, which doesn't crash the guest kernel at least.
+ */
+static bool pm_fake(struct kvm_vcpu *vcpu,
+		    const struct coproc_params *p,
+		    const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+	else
+		return read_zero(vcpu, p);
+}
+
+#define access_pmcr pm_fake
+#define access_pmcntenset pm_fake
+#define access_pmcntenclr pm_fake
+#define access_pmovsr pm_fake
+#define access_pmselr pm_fake
+#define access_pmceid0 pm_fake
+#define access_pmceid1 pm_fake
+#define access_pmccntr pm_fake
+#define access_pmxevtyper pm_fake
+#define access_pmxevcntr pm_fake
+#define access_pmuserenr pm_fake
+#define access_pmintenset pm_fake
+#define access_pmintenclr pm_fake
+
+/* Architected CP15 registers.
+ * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
+ */
+static const struct coproc_reg cp15_regs[] = {
+	/* CSSELR: swapped by interrupt.S. */
+	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
+			NULL, reset_unknown, c0_CSSELR },
+
+	/* TTBR0/TTBR1: swapped by interrupt.S. */
+	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+
+	/* TTBCR: swapped by interrupt.S. */
+	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
+			NULL, reset_val, c2_TTBCR, 0x00000000 },
+
+	/* DACR: swapped by interrupt.S. */
+	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
+			NULL, reset_unknown, c3_DACR },
+
+	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
+	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
+			NULL, reset_unknown, c5_DFSR },
+	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
+			NULL, reset_unknown, c5_IFSR },
+	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
+			NULL, reset_unknown, c5_ADFSR },
+	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
+			NULL, reset_unknown, c5_AIFSR },
+
+	/* DFAR/IFAR: swapped by interrupt.S. */
+	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
+			NULL, reset_unknown, c6_DFAR },
+	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
+			NULL, reset_unknown, c6_IFAR },
+	/*
+	 * DC{C,I,CI}SW operations:
+	 */
+	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
+	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
+	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
+	/*
+	 * Dummy performance monitor implementation.
+	 */
+	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
+	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
+	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
+	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
+	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
+	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
+	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
+	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
+	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
+	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
+	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
+	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
+	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
+
+	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
+	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
+			NULL, reset_unknown, c10_PRRR},
+	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
+			NULL, reset_unknown, c10_NMRR},
+
+	/* VBAR: swapped by interrupt.S. */
+	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
+			NULL, reset_val, c12_VBAR, 0x00000000 },
+
+	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
+	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
+			NULL, reset_val, c13_CID, 0x00000000 },
+	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
+			NULL, reset_unknown, c13_TID_URW },
+	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
+			NULL, reset_unknown, c13_TID_URO },
+	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
+			NULL, reset_unknown, c13_TID_PRIV },
+};
+
+/* Target specific emulation tables */
+static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
+
+void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
+{
+	target_tables[table->target] = table;
+}
+
+/* Get specific register table for this target. */
+static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
+{
+	struct kvm_coproc_target_table *table;
+
+	table = target_tables[target];
+	*num = table->num;
+	return table->table;
+}
+
+static const struct coproc_reg *find_reg(const struct coproc_params *params,
+					 const struct coproc_reg table[],
+					 unsigned int num)
+{
+	unsigned int i;
+
+	for (i = 0; i < num; i++) {
+		const struct coproc_reg *r = &table[i];
+
+		if (params->is_64bit != r->is_64)
+			continue;
+		if (params->CRn != r->CRn)
+			continue;
+		if (params->CRm != r->CRm)
+			continue;
+		if (params->Op1 != r->Op1)
+			continue;
+		if (params->Op2 != r->Op2)
+			continue;
+
+		return r;
+	}
+	return NULL;
+}
+
+static int emulate_cp15(struct kvm_vcpu *vcpu,
+			const struct coproc_params *params)
+{
+	size_t num;
+	const struct coproc_reg *table, *r;
+
+	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
+				   params->CRm, params->Op2, params->is_write);
+
+	table = get_target_table(vcpu->arch.target, &num);
+
+	/* Search target-specific then generic table. */
+	r = find_reg(params, table, num);
+	if (!r)
+		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+	if (likely(r)) {
+		/* If we don't have an accessor, we should never get here! */
+		BUG_ON(!r->access);
+
+		if (likely(r->access(vcpu, params, r))) {
+			/* Skip instruction, since it was emulated */
+			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+			return 1;
+		}
+		/* If access function fails, it should complain. */
+	} else {
+		kvm_err("Unsupported guest CP15 access at: %08x\n",
+			*vcpu_pc(vcpu));
+		print_cp_instr(params);
+	}
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params;
+
+	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.is_64bit = true;
+
+	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+	params.Op2 = 0;
+	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+	params.CRn = 0;
+
+	return emulate_cp15(vcpu, &params);
+}
+
+static void reset_coproc_regs(struct kvm_vcpu *vcpu,
+			      const struct coproc_reg *table, size_t num)
+{
+	unsigned long i;
+
+	for (i = 0; i < num; i++)
+		if (table[i].reset)
+			table[i].reset(vcpu, &table[i]);
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params;
+
+	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.is_64bit = false;
+
+	params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
+	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
+	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+	params.Rt2 = 0;
+
+	return emulate_cp15(vcpu, &params);
+}
+
+void kvm_coproc_table_init(void)
+{
+	unsigned int i;
+
+	/* Make sure tables are unique and in order. */
+	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
+		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
+}
+
+/**
+ * kvm_reset_coprocs - sets cp15 registers to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on the
+ * virtual CPU struct to their architecturally defined reset values.
+ */
 void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 {
+	size_t num;
+	const struct coproc_reg *table;
+
+	/* Catch someone adding a register without putting in reset entry. */
+	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));
+
+	/* Generic chip reset first (so target could override). */
+	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+	table = get_target_table(vcpu->arch.target, &num);
+	reset_coproc_regs(vcpu, table, num);
+
+	for (num = 1; num < NR_CP15_REGS; num++)
+		if (vcpu->arch.cp15[num] == 0x42424242)
+			panic("Didn't reset vcpu->arch.cp15[%zi]", num);
 }
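To see the kvm_handle_cp15_32() field extraction in action, here is a standalone program that hand-builds the ISS bits for the instruction mcr p15, 0, r1, c7, c14, 2 (DCCISW, one of the operations trapped for access_dcsw) and decodes it with the same shifts the handler uses. The encoded value is constructed for illustration, not captured from hardware:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            /* Op2=2, Op1=0, CRn=7, Rt=1, CRm=14, bit 0 clear (MCR: a write). */
            uint32_t hsr = (2u << 17) | (0u << 14) | (7u << 10)
                         | (1u << 5) | (14u << 1);

            assert(((hsr >> 17) & 0x7) == 2);   /* Op2 */
            assert(((hsr >> 14) & 0x7) == 0);   /* Op1 */
            assert(((hsr >> 10) & 0xf) == 7);   /* CRn */
            assert(((hsr >>  5) & 0xf) == 1);   /* Rt1 */
            assert(((hsr >>  1) & 0xf) == 14);  /* CRm */
            assert((hsr & 1) == 0);             /* is_write */
            return 0;
    }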
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
new file mode 100644
index 000000000000..992adfafa2ff
--- /dev/null
+++ b/arch/arm/kvm/coproc.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_COPROC_LOCAL_H__
+#define __ARM_KVM_COPROC_LOCAL_H__
+
+struct coproc_params {
+	unsigned long CRn;
+	unsigned long CRm;
+	unsigned long Op1;
+	unsigned long Op2;
+	unsigned long Rt1;
+	unsigned long Rt2;
+	bool is_64bit;
+	bool is_write;
+};
+
+struct coproc_reg {
+	/* MRC/MCR/MRRC/MCRR instruction which accesses it. */
+	unsigned long CRn;
+	unsigned long CRm;
+	unsigned long Op1;
+	unsigned long Op2;
+
+	bool is_64;
+
+	/* Trapped access from guest, if non-NULL. */
+	bool (*access)(struct kvm_vcpu *,
+		       const struct coproc_params *,
+		       const struct coproc_reg *);
+
+	/* Initialization for vcpu. */
+	void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
+
+	/* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
+	unsigned long reg;
+
+	/* Value (usually reset value) */
+	u64 val;
+};
+
+static inline void print_cp_instr(const struct coproc_params *p)
+{
+	/* Look, we even formatted it for you to paste into the table! */
+	if (p->is_64bit) {
+		kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
+			      p->CRm, p->Op1, p->is_write ? "write" : "read");
+	} else {
+		kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
+			      " func_%s },\n",
+			      p->CRn, p->CRm, p->Op1, p->Op2,
+			      p->is_write ? "write" : "read");
+	}
+}
+
+static inline bool ignore_write(struct kvm_vcpu *vcpu,
+				const struct coproc_params *p)
+{
+	return true;
+}
+
+static inline bool read_zero(struct kvm_vcpu *vcpu,
+			     const struct coproc_params *p)
+{
+	*vcpu_reg(vcpu, p->Rt1) = 0;
+	return true;
+}
+
+static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
+				      const struct coproc_params *params)
+{
+	kvm_debug("CP15 write to read-only register at: %08x\n",
+		  *vcpu_pc(vcpu));
+	print_cp_instr(params);
+	return false;
+}
+
+static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
+					const struct coproc_params *params)
+{
+	kvm_debug("CP15 read from write-only register at: %08x\n",
+		  *vcpu_pc(vcpu));
+	print_cp_instr(params);
+	return false;
+}
+
+/* Reset functions */
+static inline void reset_unknown(struct kvm_vcpu *vcpu,
+				 const struct coproc_reg *r)
+{
+	BUG_ON(!r->reg);
+	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
+	vcpu->arch.cp15[r->reg] = 0xdecafbad;
+}
+
+static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+	BUG_ON(!r->reg);
+	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
+	vcpu->arch.cp15[r->reg] = r->val;
+}
+
+static inline void reset_unknown64(struct kvm_vcpu *vcpu,
+				   const struct coproc_reg *r)
+{
+	BUG_ON(!r->reg);
+	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));
+
+	vcpu->arch.cp15[r->reg] = 0xdecafbad;
+	vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
+}
+
+static inline int cmp_reg(const struct coproc_reg *i1,
+			  const struct coproc_reg *i2)
+{
+	BUG_ON(i1 == i2);
+	if (!i1)
+		return 1;
+	else if (!i2)
+		return -1;
+	if (i1->CRn != i2->CRn)
+		return i1->CRn - i2->CRn;
+	if (i1->CRm != i2->CRm)
+		return i1->CRm - i2->CRm;
+	if (i1->Op1 != i2->Op1)
+		return i1->Op1 - i2->Op1;
+	return i1->Op2 - i2->Op2;
+}
+
+
+#define CRn(_x)		.CRn = _x
+#define CRm(_x)		.CRm = _x
+#define Op1(_x)		.Op1 = _x
+#define Op2(_x)		.Op2 = _x
+#define is64		.is_64 = true
+#define is32		.is_64 = false
+
+#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
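The CRn()/CRm()/Op1()/Op2() and is32/is64 macros above are shorthand for C99 designated initializers, which is what lets the register tables mix named and positional fields. For instance, the DCCISW row from cp15_regs in coproc.c is equivalent to the following (the name dccisw_entry is invented for illustration):

    /* { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw } expands to: */
    static const struct coproc_reg dccisw_entry = {
            .CRn = 7, .CRm = 14, .Op1 = 0, .Op2 = 2,
            .is_64 = false,          /* is32 */
            .access = access_dcsw,   /* positional: the member after .is_64 */
            /* .reset and .reg stay 0: nothing to reset or shadow */
    };

In C99, an initializer without a designator continues from the member after the most recent designated one, so access_dcsw lands in .access.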
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
new file mode 100644
index 000000000000..685063a6d0cf
--- /dev/null
+++ b/arch/arm/kvm/coproc_a15.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Authors: Rusty Russell <rusty@rustcorp.au>
+ *          Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/kvm_host.h>
+#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <linux/init.h>
+
+static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+	/*
+	 * Compute guest MPIDR:
+	 * (Even if we present only one VCPU to the guest on an SMP
+	 * host we don't set the U bit in the MPIDR, or vice versa, as
+	 * revealing the underlying hardware properties is likely to
+	 * be the best choice).
+	 */
+	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
+		| (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
+}
+
+#include "coproc.h"
+
+/* A15 TRM 4.3.28: RO WI */
+static bool access_actlr(struct kvm_vcpu *vcpu,
+			 const struct coproc_params *p,
+			 const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+	return true;
+}
+
+/* A15 TRM 4.3.60: R/O. */
+static bool access_cbar(struct kvm_vcpu *vcpu,
+			const struct coproc_params *p,
+			const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return write_to_read_only(vcpu, p);
+	return read_zero(vcpu, p);
+}
+
+/* A15 TRM 4.3.48: R/O WI. */
+static bool access_l2ctlr(struct kvm_vcpu *vcpu,
+			  const struct coproc_params *p,
+			  const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+	return true;
+}
+
+static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+	u32 l2ctlr, ncores;
+
+	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
+	l2ctlr &= ~(3 << 24);
+	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+	l2ctlr |= (ncores & 3) << 24;
+
+	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+}
+
+static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+{
+	u32 actlr;
+
+	/* ACTLR contains SMP bit: make sure you create all cpus first! */
+	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+	/* Make the SMP bit consistent with the guest configuration */
+	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
+		actlr |= 1U << 6;
+	else
+		actlr &= ~(1U << 6);
+
+	vcpu->arch.cp15[c1_ACTLR] = actlr;
+}
+
+/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
+static bool access_l2ectlr(struct kvm_vcpu *vcpu,
+			   const struct coproc_params *p,
+			   const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	*vcpu_reg(vcpu, p->Rt1) = 0;
+	return true;
+}
+
+/*
+ * A15-specific CP15 registers.
+ * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
+ */
+static const struct coproc_reg a15_regs[] = {
+	/* MPIDR: we use VMPIDR for guest access. */
+	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
+			NULL, reset_mpidr, c0_MPIDR },
+
+	/* SCTLR: swapped by interrupt.S. */
+	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
+			NULL, reset_val, c1_SCTLR, 0x00C50078 },
+	/* ACTLR: trapped by HCR.TAC bit. */
+	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
+			access_actlr, reset_actlr, c1_ACTLR },
+	/* CPACR: swapped by interrupt.S. */
+	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
+			NULL, reset_val, c1_CPACR, 0x00000000 },
+
+	/*
+	 * L2CTLR access (guest wants to know #CPUs).
+	 */
+	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
+			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
+	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
+
+	/* The Configuration Base Address Register. */
+	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
+};
+
+static struct kvm_coproc_target_table a15_target_table = {
+	.target = KVM_ARM_TARGET_CORTEX_A15,
+	.table = a15_regs,
+	.num = ARRAY_SIZE(a15_regs),
+};
+
+static int __init coproc_a15_init(void)
+{
+	unsigned int i;
+
+	for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
+		BUG_ON(cmp_reg(&a15_regs[i-1],
+			       &a15_regs[i]) >= 0);
+
+	kvm_register_target_coproc_table(&a15_target_table);
+	return 0;
+}
+late_initcall(coproc_a15_init);
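A small arithmetic check of reset_l2ctlr() above, which packs (online_vcpus - 1) into L2CTLR[25:24] so the guest kernel sees the right CPU count. The host L2CTLR value is made up:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            uint32_t l2ctlr = 0x82000000u;  /* hypothetical host L2CTLR */
            uint32_t ncores = 4 - 1;        /* a guest with 4 vcpus */

            l2ctlr &= ~(3u << 24);          /* clear the core-count field */
            l2ctlr |= (ncores & 3u) << 24;

            assert(((l2ctlr >> 24) & 3u) == 3u); /* guest reads "4 cpus" */
            return 0;
    }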
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 3eadc25e95de..d61450ac6665 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -16,7 +16,13 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_arm.h>
 #include <asm/kvm_emulate.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
 
 #define VCPU_NR_MODES		6
 #define VCPU_REG_OFFSET_USR	0
@@ -153,3 +159,215 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
 		BUG();
 	}
 }
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu:	the vcpu pointer
+ * @run:	the kvm_run structure pointer
+ *
+ * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
+ * halt execution of world-switches and schedule other host processes until
+ * there is an incoming IRQ or FIQ to the VM.
+ */
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	trace_kvm_wfi(*vcpu_pc(vcpu));
+	kvm_vcpu_block(vcpu);
+	return 1;
+}
+
+/**
+ * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * @vcpu:	The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:0] -> CPSR[26:25],CPSR[15:10]
+ */
+static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+{
+	unsigned long itbits, cond;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
+	bool is_arm = !(cpsr & PSR_T_BIT);
+
+	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
+
+	if (!(cpsr & PSR_IT_MASK))
+		return;
+
+	cond = (cpsr & 0xe000) >> 13;
+	itbits = (cpsr & 0x1c00) >> (10 - 2);
+	itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
+	if ((itbits & 0x7) == 0)
+		itbits = cond = 0;
+	else
+		itbits = (itbits << 1) & 0x1f;
+
+	cpsr &= ~PSR_IT_MASK;
+	cpsr |= cond << 13;
+	cpsr |= (itbits & 0x1c) << (10 - 2);
+	cpsr |= (itbits & 0x3) << 25;
+	*vcpu_cpsr(vcpu) = cpsr;
+}
+
+/**
+ * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * @vcpu: The vcpu pointer
+ */
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+	bool is_thumb;
+
+	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
+	if (is_thumb && !is_wide_instr)
+		*vcpu_pc(vcpu) += 2;
+	else
+		*vcpu_pc(vcpu) += 4;
+	kvm_adjust_itstate(vcpu);
+}
+
+
+/******************************************************************************
+ * Inject exceptions into the guest
+ */
+
+static u32 exc_vector_base(struct kvm_vcpu *vcpu)
+{
+	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+	u32 vbar = vcpu->arch.cp15[c12_VBAR];
+
+	if (sctlr & SCTLR_V)
+		return 0xffff0000;
+	else /* always have security exceptions */
+		return vbar;
+}
+
+/**
+ * kvm_inject_undefined - inject an undefined exception into the guest
+ * @vcpu: The VCPU to receive the undefined exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ *
+ * Modelled after TakeUndefInstrException() pseudocode.
+ */
+void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+	u32 new_lr_value;
+	u32 new_spsr_value;
+	u32 cpsr = *vcpu_cpsr(vcpu);
+	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+	bool is_thumb = (cpsr & PSR_T_BIT);
+	u32 vect_offset = 4;
+	u32 return_offset = (is_thumb) ? 2 : 4;
+
+	new_spsr_value = cpsr;
+	new_lr_value = *vcpu_pc(vcpu) - return_offset;
+
+	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
+	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
+	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+	if (sctlr & SCTLR_TE)
+		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
+	if (sctlr & SCTLR_EE)
+		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+	/* Note: These now point to UND banked copies */
+	*vcpu_spsr(vcpu) = cpsr;
+	*vcpu_reg(vcpu, 14) = new_lr_value;
+
+	/* Branch to exception vector */
+	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+}
+
+/*
+ * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
+ * pseudocode.
+ */
+static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
+{
+	u32 new_lr_value;
+	u32 new_spsr_value;
+	u32 cpsr = *vcpu_cpsr(vcpu);
+	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+	bool is_thumb = (cpsr & PSR_T_BIT);
+	u32 vect_offset;
+	u32 return_offset = (is_thumb) ? 4 : 0;
+	bool is_lpae;
+
+	new_spsr_value = cpsr;
+	new_lr_value = *vcpu_pc(vcpu) + return_offset;
+
+	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
+	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
+	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+	if (sctlr & SCTLR_TE)
+		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
+	if (sctlr & SCTLR_EE)
+		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+	/* Note: These now point to ABT banked copies */
+	*vcpu_spsr(vcpu) = cpsr;
+	*vcpu_reg(vcpu, 14) = new_lr_value;
+
+	if (is_pabt)
+		vect_offset = 12;
+	else
+		vect_offset = 16;
+
+	/* Branch to exception vector */
+	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+
+	if (is_pabt) {
+		/* Set IFAR and IFSR */
+		vcpu->arch.cp15[c6_IFAR] = addr;
+		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+		/* Always give debug fault for now - should give guest a clue */
+		if (is_lpae)
+			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
+		else
+			vcpu->arch.cp15[c5_IFSR] = 2;
+	} else { /* !is_pabt */
+		/* Set DFAR and DFSR */
+		vcpu->arch.cp15[c6_DFAR] = addr;
+		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+		/* Always give debug fault for now - should give guest a clue */
+		if (is_lpae)
+			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
+		else
+			vcpu->arch.cp15[c5_DFSR] = 2;
+	}
+
+}
+
+/**
+ * kvm_inject_dabt - inject a data abort into the guest
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	inject_abt(vcpu, false, addr);
+}
+
+/**
+ * kvm_inject_pabt - inject a prefetch abort into the guest
+ * @vcpu: The VCPU to receive the prefetch abort
+ * @addr: The address to report in the IFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	inject_abt(vcpu, true, addr);
+}
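The ITAdvance step in kvm_adjust_itstate() above is compact enough to be easy to misread. This standalone sketch runs the same two-branch update on bare IT bits (the starting state is made up) until the IT block is drained; in the real code the update runs once per skipped instruction:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t cond = 0x0;    /* IT[7:5] base condition */
            uint32_t itbits = 0x0c; /* made-up IT[4:0]: two instructions left */

            while (itbits) {
                    if ((itbits & 0x7) == 0)
                            itbits = cond = 0;           /* IT block finished */
                    else
                            itbits = (itbits << 1) & 0x1f;
                    printf("cond=%#x itbits=%#x\n", cond, itbits);
            }
            return 0;
    }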
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 105d1f79909a..022305b38c27 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -64,6 +64,51 @@ TRACE_EVENT(kvm_irq_line,
 		  __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
 );
 
+/* Architecturally implementation defined CP15 register access */
+TRACE_EVENT(kvm_emulate_cp15_imp,
+	TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
+		 unsigned long CRm, unsigned long Op2, bool is_write),
+	TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	Op1		)
+		__field(	unsigned int,	Rt1		)
+		__field(	unsigned int,	CRn		)
+		__field(	unsigned int,	CRm		)
+		__field(	unsigned int,	Op2		)
+		__field(	bool,		is_write	)
+	),
+
+	TP_fast_assign(
+		__entry->is_write	= is_write;
+		__entry->Op1		= Op1;
+		__entry->Rt1		= Rt1;
+		__entry->CRn		= CRn;
+		__entry->CRm		= CRm;
+		__entry->Op2		= Op2;
+	),
+
+	TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
+		  (__entry->is_write) ? "mcr" : "mrc",
+		  __entry->Op1, __entry->Rt1, __entry->CRn,
+		  __entry->CRm, __entry->Op2)
+);
+
+TRACE_EVENT(kvm_wfi,
+	TP_PROTO(unsigned long vcpu_pc),
+	TP_ARGS(vcpu_pc),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	vcpu_pc	)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc	= vcpu_pc;
+	),
+
+	TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc)
+);
+
 TRACE_EVENT(kvm_unmap_hva,
 	TP_PROTO(unsigned long hva),
 	TP_ARGS(hva),
@@ -112,6 +157,26 @@ TRACE_EVENT(kvm_set_spte_hva,
 	TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
 );
 
+TRACE_EVENT(kvm_hvc,
+	TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
+	TP_ARGS(vcpu_pc, r0, imm),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	vcpu_pc	)
+		__field(	unsigned long,	r0	)
+		__field(	unsigned long,	imm	)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc	= vcpu_pc;
+		__entry->r0		= r0;
+		__entry->imm		= imm;
+	),
+
+	TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+		  __entry->vcpu_pc, __entry->r0, __entry->imm)
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH