authorChristoffer Dall <c.dall@virtualopensystems.com>2013-01-20 18:47:42 -0500
committerChristoffer Dall <c.dall@virtualopensystems.com>2013-01-23 13:29:12 -0500
commitf7ed45be3ba524e06a6d933f0517dc7ad2d06703 (patch)
tree9e3cc5b0441daf154bccec4e672f17522b9fe13a
parent86ce85352f0da7e1431ad8efcb04323819a620e7 (diff)
KVM: ARM: World-switch implementation
Provides complete world-switch implementation to switch to other guests
running in non-secure modes. Includes Hyp exception handlers that capture
necessary exception information and store the information on the VCPU and
KVM structures.

The following Hyp-ABI is also documented in the code:

Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
   Switching to Hyp mode is done through a simple HVC #0 instruction. The
   exception vector code will check that the HVC comes from VMID==0 and if
   so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
   - r0 contains a pointer to a HYP function
   - r1, r2, and r3 contain arguments to the above function.
   - The HYP function will be called with its arguments in r0, r1 and r2.
   On HYP function return, we return directly to SVC.

A call to a function executing in Hyp mode is performed like the following:

        <svc code>
        ldr     r0, =BSYM(my_hyp_fn)
        ldr     r1, =my_param
        hvc #0  ; Call my_hyp_fn(my_param) from HYP mode
        <svc code>

Otherwise, the world-switch is pretty straightforward. All state that can
be modified by the guest is first backed up on the Hyp stack, and the VCPU
values are loaded onto the hardware. State that is not loaded, but is
theoretically modifiable by the guest, is protected through the
virtualization features to generate a trap and cause software emulation.
Upon guest return, all state is restored from hardware onto the VCPU
struct and the original state is restored from the Hyp stack onto the
hardware.

SMP support using the VMPIDR calculated on the basis of the host MPIDR
and overriding the low bits with KVM vcpu_id contributed by Marc Zyngier.

Reuse of VMIDs has been implemented by Antonios Motakis and adapted from
a separate patch into the appropriate patches introducing the
functionality. Note that the VMIDs are stored per VM as required by the
ARM architecture reference manual.

To support VFP/NEON we trap those instructions using the HCPTR. When we
trap, we switch the FPU. After a guest exit, the VFP state is returned to
the host. When disabling access to floating point instructions, we also
mask FPEXC_EN in order to avoid the guest receiving Undefined instruction
exceptions before we have a chance to switch back the floating point
state. We are reusing vfp_hard_struct, so we depend on VFPv3 being enabled
in the host kernel; if not, we still trap cp10 and cp11 in order to inject
an undefined instruction exception whenever the guest tries to use
VFP/NEON. VFP/NEON developed by Antonios Motakis and Rusty Russell.

Aborts that are permission faults, and not stage-1 page table walks, do
not report the faulting address in the HPFAR. We have to resolve the IPA,
and store it just like the HPFAR register on the VCPU struct. If the IPA
cannot be resolved, it means another CPU is playing with the page tables,
and we simply restart the guest. This quirk was fixed by Marc Zyngier.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Antonios Motakis <a.motakis@virtualopensystems.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
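The Hyp-ABI above is what kvm_call_hyp() relies on: the C side loads the
function pointer and up to three arguments, issues HVC #0, and the vector
code re-dispatches in Hyp mode. A minimal sketch of the host-side usage,
mirroring the calls that appear later in this patch:

	/* declared in arch/arm/include/asm/kvm_host.h */
	u64 kvm_call_hyp(void *hypfn, ...);

	/* run the guest: __kvm_vcpu_run executes in Hyp mode with r0 = vcpu */
	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

	/* flush TLBs + icache for all VMIDs when the VMID generation rolls over */
	kvm_call_hyp(__kvm_flush_vm_context);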
-rw-r--r--  arch/arm/include/asm/kvm_arm.h   |  51
-rw-r--r--  arch/arm/include/asm/kvm_host.h  |  13
-rw-r--r--  arch/arm/kernel/asm-offsets.c    |  25
-rw-r--r--  arch/arm/kvm/arm.c               | 200
-rw-r--r--  arch/arm/kvm/interrupts.S        | 396
-rw-r--r--  arch/arm/kvm/interrupts_head.S   | 441
6 files changed, 1122 insertions, 4 deletions
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index d64b5250ad4e..c69936b1fc53 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -98,6 +98,18 @@
98#define TTBCR_T0SZ 3
99#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
100
101/* Hyp System Trap Register */
102#define HSTR_T(x) (1 << x)
103#define HSTR_TTEE (1 << 16)
104#define HSTR_TJDBX (1 << 17)
105
106/* Hyp Coprocessor Trap Register */
107#define HCPTR_TCP(x) (1 << x)
108#define HCPTR_TCP_MASK (0x3fff)
109#define HCPTR_TASE (1 << 15)
110#define HCPTR_TTA (1 << 20)
111#define HCPTR_TCPAC (1 << 31)
112
113/* Hyp Debug Configuration Register bits */
114#define HDCR_TDRA (1 << 11)
115#define HDCR_TDOSA (1 << 10)
@@ -144,6 +156,45 @@
156#else
157#define VTTBR_X (5 - KVM_T0SZ)
158#endif
159#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
160#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
161#define VTTBR_VMID_SHIFT (48LLU)
162#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
163
164/* Hyp Syndrome Register (HSR) bits */
165#define HSR_EC_SHIFT (26)
166#define HSR_EC (0x3fU << HSR_EC_SHIFT)
167#define HSR_IL (1U << 25)
168#define HSR_ISS (HSR_IL - 1)
169#define HSR_ISV_SHIFT (24)
170#define HSR_ISV (1U << HSR_ISV_SHIFT)
171#define HSR_FSC (0x3f)
172#define HSR_FSC_TYPE (0x3c)
173#define HSR_WNR (1 << 6)
174
175#define FSC_FAULT (0x04)
176#define FSC_PERM (0x0c)
177
178/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
179#define HPFAR_MASK (~0xf)
180
181#define HSR_EC_UNKNOWN (0x00)
182#define HSR_EC_WFI (0x01)
183#define HSR_EC_CP15_32 (0x03)
184#define HSR_EC_CP15_64 (0x04)
185#define HSR_EC_CP14_MR (0x05)
186#define HSR_EC_CP14_LS (0x06)
187#define HSR_EC_CP_0_13 (0x07)
188#define HSR_EC_CP10_ID (0x08)
189#define HSR_EC_JAZELLE (0x09)
190#define HSR_EC_BXJ (0x0A)
191#define HSR_EC_CP14_64 (0x0C)
192#define HSR_EC_SVC_HYP (0x11)
193#define HSR_EC_HVC (0x12)
194#define HSR_EC_SMC (0x13)
195#define HSR_EC_IABT (0x20)
196#define HSR_EC_IABT_HYP (0x21)
197#define HSR_EC_DABT (0x24)
198#define HSR_EC_DABT_HYP (0x25)
199
200#endif /* __ARM_KVM_ARM_H__ */
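The HSR masks added above are what the Hyp exception handlers and the later
fault-handling code use to pick apart a syndrome value. A small standalone
sketch of that decoding, with the constants copied from this hunk and an
invented example syndrome:

	#include <stdint.h>
	#include <stdio.h>

	/* mirrored from arch/arm/include/asm/kvm_arm.h above */
	#define HSR_EC_SHIFT	26
	#define HSR_EC		(0x3fU << HSR_EC_SHIFT)
	#define HSR_IL		(1U << 25)
	#define HSR_ISS		(HSR_IL - 1)
	#define HSR_FSC_TYPE	(0x3c)
	#define HSR_EC_DABT	(0x24)
	#define FSC_PERM	(0x0c)

	int main(void)
	{
		uint32_t hsr = 0x9000004d;	/* invented example syndrome */
		unsigned ec  = (hsr & HSR_EC) >> HSR_EC_SHIFT;
		unsigned iss = hsr & HSR_ISS;

		printf("EC=%#x ISS=%#x\n", ec, iss);
		if (ec == HSR_EC_DABT && (hsr & HSR_FSC_TYPE) == FSC_PERM)
			printf("stage-2 permission fault on a data access\n");
		return 0;
	}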
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 3636c7ea4eb2..7a121089c733 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -21,6 +21,7 @@
21
22#include <asm/kvm.h>
23#include <asm/kvm_asm.h>
24#include <asm/fpstate.h>
25
26#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
27#define KVM_MEMORY_SLOTS 32
@@ -85,6 +86,14 @@ struct kvm_vcpu_arch {
86 u32 hxfar; /* Hyp Data/Inst Fault Address Register */
87 u32 hpfar; /* Hyp IPA Fault Address Register */
87 88
89 /* Floating point registers (VFP and Advanced SIMD/NEON) */
90 struct vfp_hard_struct vfp_guest;
91 struct vfp_hard_struct *vfp_host;
92
93 /*
94 * Anything that is not used directly from assembly code goes
95 * here.
96 */
97 /* Interrupt related fields */
98 u32 irq_lines; /* IRQ and FIQ levels */
99
@@ -93,6 +102,9 @@ struct kvm_vcpu_arch {
102
103 /* Cache some mmu pages needed inside spinlock regions */
104 struct kvm_mmu_memory_cache mmu_page_cache;
105
106 /* Detect first run of a vcpu */
107 bool has_run_once;
108};
109
110struct kvm_vm_stat {
@@ -112,6 +124,7 @@ struct kvm_one_reg;
124int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
125int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
126u64 kvm_call_hyp(void *hypfn, ...);
127void force_vm_exit(const cpumask_t *mask);
128
129#define KVM_ARCH_WANT_MMU_NOTIFIER
130struct kvm;
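The two new VFP fields split the floating point context: the guest copy
lives in the vcpu itself, while vfp_host points at per-CPU storage for the
host's registers. A condensed view of how the rest of this patch ties them
together (the per-CPU area is allocated elsewhere in arm.c, outside the
hunks shown here):

	static struct vfp_hard_struct __percpu *kvm_host_vfp_state;

	void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	{
		vcpu->cpu = cpu;
		/* interrupts.S saves the host VFP registers through this
		 * pointer and switches to/from arch.vfp_guest on demand */
		vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);
	}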
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c985b481192c..c8b3272dfed1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -13,6 +13,9 @@
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/dma-mapping.h>
16#ifdef CONFIG_KVM_ARM_HOST
17#include <linux/kvm_host.h>
18#endif
19#include <asm/cacheflush.h>
20#include <asm/glue-df.h>
21#include <asm/glue-pf.h>
@@ -146,5 +149,27 @@ int main(void)
149 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
150 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
151 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
152#ifdef CONFIG_KVM_ARM_HOST
153 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
154 DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
155 DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15));
156 DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest));
157 DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host));
158 DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs));
159 DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs));
160 DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs));
161 DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs));
162 DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs));
163 DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs));
164 DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
165 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
166 DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
167 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
168 DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr));
169 DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar));
170 DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar));
171 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc));
172 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
173#endif
174 return 0;
175}
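These DEFINE()s are how the world-switch assembly gets numeric offsets into
struct kvm_vcpu: the values are emitted into a generated asm-offsets.h at
build time. A rough sketch of the usual kbuild idiom (paraphrased from
memory, not copied from the tree) and of how interrupts.S then consumes the
constants:

	/* the compiler emits each value into its assembly output; a build
	 * script turns those markers into #defines in asm-offsets.h */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/* consumers in arch/arm/kvm/interrupts.S, e.g.:
	 *
	 *	ldr	r1, [vcpu, #VCPU_MIDR]		@ vcpu->arch.midr
	 *	add	r7, vcpu, #VCPU_VFP_GUEST	@ &vcpu->arch.vfp_guest
	 */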
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2101152c3a4b..9e9fa4477884 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -40,6 +40,7 @@
40#include <asm/kvm_arm.h>
41#include <asm/kvm_asm.h>
42#include <asm/kvm_mmu.h>
43#include <asm/kvm_emulate.h>
44
45#ifdef REQUIRES_VIRT
46__asm__(".arch_extension virt");
@@ -49,6 +50,10 @@ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
50static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
51static unsigned long hyp_default_vectors;
52
53/* The VMID used in the VTTBR */
54static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
55static u8 kvm_next_vmid;
56static DEFINE_SPINLOCK(kvm_vmid_lock);
57
58int kvm_arch_hardware_enable(void *garbage)
59{
@@ -276,6 +281,8 @@ int __attribute_const__ kvm_target_cpu(void)
281
282int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
283{
284 /* Force users to call KVM_ARM_VCPU_INIT */
285 vcpu->arch.target = -1;
286 return 0;
287}
288
@@ -286,6 +293,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
293void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
294{
295 vcpu->cpu = cpu;
296 vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);
297}
298
299void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -316,9 +324,199 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
324 return 0;
325}
326
327/* Just ensure a guest exit from a particular CPU */
328static void exit_vm_noop(void *info)
329{
330}
331
332void force_vm_exit(const cpumask_t *mask)
333{
334 smp_call_function_many(mask, exit_vm_noop, NULL, true);
335}
336
337/**
338 * need_new_vmid_gen - check that the VMID is still valid
339 * @kvm: The VM's VMID to check
340 *
341 * return true if there is a new generation of VMIDs being used
342 *
343 * The hardware supports only 256 values with the value zero reserved for the
344 * host, so we check if an assigned value belongs to a previous generation,
345 * which requires us to assign a new value. If we're the first to use a
346 * VMID for the new generation, we must flush necessary caches and TLBs on all
347 * CPUs.
348 */
349static bool need_new_vmid_gen(struct kvm *kvm)
350{
351 return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
352}
353
354/**
355 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
356 * @kvm The guest that we are about to run
357 *
358 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
359 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
360 * caches and TLBs.
361 */
362static void update_vttbr(struct kvm *kvm)
363{
364 phys_addr_t pgd_phys;
365 u64 vmid;
366
367 if (!need_new_vmid_gen(kvm))
368 return;
369
370 spin_lock(&kvm_vmid_lock);
371
372 /*
373 * We need to re-check the vmid_gen here to ensure that if another vcpu
374 * already allocated a valid vmid for this vm, then this vcpu should
375 * use the same vmid.
376 */
377 if (!need_new_vmid_gen(kvm)) {
378 spin_unlock(&kvm_vmid_lock);
379 return;
380 }
381
382 /* First user of a new VMID generation? */
383 if (unlikely(kvm_next_vmid == 0)) {
384 atomic64_inc(&kvm_vmid_gen);
385 kvm_next_vmid = 1;
386
387 /*
388 * On SMP we know no other CPUs can use this CPU's or each
389 * other's VMID after force_vm_exit returns since the
390 * kvm_vmid_lock blocks them from reentry to the guest.
391 */
392 force_vm_exit(cpu_all_mask);
393 /*
394 * Now broadcast TLB + ICACHE invalidation over the inner
395 * shareable domain to make sure all data structures are
396 * clean.
397 */
398 kvm_call_hyp(__kvm_flush_vm_context);
399 }
400
401 kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
402 kvm->arch.vmid = kvm_next_vmid;
403 kvm_next_vmid++;
404
405 /* update vttbr to be used with the new vmid */
406 pgd_phys = virt_to_phys(kvm->arch.pgd);
407 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
408 kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
409 kvm->arch.vttbr |= vmid;
410
411 spin_unlock(&kvm_vmid_lock);
412}
413
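The generation counter used by need_new_vmid_gen()/update_vttbr() above can
be hard to see through the locking. A simplified, single-threaded model of
the same scheme (names invented for the sketch; the force_vm_exit() and TLB
flush on rollover are reduced to a printf):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t vmid_gen = 1;	/* current generation */
	static uint8_t  next_vmid = 1;	/* VMID 0 is reserved for the host */

	struct vm { uint64_t vmid_gen; uint8_t vmid; };

	static void assign_vmid(struct vm *vm)
	{
		if (vm->vmid_gen == vmid_gen)
			return;			/* still valid, nothing to do */

		if (next_vmid == 0) {		/* 8-bit counter wrapped */
			vmid_gen++;
			next_vmid = 1;
			printf("gen %llu: kick guests out, flush TLBs/icache\n",
			       (unsigned long long)vmid_gen);
		}

		vm->vmid_gen = vmid_gen;
		vm->vmid = next_vmid++;
	}

	int main(void)
	{
		struct vm vms[300] = { { 0 } };

		for (int i = 0; i < 300; i++)
			assign_vmid(&vms[i]);
		printf("vm[0]: vmid %u gen %llu, vm[299]: vmid %u gen %llu\n",
		       vms[0].vmid, (unsigned long long)vms[0].vmid_gen,
		       vms[299].vmid, (unsigned long long)vms[299].vmid_gen);
		return 0;
	}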
414/*
415 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
416 * proper exit to QEMU.
417 */
418static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
419 int exception_index)
420{
421 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
422 return 0;
423}
424
425static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
426{
427 if (likely(vcpu->arch.has_run_once))
428 return 0;
429
430 vcpu->arch.has_run_once = true;
431 return 0;
432}
433
434/**
435 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
436 * @vcpu: The VCPU pointer
437 * @run: The kvm_run structure pointer used for userspace state exchange
438 *
439 * This function is called through the VCPU_RUN ioctl from user space. It
440 * will execute VM code in a loop until the time slice for the process is used
441 * or some emulation is needed from user space in which case the function will
442 * return with return value 0 and with the kvm_run structure filled in with the
443 * required data for the requested emulation.
444 */
445int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
446{
321 return -EINVAL; 447 int ret;
448 sigset_t sigsaved;
449
450 /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
451 if (unlikely(vcpu->arch.target < 0))
452 return -ENOEXEC;
453
454 ret = kvm_vcpu_first_run_init(vcpu);
455 if (ret)
456 return ret;
457
458 if (vcpu->sigset_active)
459 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
460
461 ret = 1;
462 run->exit_reason = KVM_EXIT_UNKNOWN;
463 while (ret > 0) {
464 /*
465 * Check conditions before entering the guest
466 */
467 cond_resched();
468
469 update_vttbr(vcpu->kvm);
470
471 local_irq_disable();
472
473 /*
474 * Re-check atomic conditions
475 */
476 if (signal_pending(current)) {
477 ret = -EINTR;
478 run->exit_reason = KVM_EXIT_INTR;
479 }
480
481 if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
482 local_irq_enable();
483 continue;
484 }
485
486 /**************************************************************
487 * Enter the guest
488 */
489 trace_kvm_entry(*vcpu_pc(vcpu));
490 kvm_guest_enter();
491 vcpu->mode = IN_GUEST_MODE;
492
493 ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
494
495 vcpu->mode = OUTSIDE_GUEST_MODE;
496 kvm_guest_exit();
497 trace_kvm_exit(*vcpu_pc(vcpu));
498 /*
499 * We may have taken a host interrupt in HYP mode (ie
500 * while executing the guest). This interrupt is still
501 * pending, as we haven't serviced it yet!
502 *
503 * We're now back in SVC mode, with interrupts
504 * disabled. Enabling the interrupts now will have
505 * the effect of taking the interrupt again, in SVC
506 * mode this time.
507 */
508 local_irq_enable();
509
510 /*
511 * Back from guest
512 *************************************************************/
513
514 ret = handle_exit(vcpu, run, ret);
515 }
516
517 if (vcpu->sigset_active)
518 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
519 return ret;
520}
521
522static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
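kvm_arch_vcpu_ioctl_run() above is reached through the generic KVM_RUN
ioctl, and the -ENOEXEC check is what forces userspace to issue
KVM_ARM_VCPU_INIT first. A hedged sketch of the userspace side on an ARM
host (error handling and the mmap of struct kvm_run are elided, and the
target value is a placeholder, since this patch does not define it):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int run_vcpu_sketch(void)
	{
		int kvm  = open("/dev/kvm", O_RDWR);
		int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

		/* without this, KVM_RUN fails with ENOEXEC (target is still -1) */
		struct kvm_vcpu_init init = {
			.target = 0,	/* placeholder: the CPU target for this host */
		};
		ioctl(vcpu, KVM_ARM_VCPU_INIT, &init);

		for (;;) {
			if (ioctl(vcpu, KVM_RUN, 0) < 0)
				break;	/* e.g. EINTR when a signal was pending */
			/* inspect run->exit_reason via the mmap'ed kvm_run area */
		}
		return 0;
	}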
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index f701aff31e44..c5400d2e97ca 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -20,9 +20,12 @@
20#include <linux/const.h>
21#include <asm/unified.h>
22#include <asm/page.h>
23#include <asm/ptrace.h>
24#include <asm/asm-offsets.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_arm.h>
27#include <asm/vfpmacros.h>
28#include "interrupts_head.S"
29
30 .text
31
@@ -31,23 +34,164 @@ __kvm_hyp_code_start:
34
35/********************************************************************
36 * Flush per-VMID TLBs
37 *
38 * void __kvm_tlb_flush_vmid(struct kvm *kvm);
39 *
40 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
41 * inside the inner-shareable domain (which is the case for all v7
42 * implementations). If we come across a non-IS SMP implementation, we'll
43 * have to use an IPI based mechanism. Until then, we stick to the simple
44 * hardware assisted version.
45 */
46ENTRY(__kvm_tlb_flush_vmid)
47 push {r2, r3}
48
49 add r0, r0, #KVM_VTTBR
50 ldrd r2, r3, [r0]
51 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
52 isb
53 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
54 dsb
55 isb
56 mov r2, #0
57 mov r3, #0
58 mcrr p15, 6, r2, r3, c2 @ Back to VMID #0
59 isb @ Not necessary if followed by eret
60
61 pop {r2, r3}
62 bx lr
63ENDPROC(__kvm_tlb_flush_vmid)
64
65/********************************************************************
40 * Flush TLBs and instruction caches of current CPU for all VMIDs 66 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
67 * domain, for all VMIDs
68 *
69 * void __kvm_flush_vm_context(void);
70 */
71ENTRY(__kvm_flush_vm_context)
72 mov r0, #0 @ rn parameter for c15 flushes is SBZ
73
74 /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
75 mcr p15, 4, r0, c8, c3, 4
76 /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
77 mcr p15, 0, r0, c7, c1, 0
78 dsb
79 isb @ Not necessary if followed by eret
80
81 bx lr
82ENDPROC(__kvm_flush_vm_context)
83
84
85/********************************************************************
86 * Hypervisor world-switch code
87 *
88 *
89 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
90 */
91ENTRY(__kvm_vcpu_run)
50 bx lr 92 @ Save the vcpu pointer
93 mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR
94
95 save_host_regs
96
97 @ Store hardware CP15 state and load guest state
98 read_cp15_state store_to_vcpu = 0
99 write_cp15_state read_from_vcpu = 1
100
101 @ If the host kernel has not been configured with VFPv3 support,
102 @ then it is safer if we deny guests from using it as well.
103#ifdef CONFIG_VFPv3
104 @ Set FPEXC_EN so the guest doesn't trap floating point instructions
105 VFPFMRX r2, FPEXC @ VMRS
106 push {r2}
107 orr r2, r2, #FPEXC_EN
108 VFPFMXR FPEXC, r2 @ VMSR
109#endif
110
111 @ Configure Hyp-role
112 configure_hyp_role vmentry
113
114 @ Trap coprocessor CRx accesses
115 set_hstr vmentry
116 set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
117 set_hdcr vmentry
118
119 @ Write configured ID register into MIDR alias
120 ldr r1, [vcpu, #VCPU_MIDR]
121 mcr p15, 4, r1, c0, c0, 0
122
123 @ Write guest view of MPIDR into VMPIDR
124 ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
125 mcr p15, 4, r1, c0, c0, 5
126
127 @ Set up guest memory translation
128 ldr r1, [vcpu, #VCPU_KVM]
129 add r1, r1, #KVM_VTTBR
130 ldrd r2, r3, [r1]
131 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
132
133 @ We're all done, just restore the GPRs and go to the guest
134 restore_guest_regs
135 clrex @ Clear exclusive monitor
136 eret
137
138__kvm_vcpu_return:
139 /*
140 * return convention:
141 * guest r0, r1, r2 saved on the stack
142 * r0: vcpu pointer
143 * r1: exception code
144 */
145 save_guest_regs
146
147 @ Set VMID == 0
148 mov r2, #0
149 mov r3, #0
150 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
151
152 @ Don't trap coprocessor accesses for host kernel
153 set_hstr vmexit
154 set_hdcr vmexit
155 set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
156
157#ifdef CONFIG_VFPv3
158 @ Save floating point registers if we let the guest use them.
159 tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
160 bne after_vfp_restore
161
162 @ Switch VFP/NEON hardware state to the host's
163 add r7, vcpu, #VCPU_VFP_GUEST
164 store_vfp_state r7
165 add r7, vcpu, #VCPU_VFP_HOST
166 ldr r7, [r7]
167 restore_vfp_state r7
168
169after_vfp_restore:
170 @ Restore FPEXC_EN which we clobbered on entry
171 pop {r2}
172 VFPFMXR FPEXC, r2
173#endif
174
175 @ Reset Hyp-role
176 configure_hyp_role vmexit
177
178 @ Let host read hardware MIDR
179 mrc p15, 0, r2, c0, c0, 0
180 mcr p15, 4, r2, c0, c0, 0
181
182 @ Back to hardware MPIDR
183 mrc p15, 0, r2, c0, c0, 5
184 mcr p15, 4, r2, c0, c0, 5
185
186 @ Store guest CP15 state and restore host state
187 read_cp15_state store_to_vcpu = 1
188 write_cp15_state read_from_vcpu = 0
189
190 restore_host_regs
191 clrex @ Clear exclusive monitor
192 mov r0, r1 @ Return the return code
193 mov r1, #0 @ Clear upper bits in return value
194 bx lr @ return to IOCTL
195
196/********************************************************************
197 * Call function in Hyp mode
@@ -77,12 +221,258 @@ ENTRY(kvm_call_hyp)
221
222/********************************************************************
223 * Hypervisor exception vector and handlers
224 *
225 *
226 * The KVM/ARM Hypervisor ABI is defined as follows:
227 *
228 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
229 * instruction is issued since all traps are disabled when running the host
230 * kernel as per the Hyp-mode initialization at boot time.
231 *
232 * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
233 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
234 * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
235 * instructions are called from within Hyp-mode.
236 *
237 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
238 * Switching to Hyp mode is done through a simple HVC #0 instruction. The
239 * exception vector code will check that the HVC comes from VMID==0 and if
240 * so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
241 * - r0 contains a pointer to a HYP function
242 * - r1, r2, and r3 contain arguments to the above function.
243 * - The HYP function will be called with its arguments in r0, r1 and r2.
244 * On HYP function return, we return directly to SVC.
245 *
246 * Note that the above is used to execute code in Hyp-mode from a host-kernel
247 * point of view, and is a different concept from performing a world-switch and
248 * executing guest code SVC mode (with a VMID != 0).
249 */
250
251/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
252.macro bad_exception exception_code, panic_str
253 push {r0-r2}
254 mrrc p15, 6, r0, r1, c2 @ Read VTTBR
255 lsr r1, r1, #16
256 ands r1, r1, #0xff
257 beq 99f
258
259 load_vcpu @ Load VCPU pointer
260 .if \exception_code == ARM_EXCEPTION_DATA_ABORT
261 mrc p15, 4, r2, c5, c2, 0 @ HSR
262 mrc p15, 4, r1, c6, c0, 0 @ HDFAR
263 str r2, [vcpu, #VCPU_HSR]
264 str r1, [vcpu, #VCPU_HxFAR]
265 .endif
266 .if \exception_code == ARM_EXCEPTION_PREF_ABORT
267 mrc p15, 4, r2, c5, c2, 0 @ HSR
268 mrc p15, 4, r1, c6, c0, 2 @ HIFAR
269 str r2, [vcpu, #VCPU_HSR]
270 str r1, [vcpu, #VCPU_HxFAR]
271 .endif
272 mov r1, #\exception_code
273 b __kvm_vcpu_return
274
275 @ We were in the host already. Let's craft a panic-ing return to SVC.
27699: mrs r2, cpsr
277 bic r2, r2, #MODE_MASK
278 orr r2, r2, #SVC_MODE
279THUMB( orr r2, r2, #PSR_T_BIT )
280 msr spsr_cxsf, r2
281 mrs r1, ELR_hyp
282 ldr r2, =BSYM(panic)
283 msr ELR_hyp, r2
284 ldr r0, =\panic_str
285 eret
286.endm
287
288 .text
289
290 .align 5
291__kvm_hyp_vector:
292 .globl __kvm_hyp_vector
85 nop 293
294 @ Hyp-mode exception vector
295 W(b) hyp_reset
296 W(b) hyp_undef
297 W(b) hyp_svc
298 W(b) hyp_pabt
299 W(b) hyp_dabt
300 W(b) hyp_hvc
301 W(b) hyp_irq
302 W(b) hyp_fiq
303
304 .align
305hyp_reset:
306 b hyp_reset
307
308 .align
309hyp_undef:
310 bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
311
312 .align
313hyp_svc:
314 bad_exception ARM_EXCEPTION_HVC, svc_die_str
315
316 .align
317hyp_pabt:
318 bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
319
320 .align
321hyp_dabt:
322 bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
323
324 .align
325hyp_hvc:
326 /*
327 * Getting here is either becuase of a trap from a guest or from calling
328 * HVC from the host kernel, which means "switch to Hyp mode".
329 */
330 push {r0, r1, r2}
331
332 @ Check syndrome register
333 mrc p15, 4, r1, c5, c2, 0 @ HSR
334 lsr r0, r1, #HSR_EC_SHIFT
335#ifdef CONFIG_VFPv3
336 cmp r0, #HSR_EC_CP_0_13
337 beq switch_to_guest_vfp
338#endif
339 cmp r0, #HSR_EC_HVC
340 bne guest_trap @ Not HVC instr.
341
342 /*
343 * Let's check if the HVC came from VMID 0 and allow simple
344 * switch to Hyp mode
345 */
346 mrrc p15, 6, r0, r2, c2
347 lsr r2, r2, #16
348 and r2, r2, #0xff
349 cmp r2, #0
350 bne guest_trap @ Guest called HVC
351
352host_switch_to_hyp:
353 pop {r0, r1, r2}
354
355 push {lr}
356 mrs lr, SPSR
357 push {lr}
358
359 mov lr, r0
360 mov r0, r1
361 mov r1, r2
362 mov r2, r3
363
364THUMB( orr lr, #1)
365 blx lr @ Call the HYP function
366
367 pop {lr}
368 msr SPSR_csxf, lr
369 pop {lr}
370 eret
371
372guest_trap:
373 load_vcpu @ Load VCPU pointer to r0
374 str r1, [vcpu, #VCPU_HSR]
375
376 @ Check if we need the fault information
377 lsr r1, r1, #HSR_EC_SHIFT
378 cmp r1, #HSR_EC_IABT
379 mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
380 beq 2f
381 cmp r1, #HSR_EC_DABT
382 bne 1f
383 mrc p15, 4, r2, c6, c0, 0 @ HDFAR
384
3852: str r2, [vcpu, #VCPU_HxFAR]
386
387 /*
388 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
389 *
390 * Abort on the stage 2 translation for a memory access from a
391 * Non-secure PL1 or PL0 mode:
392 *
393 * For any Access flag fault or Translation fault, and also for any
394 * Permission fault on the stage 2 translation of a memory access
395 * made as part of a translation table walk for a stage 1 translation,
396 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
397 * is UNKNOWN.
398 */
399
400 /* Check for permission fault, and S1PTW */
401 mrc p15, 4, r1, c5, c2, 0 @ HSR
402 and r0, r1, #HSR_FSC_TYPE
403 cmp r0, #FSC_PERM
404 tsteq r1, #(1 << 7) @ S1PTW
405 mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
406 bne 3f
407
408 /* Resolve IPA using the xFAR */
409 mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
410 isb
411 mrrc p15, 0, r0, r1, c7 @ PAR
412 tst r0, #1
413 bne 4f @ Failed translation
414 ubfx r2, r0, #12, #20
415 lsl r2, r2, #4
416 orr r2, r2, r1, lsl #24
417
4183: load_vcpu @ Load VCPU pointer to r0
419 str r2, [r0, #VCPU_HPFAR]
420
4211: mov r1, #ARM_EXCEPTION_HVC
422 b __kvm_vcpu_return
423
4244: pop {r0, r1, r2} @ Failed translation, return to guest
425 eret
426
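The ATS1CPR/PAR sequence above rebuilds an HPFAR-style value (IPA bits
[39:12] stored at bits [31:4]) for permission faults, where the hardware
leaves HPFAR UNKNOWN. The same bit manipulation as a standalone C sketch;
the sample address is invented:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t par_to_hpfar(uint64_t par)
	{
		uint32_t lo = (uint32_t)par;		/* PAR[31:0] */
		uint32_t hi = (uint32_t)(par >> 32);	/* PAR[39:32] in hi[7:0] */

		if (lo & 1)		/* PAR.F set: the translation aborted; */
			return 0;	/* the real code restarts the guest    */

		/* mirrors: ubfx r2, r0, #12, #20; lsl r2, #4; orr r2, r2, r1, lsl #24 */
		return (((lo >> 12) & 0xfffff) << 4) | ((hi & 0xff) << 24);
	}

	int main(void)
	{
		uint64_t par = 0x1234567000ULL;	/* invented stage-1 result */

		printf("HPFAR-style IPA field: %#x\n", (unsigned)par_to_hpfar(par));
		return 0;
	}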
427/*
428 * If VFPv3 support is not available, then we will not switch the VFP
429 * registers; however cp10 and cp11 accesses will still trap and fallback
430 * to the regular coprocessor emulation code, which currently will
431 * inject an undefined exception to the guest.
432 */
433#ifdef CONFIG_VFPv3
434switch_to_guest_vfp:
435 load_vcpu @ Load VCPU pointer to r0
436 push {r3-r7}
437
438 @ NEON/VFP used. Turn on VFP access.
439 set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
440
441 @ Switch VFP/NEON hardware state to the guest's
442 add r7, r0, #VCPU_VFP_HOST
443 ldr r7, [r7]
444 store_vfp_state r7
445 add r7, r0, #VCPU_VFP_GUEST
446 restore_vfp_state r7
447
448 pop {r3-r7}
449 pop {r0-r2}
450 eret
451#endif
452
453 .align
454hyp_irq:
455 push {r0, r1, r2}
456 mov r1, #ARM_EXCEPTION_IRQ
457 load_vcpu @ Load VCPU pointer to r0
458 b __kvm_vcpu_return
459
460 .align
461hyp_fiq:
462 b hyp_fiq
463
464 .ltorg
465
466__kvm_hyp_code_end:
467 .globl __kvm_hyp_code_end
468
469 .section ".rodata"
470
471und_die_str:
472 .ascii "unexpected undefined exception in Hyp mode at: %#08x"
473pabt_die_str:
474 .ascii "unexpected prefetch abort in Hyp mode at: %#08x"
475dabt_die_str:
476 .ascii "unexpected data abort in Hyp mode at: %#08x"
477svc_die_str:
478 .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x"
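The exception codes this vector code hands back (ARM_EXCEPTION_HVC,
ARM_EXCEPTION_IRQ and the abort codes used by bad_exception) are the values
that arm.c's handle_exit() receives as exception_index; in this patch that
function is still a stub reporting KVM_EXIT_INTERNAL_ERROR. A hypothetical
sketch of the shape a later dispatcher could take, not code from this
patch:

	/* return >0 to re-enter the guest, 0 to exit to userspace, <0 on error */
	static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			       int exception_index)
	{
		switch (exception_index) {
		case ARM_EXCEPTION_IRQ:
			/* the host IRQ was already retaken in SVC mode */
			return 1;
		case ARM_EXCEPTION_HVC:
			/* decode vcpu->arch.hsr: cp15 trap, WFI, stage-2 fault, ... */
			return 1;	/* or 0 when userspace emulation is needed */
		default:
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return 0;
		}
	}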
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
new file mode 100644
index 000000000000..6a95d341e9c5
--- /dev/null
+++ b/arch/arm/kvm/interrupts_head.S
@@ -0,0 +1,441 @@
1#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
2#define VCPU_USR_SP (VCPU_USR_REG(13))
3#define VCPU_USR_LR (VCPU_USR_REG(14))
4#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
5
6/*
7 * Many of these macros need to access the VCPU structure, which is always
8 * held in r0. These macros should never clobber r1, as it is used to hold the
9 * exception code on the return path (except of course the macro that switches
10 * all the registers before the final jump to the VM).
11 */
12vcpu .req r0 @ vcpu pointer always in r0
13
14/* Clobbers {r2-r6} */
15.macro store_vfp_state vfp_base
16 @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
17 VFPFMRX r2, FPEXC
18 @ Make sure VFP is enabled so we can touch the registers.
19 orr r6, r2, #FPEXC_EN
20 VFPFMXR FPEXC, r6
21
22 VFPFMRX r3, FPSCR
23 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
24 beq 1f
25 @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
26 @ we only need to save them if FPEXC_EX is set.
27 VFPFMRX r4, FPINST
28 tst r2, #FPEXC_FP2V
29 VFPFMRX r5, FPINST2, ne @ vmrsne
30 bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
31 VFPFMXR FPEXC, r6
321:
33 VFPFSTMIA \vfp_base, r6 @ Save VFP registers
34 stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
35.endm
36
37/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
38.macro restore_vfp_state vfp_base
39 VFPFLDMIA \vfp_base, r6 @ Load VFP registers
40 ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2
41
42 VFPFMXR FPSCR, r3
43 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
44 beq 1f
45 VFPFMXR FPINST, r4
46 tst r2, #FPEXC_FP2V
47 VFPFMXR FPINST2, r5, ne
481:
49 VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
50.endm
51
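The FPEXC_EX/FPEXC_FP2V tests above implement the architectural rule that
FPINST and FPINST2 only hold valid state (and are only safe to touch) when
the VFP subarchitecture exception bits say so. The same logic in C for
readability; the read_*/write_* helpers are stand-ins for the VMRS/VMSR
transfers done by the VFPFMRX/VFPFMXR macros:

	struct vfp_ctrl { u32 fpexc, fpscr, fpinst, fpinst2; };

	static void save_vfp_ctrl(struct vfp_ctrl *ctx)
	{
		/* the real macro first sets FPEXC_EN so the registers are accessible */
		ctx->fpexc = read_fpexc();
		ctx->fpscr = read_fpscr();
		if (ctx->fpexc & FPEXC_EX) {		/* exception state valid? */
			ctx->fpinst = read_fpinst();
			if (ctx->fpexc & FPEXC_FP2V)	/* FPINST2 valid too? */
				ctx->fpinst2 = read_fpinst2();
			/* drop EX before the bulk register transfer */
			write_fpexc(ctx->fpexc & ~FPEXC_EX);
		}
	}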
52/* These are simply for the macros to work - values don't have meaning */
53.equ usr, 0
54.equ svc, 1
55.equ abt, 2
56.equ und, 3
57.equ irq, 4
58.equ fiq, 5
59
60.macro push_host_regs_mode mode
61 mrs r2, SP_\mode
62 mrs r3, LR_\mode
63 mrs r4, SPSR_\mode
64 push {r2, r3, r4}
65.endm
66
67/*
68 * Store all host persistent registers on the stack.
69 * Clobbers all registers, in all modes, except r0 and r1.
70 */
71.macro save_host_regs
72 /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
73 mrs r2, ELR_hyp
74 push {r2}
75
76 /* usr regs */
77 push {r4-r12} @ r0-r3 are always clobbered
78 mrs r2, SP_usr
79 mov r3, lr
80 push {r2, r3}
81
82 push_host_regs_mode svc
83 push_host_regs_mode abt
84 push_host_regs_mode und
85 push_host_regs_mode irq
86
87 /* fiq regs */
88 mrs r2, r8_fiq
89 mrs r3, r9_fiq
90 mrs r4, r10_fiq
91 mrs r5, r11_fiq
92 mrs r6, r12_fiq
93 mrs r7, SP_fiq
94 mrs r8, LR_fiq
95 mrs r9, SPSR_fiq
96 push {r2-r9}
97.endm
98
99.macro pop_host_regs_mode mode
100 pop {r2, r3, r4}
101 msr SP_\mode, r2
102 msr LR_\mode, r3
103 msr SPSR_\mode, r4
104.endm
105
106/*
107 * Restore all host registers from the stack.
108 * Clobbers all registers, in all modes, except r0 and r1.
109 */
110.macro restore_host_regs
111 pop {r2-r9}
112 msr r8_fiq, r2
113 msr r9_fiq, r3
114 msr r10_fiq, r4
115 msr r11_fiq, r5
116 msr r12_fiq, r6
117 msr SP_fiq, r7
118 msr LR_fiq, r8
119 msr SPSR_fiq, r9
120
121 pop_host_regs_mode irq
122 pop_host_regs_mode und
123 pop_host_regs_mode abt
124 pop_host_regs_mode svc
125
126 pop {r2, r3}
127 msr SP_usr, r2
128 mov lr, r3
129 pop {r4-r12}
130
131 pop {r2}
132 msr ELR_hyp, r2
133.endm
134
135/*
136 * Restore SP, LR and SPSR for a given mode. offset is the offset of
137 * this mode's registers from the VCPU base.
138 *
139 * Assumes vcpu pointer in vcpu reg
140 *
141 * Clobbers r1, r2, r3, r4.
142 */
143.macro restore_guest_regs_mode mode, offset
144 add r1, vcpu, \offset
145 ldm r1, {r2, r3, r4}
146 msr SP_\mode, r2
147 msr LR_\mode, r3
148 msr SPSR_\mode, r4
149.endm
150
151/*
152 * Restore all guest registers from the vcpu struct.
153 *
154 * Assumes vcpu pointer in vcpu reg
155 *
156 * Clobbers *all* registers.
157 */
158.macro restore_guest_regs
159 restore_guest_regs_mode svc, #VCPU_SVC_REGS
160 restore_guest_regs_mode abt, #VCPU_ABT_REGS
161 restore_guest_regs_mode und, #VCPU_UND_REGS
162 restore_guest_regs_mode irq, #VCPU_IRQ_REGS
163
164 add r1, vcpu, #VCPU_FIQ_REGS
165 ldm r1, {r2-r9}
166 msr r8_fiq, r2
167 msr r9_fiq, r3
168 msr r10_fiq, r4
169 msr r11_fiq, r5
170 msr r12_fiq, r6
171 msr SP_fiq, r7
172 msr LR_fiq, r8
173 msr SPSR_fiq, r9
174
175 @ Load return state
176 ldr r2, [vcpu, #VCPU_PC]
177 ldr r3, [vcpu, #VCPU_CPSR]
178 msr ELR_hyp, r2
179 msr SPSR_cxsf, r3
180
181 @ Load user registers
182 ldr r2, [vcpu, #VCPU_USR_SP]
183 ldr r3, [vcpu, #VCPU_USR_LR]
184 msr SP_usr, r2
185 mov lr, r3
186 add vcpu, vcpu, #(VCPU_USR_REGS)
187 ldm vcpu, {r0-r12}
188.endm
189
190/*
191 * Save SP, LR and SPSR for a given mode. offset is the offset of
192 * this mode's registers from the VCPU base.
193 *
194 * Assumes vcpu pointer in vcpu reg
195 *
196 * Clobbers r2, r3, r4, r5.
197 */
198.macro save_guest_regs_mode mode, offset
199 add r2, vcpu, \offset
200 mrs r3, SP_\mode
201 mrs r4, LR_\mode
202 mrs r5, SPSR_\mode
203 stm r2, {r3, r4, r5}
204.endm
205
206/*
207 * Save all guest registers to the vcpu struct
208 * Expects guest's r0, r1, r2 on the stack.
209 *
210 * Assumes vcpu pointer in vcpu reg
211 *
212 * Clobbers r2, r3, r4, r5.
213 */
214.macro save_guest_regs
215 @ Store usr registers
216 add r2, vcpu, #VCPU_USR_REG(3)
217 stm r2, {r3-r12}
218 add r2, vcpu, #VCPU_USR_REG(0)
219 pop {r3, r4, r5} @ r0, r1, r2
220 stm r2, {r3, r4, r5}
221 mrs r2, SP_usr
222 mov r3, lr
223 str r2, [vcpu, #VCPU_USR_SP]
224 str r3, [vcpu, #VCPU_USR_LR]
225
226 @ Store return state
227 mrs r2, ELR_hyp
228 mrs r3, spsr
229 str r2, [vcpu, #VCPU_PC]
230 str r3, [vcpu, #VCPU_CPSR]
231
232 @ Store other guest registers
233 save_guest_regs_mode svc, #VCPU_SVC_REGS
234 save_guest_regs_mode abt, #VCPU_ABT_REGS
235 save_guest_regs_mode und, #VCPU_UND_REGS
236 save_guest_regs_mode irq, #VCPU_IRQ_REGS
237.endm
238
239/* Reads cp15 registers from hardware and stores them in memory
240 * @store_to_vcpu: If 0, registers are written in-order to the stack,
241 * otherwise to the VCPU struct pointed to by vcpu
242 *
243 * Assumes vcpu pointer in vcpu reg
244 *
245 * Clobbers r2 - r12
246 */
247.macro read_cp15_state store_to_vcpu
248 mrc p15, 0, r2, c1, c0, 0 @ SCTLR
249 mrc p15, 0, r3, c1, c0, 2 @ CPACR
250 mrc p15, 0, r4, c2, c0, 2 @ TTBCR
251 mrc p15, 0, r5, c3, c0, 0 @ DACR
252 mrrc p15, 0, r6, r7, c2 @ TTBR 0
253 mrrc p15, 1, r8, r9, c2 @ TTBR 1
254 mrc p15, 0, r10, c10, c2, 0 @ PRRR
255 mrc p15, 0, r11, c10, c2, 1 @ NMRR
256 mrc p15, 2, r12, c0, c0, 0 @ CSSELR
257
258 .if \store_to_vcpu == 0
259 push {r2-r12} @ Push CP15 registers
260 .else
261 str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
262 str r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
263 str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
264 str r5, [vcpu, #CP15_OFFSET(c3_DACR)]
265 add r2, vcpu, #CP15_OFFSET(c2_TTBR0)
266 strd r6, r7, [r2]
267 add r2, vcpu, #CP15_OFFSET(c2_TTBR1)
268 strd r8, r9, [r2]
269 str r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
270 str r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
271 str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
272 .endif
273
274 mrc p15, 0, r2, c13, c0, 1 @ CID
275 mrc p15, 0, r3, c13, c0, 2 @ TID_URW
276 mrc p15, 0, r4, c13, c0, 3 @ TID_URO
277 mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV
278 mrc p15, 0, r6, c5, c0, 0 @ DFSR
279 mrc p15, 0, r7, c5, c0, 1 @ IFSR
280 mrc p15, 0, r8, c5, c1, 0 @ ADFSR
281 mrc p15, 0, r9, c5, c1, 1 @ AIFSR
282 mrc p15, 0, r10, c6, c0, 0 @ DFAR
283 mrc p15, 0, r11, c6, c0, 2 @ IFAR
284 mrc p15, 0, r12, c12, c0, 0 @ VBAR
285
286 .if \store_to_vcpu == 0
287 push {r2-r12} @ Push CP15 registers
288 .else
289 str r2, [vcpu, #CP15_OFFSET(c13_CID)]
290 str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
291 str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
292 str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
293 str r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
294 str r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
295 str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
296 str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
297 str r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
298 str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
299 str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
300 .endif
301.endm
302
303/*
304 * Reads cp15 registers from memory and writes them to hardware
305 * @read_from_vcpu: If 0, registers are read in-order from the stack,
306 * otherwise from the VCPU struct pointed to by vcpu
307 *
308 * Assumes vcpu pointer in vcpu reg
309 */
310.macro write_cp15_state read_from_vcpu
311 .if \read_from_vcpu == 0
312 pop {r2-r12}
313 .else
314 ldr r2, [vcpu, #CP15_OFFSET(c13_CID)]
315 ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
316 ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
317 ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
318 ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
319 ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
320 ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
321 ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
322 ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
323 ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
324 ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
325 .endif
326
327 mcr p15, 0, r2, c13, c0, 1 @ CID
328 mcr p15, 0, r3, c13, c0, 2 @ TID_URW
329 mcr p15, 0, r4, c13, c0, 3 @ TID_URO
330 mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV
331 mcr p15, 0, r6, c5, c0, 0 @ DFSR
332 mcr p15, 0, r7, c5, c0, 1 @ IFSR
333 mcr p15, 0, r8, c5, c1, 0 @ ADFSR
334 mcr p15, 0, r9, c5, c1, 1 @ AIFSR
335 mcr p15, 0, r10, c6, c0, 0 @ DFAR
336 mcr p15, 0, r11, c6, c0, 2 @ IFAR
337 mcr p15, 0, r12, c12, c0, 0 @ VBAR
338
339 .if \read_from_vcpu == 0
340 pop {r2-r12}
341 .else
342 ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
343 ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
344 ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
345 ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)]
346 add r12, vcpu, #CP15_OFFSET(c2_TTBR0)
347 ldrd r6, r7, [r12]
348 add r12, vcpu, #CP15_OFFSET(c2_TTBR1)
349 ldrd r8, r9, [r12]
350 ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
351 ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
352 ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
353 .endif
354
355 mcr p15, 0, r2, c1, c0, 0 @ SCTLR
356 mcr p15, 0, r3, c1, c0, 2 @ CPACR
357 mcr p15, 0, r4, c2, c0, 2 @ TTBCR
358 mcr p15, 0, r5, c3, c0, 0 @ DACR
359 mcrr p15, 0, r6, r7, c2 @ TTBR 0
360 mcrr p15, 1, r8, r9, c2 @ TTBR 1
361 mcr p15, 0, r10, c10, c2, 0 @ PRRR
362 mcr p15, 0, r11, c10, c2, 1 @ NMRR
363 mcr p15, 2, r12, c0, c0, 0 @ CSSELR
364.endm
365
366/*
367 * Save the VGIC CPU state into memory
368 *
369 * Assumes vcpu pointer in vcpu reg
370 */
371.macro save_vgic_state
372.endm
373
374/*
375 * Restore the VGIC CPU state from memory
376 *
377 * Assumes vcpu pointer in vcpu reg
378 */
379.macro restore_vgic_state
380.endm
381
382.equ vmentry, 0
383.equ vmexit, 1
384
385/* Configures the HSTR (Hyp System Trap Register) on entry/return
386 * (hardware reset value is 0) */
387.macro set_hstr operation
388 mrc p15, 4, r2, c1, c1, 3
389 ldr r3, =HSTR_T(15)
390 .if \operation == vmentry
391 orr r2, r2, r3 @ Trap CR{15}
392 .else
393 bic r2, r2, r3 @ Don't trap any CRx accesses
394 .endif
395 mcr p15, 4, r2, c1, c1, 3
396.endm
397
398/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
399 * (hardware reset value is 0). Keep previous value in r2. */
400.macro set_hcptr operation, mask
401 mrc p15, 4, r2, c1, c1, 2
402 ldr r3, =\mask
403 .if \operation == vmentry
404 orr r3, r2, r3 @ Trap coproc-accesses defined in mask
405 .else
406 bic r3, r2, r3 @ Don't trap defined coproc-accesses
407 .endif
408 mcr p15, 4, r3, c1, c1, 2
409.endm
410
411/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
412 * (hardware reset value is 0) */
413.macro set_hdcr operation
414 mrc p15, 4, r2, c1, c1, 1
415 ldr r3, =(HDCR_TPM|HDCR_TPMCR)
416 .if \operation == vmentry
417 orr r2, r2, r3 @ Trap some perfmon accesses
418 .else
419 bic r2, r2, r3 @ Don't trap any perfmon accesses
420 .endif
421 mcr p15, 4, r2, c1, c1, 1
422.endm
423
424/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
425.macro configure_hyp_role operation
426 mrc p15, 4, r2, c1, c1, 0 @ HCR
427 bic r2, r2, #HCR_VIRT_EXCP_MASK
428 ldr r3, =HCR_GUEST_MASK
429 .if \operation == vmentry
430 orr r2, r2, r3
431 ldr r3, [vcpu, #VCPU_IRQ_LINES]
432 orr r2, r2, r3
433 .else
434 bic r2, r2, r3
435 .endif
436 mcr p15, 4, r2, c1, c1, 0
437.endm
438
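configure_hyp_role ORs vcpu->arch.irq_lines straight into the HCR on entry,
so the field is expected to already hold HCR-format virtual interrupt bits
(the "IRQ and FIQ levels" noted in kvm_host.h). A sketch of how the
line-update side could encode that; HCR_VI/HCR_VF are the architectural
virtual IRQ/FIQ bits and, like the helper name, are an assumption here
since this patch does not show them:

	static void set_guest_irq_line(struct kvm_vcpu *vcpu, bool is_fiq, bool level)
	{
		u32 mask = is_fiq ? HCR_VF : HCR_VI;

		if (level)
			vcpu->arch.irq_lines |= mask;	/* raised on next vmentry */
		else
			vcpu->arch.irq_lines &= ~mask;
	}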
439.macro load_vcpu
440 mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR
441.endm
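Taken together, set_hcptr, switch_to_guest_vfp and the VFP block in
__kvm_vcpu_return implement a lazy switch: the guest starts with cp10/cp11
trapped, its first VFP/NEON instruction bounces to Hyp where the hardware
state is swapped, and the host state is put back on exit only if that swap
actually happened. A compact standalone model of that state machine (names
invented; FPEXC handling omitted):

	#include <stdbool.h>
	#include <stdio.h>

	struct vfp_state { int owner; };	/* stands in for the register file */

	static struct vfp_state hw;		/* owner 0 = host, 1 = guest */
	static struct vfp_state host_copy, guest_copy;
	static bool cp10_11_trapped;

	static void vm_entry(void)
	{
		cp10_11_trapped = true;		/* set_hcptr vmentry: trap VFP/NEON */
	}

	static void guest_uses_vfp(void)	/* switch_to_guest_vfp path */
	{
		if (!cp10_11_trapped)
			return;			/* already switched, no trap taken */
		host_copy = hw;			/* store_vfp_state (host)    */
		hw = guest_copy;		/* restore_vfp_state (guest) */
		hw.owner = 1;
		cp10_11_trapped = false;	/* set_hcptr vmexit: stop trapping */
	}

	static void vm_exit(void)		/* __kvm_vcpu_return path */
	{
		if (cp10_11_trapped)
			return;			/* guest never touched VFP/NEON */
		guest_copy = hw;		/* store_vfp_state (guest)  */
		hw = host_copy;			/* restore_vfp_state (host) */
		hw.owner = 0;
	}

	int main(void)
	{
		vm_entry();
		vm_exit();
		printf("owner after idle exit: %d (host)\n", hw.owner);

		vm_entry();
		guest_uses_vfp();
		printf("owner while guest runs: %d (guest)\n", hw.owner);
		vm_exit();
		printf("owner after VFP exit: %d (host)\n", hw.owner);
		return 0;
	}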