path: root/arch/arm/kvm
author     Marc Zyngier <marc.zyngier@arm.com>   2016-01-05 13:45:17 -0500
committer  Marc Zyngier <marc.zyngier@arm.com>   2016-02-29 13:34:14 -0500
commit     b98e2e728eed3091edbce64cfcc447a482b7726c (patch)
tree       c3ca5d0ff2d4563d9803cc59f3b3e6e693b99e08 /arch/arm/kvm
parent     b57cd6f6407d420d522ab71b9c0dd11993e49ba1 (diff)
ARM: KVM: Remove the old world switch
As we now have a full reimplementation of the world switch, it is time
to kiss the old stuff goodbye. I'm not sure we'll miss it.

Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--  arch/arm/kvm/interrupts.S        469
-rw-r--r--  arch/arm/kvm/interrupts_head.S   660
2 files changed, 1 insertion, 1128 deletions
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 01eb169f38f6..b1bd316f14c0 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -17,198 +17,8 @@
17 17 */
18 18
19 19#include <linux/linkage.h>
20#include <linux/const.h>
21#include <asm/unified.h>
22#include <asm/page.h>
23#include <asm/ptrace.h>
24#include <asm/asm-offsets.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_arm.h>
27#include <asm/vfpmacros.h>
28#include "interrupts_head.S"
29 20
30 21 .text
31 .pushsection .hyp.text, "ax"
32
33/********************************************************************
34 * Flush per-VMID TLBs
35 *
36 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
37 *
38 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
39 * inside the inner-shareable domain (which is the case for all v7
40 * implementations). If we come across a non-IS SMP implementation, we'll
41 * have to use an IPI based mechanism. Until then, we stick to the simple
42 * hardware assisted version.
43 *
44 * As v7 does not support flushing per IPA, just nuke the whole TLB
45 * instead, ignoring the ipa value.
46 */
47ENTRY(__kvm_tlb_flush_vmid_ipa)
48 push {r2, r3}
49
50 dsb ishst
51 add r0, r0, #KVM_VTTBR
52 ldrd r2, r3, [r0]
53 mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
54 isb
55 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
56 dsb ish
57 isb
58 mov r2, #0
59 mov r3, #0
60 mcrr p15, 6, r2, r3, c2 @ Back to VMID #0
61 isb @ Not necessary if followed by eret
62
63 pop {r2, r3}
64 bx lr
65ENDPROC(__kvm_tlb_flush_vmid_ipa)
66
67/**
68 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
69 *
70 * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address
71 * parameter
72 */
73
74ENTRY(__kvm_tlb_flush_vmid)
75 b __kvm_tlb_flush_vmid_ipa
76ENDPROC(__kvm_tlb_flush_vmid)
77
78/********************************************************************
79 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
80 * domain, for all VMIDs
81 *
82 * void __kvm_flush_vm_context(void);
83 */
84ENTRY(__kvm_flush_vm_context)
85 mov r0, #0 @ rn parameter for c15 flushes is SBZ
86
87 /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
88 mcr p15, 4, r0, c8, c3, 4
89 /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
90 mcr p15, 0, r0, c7, c1, 0
91 dsb ish
92 isb @ Not necessary if followed by eret
93
94 bx lr
95ENDPROC(__kvm_flush_vm_context)
96
97
98/********************************************************************
99 * Hypervisor world-switch code
100 *
101 *
102 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
103 */
104ENTRY(__kvm_vcpu_run)
105 @ Save the vcpu pointer
106 mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR
107
108 save_host_regs
109
110 restore_vgic_state
111 restore_timer_state
112
113 @ Store hardware CP15 state and load guest state
114 read_cp15_state store_to_vcpu = 0
115 write_cp15_state read_from_vcpu = 1
116
117 @ If the host kernel has not been configured with VFPv3 support,
118 @ then it is safer if we deny guests from using it as well.
119#ifdef CONFIG_VFPv3
120 @ Set FPEXC_EN so the guest doesn't trap floating point instructions
121 VFPFMRX r2, FPEXC @ VMRS
122 push {r2}
123 orr r2, r2, #FPEXC_EN
124 VFPFMXR FPEXC, r2 @ VMSR
125#endif
126
127 @ Configure Hyp-role
128 configure_hyp_role vmentry
129
130 @ Trap coprocessor CRx accesses
131 set_hstr vmentry
132 set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
133 set_hdcr vmentry
134
135 @ Write configured ID register into MIDR alias
136 ldr r1, [vcpu, #VCPU_MIDR]
137 mcr p15, 4, r1, c0, c0, 0
138
139 @ Write guest view of MPIDR into VMPIDR
140 ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
141 mcr p15, 4, r1, c0, c0, 5
142
143 @ Set up guest memory translation
144 ldr r1, [vcpu, #VCPU_KVM]
145 add r1, r1, #KVM_VTTBR
146 ldrd r2, r3, [r1]
147 mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
148
149 @ We're all done, just restore the GPRs and go to the guest
150 restore_guest_regs
151 clrex @ Clear exclusive monitor
152 eret
153
154__kvm_vcpu_return:
155 /*
156 * return convention:
157 * guest r0, r1, r2 saved on the stack
158 * r0: vcpu pointer
159 * r1: exception code
160 */
161 save_guest_regs
162
163 @ Set VMID == 0
164 mov r2, #0
165 mov r3, #0
166 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
167
168 @ Don't trap coprocessor accesses for host kernel
169 set_hstr vmexit
170 set_hdcr vmexit
171 set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
172
173#ifdef CONFIG_VFPv3
174 @ Switch VFP/NEON hardware state to the host's
175 add r7, vcpu, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
176 store_vfp_state r7
177 add r7, vcpu, #VCPU_HOST_CTXT
178 ldr r7, [r7]
179 add r7, r7, #CPU_CTXT_VFP
180 restore_vfp_state r7
181
182after_vfp_restore:
183 @ Restore FPEXC_EN which we clobbered on entry
184 pop {r2}
185 VFPFMXR FPEXC, r2
186#else
187after_vfp_restore:
188#endif
189
190 @ Reset Hyp-role
191 configure_hyp_role vmexit
192
193 @ Let host read hardware MIDR
194 mrc p15, 0, r2, c0, c0, 0
195 mcr p15, 4, r2, c0, c0, 0
196
197 @ Back to hardware MPIDR
198 mrc p15, 0, r2, c0, c0, 5
199 mcr p15, 4, r2, c0, c0, 5
200
201 @ Store guest CP15 state and restore host state
202 read_cp15_state store_to_vcpu = 1
203 write_cp15_state read_from_vcpu = 0
204
205 save_timer_state
206 save_vgic_state
207
208 restore_host_regs
209 clrex @ Clear exclusive monitor
210 mov r0, r1 @ Return the return code
211 bx lr @ return to IOCTL
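
To tie the return convention above back to the host: the exception code that the handlers place in r1, and that is moved to r0 here, becomes the return value of the HVC-based call that entered Hyp mode. A minimal host-side sketch, assuming the kvm_call_hyp() and __kvm_vcpu_run() declarations from asm/kvm_host.h and asm/kvm_asm.h (not part of this patch):

#include <linux/kvm_host.h>	/* struct kvm_vcpu, kvm_call_hyp() */
#include <asm/kvm_asm.h>	/* __kvm_vcpu_run(), ARM_EXCEPTION_* */

/* Hedged sketch: enter the guest through the world switch and hand the
 * ARM_EXCEPTION_* code left in r0 back to the caller for exit handling. */
static int run_guest_once(struct kvm_vcpu *vcpu)
{
	/* e.g. ARM_EXCEPTION_HVC for a guest trap, ARM_EXCEPTION_IRQ when
	 * an interrupt fired while the guest was running. */
	return kvm_call_hyp(__kvm_vcpu_run, vcpu);
}
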
212 22
213 23/********************************************************************
214 24 * Call function in Hyp mode
@@ -239,281 +49,4 @@ after_vfp_restore:
239 49ENTRY(kvm_call_hyp)
240 50 hvc #0
241 51 bx lr
242 52ENDPROC(kvm_call_hyp)
243/********************************************************************
244 * Hypervisor exception vector and handlers
245 *
246 *
247 * The KVM/ARM Hypervisor ABI is defined as follows:
248 *
249 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
250 * instruction is issued since all traps are disabled when running the host
251 * kernel as per the Hyp-mode initialization at boot time.
252 *
253 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
254 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
255 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
256 * instructions are called from within Hyp-mode.
257 *
258 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
259 * Switching to Hyp mode is done through a simple HVC #0 instruction. The
260 * exception vector code will check that the HVC comes from VMID==0 and if
261 * so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
262 * - r0 contains a pointer to a HYP function
263 * - r1, r2, and r3 contain arguments to the above function.
264 * - The HYP function will be called with its arguments in r0, r1 and r2.
265 * On HYP function return, we return directly to SVC.
266 *
267 * Note that the above is used to execute code in Hyp-mode from a host-kernel
268 * point of view, and is a different concept from performing a world-switch and
269 * executing guest code SVC mode (with a VMID != 0).
270 */
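
As a host-side illustration of the Hyp-ABI described above (a hedged sketch, not part of this patch): kvm_call_hyp() puts the HYP function pointer in r0 and up to three arguments in r1-r3, and hyp_hvc below shifts them down so the HYP function receives them in r0-r2. Using the TLB flush helper from the top of this file, with a hypothetical wrapper name:

#include <linux/kvm_host.h>	/* struct kvm, phys_addr_t, kvm_call_hyp() */
#include <asm/kvm_asm.h>	/* __kvm_tlb_flush_vmid_ipa() */

/* Hedged sketch of a caller: at the HVC, r0 = __kvm_tlb_flush_vmid_ipa,
 * r1 = kvm, r2 = ipa; hyp_hvc turns that into r0 = kvm, r1 = ipa before
 * branching to the HYP function. */
static void flush_guest_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
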
271
272/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
273.macro bad_exception exception_code, panic_str
274 push {r0-r2}
275 mrrc p15, 6, r0, r1, c2 @ Read VTTBR
276 lsr r1, r1, #16
277 ands r1, r1, #0xff
278 beq 99f
279
280 load_vcpu @ Load VCPU pointer
281 .if \exception_code == ARM_EXCEPTION_DATA_ABORT
282 mrc p15, 4, r2, c5, c2, 0 @ HSR
283 mrc p15, 4, r1, c6, c0, 0 @ HDFAR
284 str r2, [vcpu, #VCPU_HSR]
285 str r1, [vcpu, #VCPU_HxFAR]
286 .endif
287 .if \exception_code == ARM_EXCEPTION_PREF_ABORT
288 mrc p15, 4, r2, c5, c2, 0 @ HSR
289 mrc p15, 4, r1, c6, c0, 2 @ HIFAR
290 str r2, [vcpu, #VCPU_HSR]
291 str r1, [vcpu, #VCPU_HxFAR]
292 .endif
293 mov r1, #\exception_code
294 b __kvm_vcpu_return
295
296 @ We were in the host already. Let's craft a panic-ing return to SVC.
29799: mrs r2, cpsr
298 bic r2, r2, #MODE_MASK
299 orr r2, r2, #SVC_MODE
300THUMB( orr r2, r2, #PSR_T_BIT )
301 msr spsr_cxsf, r2
302 mrs r1, ELR_hyp
303 ldr r2, =panic
304 msr ELR_hyp, r2
305 ldr r0, =\panic_str
306 clrex @ Clear exclusive monitor
307 eret
308.endm
309
310 .align 5
311__kvm_hyp_vector:
312 .globl __kvm_hyp_vector
313
314 @ Hyp-mode exception vector
315 W(b) hyp_reset
316 W(b) hyp_undef
317 W(b) hyp_svc
318 W(b) hyp_pabt
319 W(b) hyp_dabt
320 W(b) hyp_hvc
321 W(b) hyp_irq
322 W(b) hyp_fiq
323
324 .align
325hyp_reset:
326 b hyp_reset
327
328 .align
329hyp_undef:
330 bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
331
332 .align
333hyp_svc:
334 bad_exception ARM_EXCEPTION_HVC, svc_die_str
335
336 .align
337hyp_pabt:
338 bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
339
340 .align
341hyp_dabt:
342 bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
343
344 .align
345hyp_hvc:
346 /*
347 * Getting here is either because of a trap from a guest or from calling
348 * HVC from the host kernel, which means "switch to Hyp mode".
349 */
350 push {r0, r1, r2}
351
352 @ Check syndrome register
353 mrc p15, 4, r1, c5, c2, 0 @ HSR
354 lsr r0, r1, #HSR_EC_SHIFT
355 cmp r0, #HSR_EC_HVC
356 bne guest_trap @ Not HVC instr.
357
358 /*
359 * Let's check if the HVC came from VMID 0 and allow simple
360 * switch to Hyp mode
361 */
362 mrrc p15, 6, r0, r2, c2
363 lsr r2, r2, #16
364 and r2, r2, #0xff
365 cmp r2, #0
366 bne guest_trap @ Guest called HVC
367
368 /*
369 * Getting here means host called HVC, we shift parameters and branch
370 * to Hyp function.
371 */
372 pop {r0, r1, r2}
373
374 /* Check for __hyp_get_vectors */
375 cmp r0, #-1
376 mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
377 beq 1f
378
379 push {lr}
380 mrs lr, SPSR
381 push {lr}
382
383 mov lr, r0
384 mov r0, r1
385 mov r1, r2
386 mov r2, r3
387
388THUMB( orr lr, #1)
389 blx lr @ Call the HYP function
390
391 pop {lr}
392 msr SPSR_csxf, lr
393 pop {lr}
3941: eret
395
396guest_trap:
397 load_vcpu @ Load VCPU pointer to r0
398 str r1, [vcpu, #VCPU_HSR]
399
400 @ Check if we need the fault information
401 lsr r1, r1, #HSR_EC_SHIFT
402#ifdef CONFIG_VFPv3
403 cmp r1, #HSR_EC_CP_0_13
404 beq switch_to_guest_vfp
405#endif
406 cmp r1, #HSR_EC_IABT
407 mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
408 beq 2f
409 cmp r1, #HSR_EC_DABT
410 bne 1f
411 mrc p15, 4, r2, c6, c0, 0 @ HDFAR
412
4132: str r2, [vcpu, #VCPU_HxFAR]
414
415 /*
416 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
417 *
418 * Abort on the stage 2 translation for a memory access from a
419 * Non-secure PL1 or PL0 mode:
420 *
421 * For any Access flag fault or Translation fault, and also for any
422 * Permission fault on the stage 2 translation of a memory access
423 * made as part of a translation table walk for a stage 1 translation,
424 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
425 * is UNKNOWN.
426 */
427
428 /* Check for permission fault, and S1PTW */
429 mrc p15, 4, r1, c5, c2, 0 @ HSR
430 and r0, r1, #HSR_FSC_TYPE
431 cmp r0, #FSC_PERM
432 tsteq r1, #(1 << 7) @ S1PTW
433 mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
434 bne 3f
435
436 /* Preserve PAR */
437 mrrc p15, 0, r0, r1, c7 @ PAR
438 push {r0, r1}
439
440 /* Resolve IPA using the xFAR */
441 mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
442 isb
443 mrrc p15, 0, r0, r1, c7 @ PAR
444 tst r0, #1
445 bne 4f @ Failed translation
446 ubfx r2, r0, #12, #20
447 lsl r2, r2, #4
448 orr r2, r2, r1, lsl #24
449
450 /* Restore PAR */
451 pop {r0, r1}
452 mcrr p15, 0, r0, r1, c7 @ PAR
453
4543: load_vcpu @ Load VCPU pointer to r0
455 str r2, [r0, #VCPU_HPFAR]
456
4571: mov r1, #ARM_EXCEPTION_HVC
458 b __kvm_vcpu_return
459
4604: pop {r0, r1} @ Failed translation, return to guest
461 mcrr p15, 0, r0, r1, c7 @ PAR
462 clrex
463 pop {r0, r1, r2}
464 eret
465
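
The permission-fault/S1PTW test above decides whether HPFAR can be used as-is. As a compact restatement, a hedged C sketch of the same condition (HSR_FSC_TYPE and FSC_PERM are assumed to come from asm/kvm_arm.h, as used by the assembly):

#include <linux/types.h>	/* bool, u32 */
#include <asm/kvm_arm.h>	/* HSR_FSC_TYPE, FSC_PERM */

/* Hedged sketch: HPFAR holds the faulting IPA unless this is a
 * permission fault that was not taken on a stage-1 table walk; in that
 * case the IPA is resolved by hand (ATS1CPR on the HxFAR value above). */
static bool hpfar_holds_ipa(u32 hsr)
{
	bool s1ptw = hsr & (1 << 7);	/* HSR.S1PTW */

	return (hsr & HSR_FSC_TYPE) != FSC_PERM || s1ptw;
}
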
466/*
467 * If VFPv3 support is not available, then we will not switch the VFP
468 * registers; however cp10 and cp11 accesses will still trap and fallback
469 * to the regular coprocessor emulation code, which currently will
470 * inject an undefined exception to the guest.
471 */
472#ifdef CONFIG_VFPv3
473switch_to_guest_vfp:
474 push {r3-r7}
475
476 @ NEON/VFP used. Turn on VFP access.
477 set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
478
479 @ Switch VFP/NEON hardware state to the guest's
480 add r7, r0, #VCPU_HOST_CTXT
481 ldr r7, [r7]
482 add r7, r7, #CPU_CTXT_VFP
483 store_vfp_state r7
484 add r7, r0, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
485 restore_vfp_state r7
486
487 pop {r3-r7}
488 pop {r0-r2}
489 clrex
490 eret
491#endif
492
493 .align
494hyp_irq:
495 push {r0, r1, r2}
496 mov r1, #ARM_EXCEPTION_IRQ
497 load_vcpu @ Load VCPU pointer to r0
498 b __kvm_vcpu_return
499
500 .align
501hyp_fiq:
502 b hyp_fiq
503
504 .ltorg
505
506 .popsection
507
508 .pushsection ".rodata"
509
510und_die_str:
511 .ascii "unexpected undefined exception in Hyp mode at: %#08x\n"
512pabt_die_str:
513 .ascii "unexpected prefetch abort in Hyp mode at: %#08x\n"
514dabt_die_str:
515 .ascii "unexpected data abort in Hyp mode at: %#08x\n"
516svc_die_str:
517 .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n"
518
519 .popsection
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
deleted file mode 100644
index e0943cb80ab3..000000000000
--- a/arch/arm/kvm/interrupts_head.S
+++ /dev/null
@@ -1,660 +0,0 @@
1#include <linux/irqchip/arm-gic.h>
2#include <asm/assembler.h>
3
4/* Compat macro, until we get rid of this file entirely */
5#define VCPU_GP_REGS (VCPU_GUEST_CTXT + CPU_CTXT_GP_REGS)
6#define VCPU_USR_REGS (VCPU_GP_REGS + GP_REGS_USR)
7#define VCPU_SVC_REGS (VCPU_GP_REGS + GP_REGS_SVC)
8#define VCPU_ABT_REGS (VCPU_GP_REGS + GP_REGS_ABT)
9#define VCPU_UND_REGS (VCPU_GP_REGS + GP_REGS_UND)
10#define VCPU_IRQ_REGS (VCPU_GP_REGS + GP_REGS_IRQ)
11#define VCPU_FIQ_REGS (VCPU_GP_REGS + GP_REGS_FIQ)
12#define VCPU_PC (VCPU_GP_REGS + GP_REGS_PC)
13#define VCPU_CPSR (VCPU_GP_REGS + GP_REGS_CPSR)
14
15#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
16#define VCPU_USR_SP (VCPU_USR_REG(13))
17#define VCPU_USR_LR (VCPU_USR_REG(14))
18#define VCPU_CP15_BASE (VCPU_GUEST_CTXT + CPU_CTXT_CP15)
19#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15_BASE + (_cp15_reg_idx * 4))
20
21/*
22 * Many of these macros need to access the VCPU structure, which is always
23 * held in r0. These macros should never clobber r1, as it is used to hold the
24 * exception code on the return path (except of course the macro that switches
25 * all the registers before the final jump to the VM).
26 */
27vcpu .req r0 @ vcpu pointer always in r0
28
29/* Clobbers {r2-r6} */
30.macro store_vfp_state vfp_base
31 @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
32 VFPFMRX r2, FPEXC
33 @ Make sure VFP is enabled so we can touch the registers.
34 orr r6, r2, #FPEXC_EN
35 VFPFMXR FPEXC, r6
36
37 VFPFMRX r3, FPSCR
38 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
39 beq 1f
40 @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
41 @ we only need to save them if FPEXC_EX is set.
42 VFPFMRX r4, FPINST
43 tst r2, #FPEXC_FP2V
44 VFPFMRX r5, FPINST2, ne @ vmrsne
45 bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
46 VFPFMXR FPEXC, r6
471:
48 VFPFSTMIA \vfp_base, r6 @ Save VFP registers
49 stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
50.endm
51
52/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
53.macro restore_vfp_state vfp_base
54 VFPFLDMIA \vfp_base, r6 @ Load VFP registers
55 ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2
56
57 VFPFMXR FPSCR, r3
58 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
59 beq 1f
60 VFPFMXR FPINST, r4
61 tst r2, #FPEXC_FP2V
62 VFPFMXR FPINST2, r5, ne
631:
64 VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
65.endm
66
67/* These are simply for the macros to work - values don't have meaning */
68.equ usr, 0
69.equ svc, 1
70.equ abt, 2
71.equ und, 3
72.equ irq, 4
73.equ fiq, 5
74
75.macro push_host_regs_mode mode
76 mrs r2, SP_\mode
77 mrs r3, LR_\mode
78 mrs r4, SPSR_\mode
79 push {r2, r3, r4}
80.endm
81
82/*
83 * Store all host persistent registers on the stack.
84 * Clobbers all registers, in all modes, except r0 and r1.
85 */
86.macro save_host_regs
87 /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
88 mrs r2, ELR_hyp
89 push {r2}
90
91 /* usr regs */
92 push {r4-r12} @ r0-r3 are always clobbered
93 mrs r2, SP_usr
94 mov r3, lr
95 push {r2, r3}
96
97 push_host_regs_mode svc
98 push_host_regs_mode abt
99 push_host_regs_mode und
100 push_host_regs_mode irq
101
102 /* fiq regs */
103 mrs r2, r8_fiq
104 mrs r3, r9_fiq
105 mrs r4, r10_fiq
106 mrs r5, r11_fiq
107 mrs r6, r12_fiq
108 mrs r7, SP_fiq
109 mrs r8, LR_fiq
110 mrs r9, SPSR_fiq
111 push {r2-r9}
112.endm
113
114.macro pop_host_regs_mode mode
115 pop {r2, r3, r4}
116 msr SP_\mode, r2
117 msr LR_\mode, r3
118 msr SPSR_\mode, r4
119.endm
120
121/*
122 * Restore all host registers from the stack.
123 * Clobbers all registers, in all modes, except r0 and r1.
124 */
125.macro restore_host_regs
126 pop {r2-r9}
127 msr r8_fiq, r2
128 msr r9_fiq, r3
129 msr r10_fiq, r4
130 msr r11_fiq, r5
131 msr r12_fiq, r6
132 msr SP_fiq, r7
133 msr LR_fiq, r8
134 msr SPSR_fiq, r9
135
136 pop_host_regs_mode irq
137 pop_host_regs_mode und
138 pop_host_regs_mode abt
139 pop_host_regs_mode svc
140
141 pop {r2, r3}
142 msr SP_usr, r2
143 mov lr, r3
144 pop {r4-r12}
145
146 pop {r2}
147 msr ELR_hyp, r2
148.endm
149
150/*
151 * Restore SP, LR and SPSR for a given mode. offset is the offset of
152 * this mode's registers from the VCPU base.
153 *
154 * Assumes vcpu pointer in vcpu reg
155 *
156 * Clobbers r1, r2, r3, r4.
157 */
158.macro restore_guest_regs_mode mode, offset
159 add r1, vcpu, \offset
160 ldm r1, {r2, r3, r4}
161 msr SP_\mode, r2
162 msr LR_\mode, r3
163 msr SPSR_\mode, r4
164.endm
165
166/*
167 * Restore all guest registers from the vcpu struct.
168 *
169 * Assumes vcpu pointer in vcpu reg
170 *
171 * Clobbers *all* registers.
172 */
173.macro restore_guest_regs
174 restore_guest_regs_mode svc, #VCPU_SVC_REGS
175 restore_guest_regs_mode abt, #VCPU_ABT_REGS
176 restore_guest_regs_mode und, #VCPU_UND_REGS
177 restore_guest_regs_mode irq, #VCPU_IRQ_REGS
178
179 add r1, vcpu, #VCPU_FIQ_REGS
180 ldm r1, {r2-r9}
181 msr r8_fiq, r2
182 msr r9_fiq, r3
183 msr r10_fiq, r4
184 msr r11_fiq, r5
185 msr r12_fiq, r6
186 msr SP_fiq, r7
187 msr LR_fiq, r8
188 msr SPSR_fiq, r9
189
190 @ Load return state
191 ldr r2, [vcpu, #VCPU_PC]
192 ldr r3, [vcpu, #VCPU_CPSR]
193 msr ELR_hyp, r2
194 msr SPSR_cxsf, r3
195
196 @ Load user registers
197 ldr r2, [vcpu, #VCPU_USR_SP]
198 ldr r3, [vcpu, #VCPU_USR_LR]
199 msr SP_usr, r2
200 mov lr, r3
201 add vcpu, vcpu, #(VCPU_USR_REGS)
202 ldm vcpu, {r0-r12}
203.endm
204
205/*
206 * Save SP, LR and SPSR for a given mode. offset is the offset of
207 * this mode's registers from the VCPU base.
208 *
209 * Assumes vcpu pointer in vcpu reg
210 *
211 * Clobbers r2, r3, r4, r5.
212 */
213.macro save_guest_regs_mode mode, offset
214 add r2, vcpu, \offset
215 mrs r3, SP_\mode
216 mrs r4, LR_\mode
217 mrs r5, SPSR_\mode
218 stm r2, {r3, r4, r5}
219.endm
220
221/*
222 * Save all guest registers to the vcpu struct
223 * Expects guest's r0, r1, r2 on the stack.
224 *
225 * Assumes vcpu pointer in vcpu reg
226 *
227 * Clobbers r2, r3, r4, r5.
228 */
229.macro save_guest_regs
230 @ Store usr registers
231 add r2, vcpu, #VCPU_USR_REG(3)
232 stm r2, {r3-r12}
233 add r2, vcpu, #VCPU_USR_REG(0)
234 pop {r3, r4, r5} @ r0, r1, r2
235 stm r2, {r3, r4, r5}
236 mrs r2, SP_usr
237 mov r3, lr
238 str r2, [vcpu, #VCPU_USR_SP]
239 str r3, [vcpu, #VCPU_USR_LR]
240
241 @ Store return state
242 mrs r2, ELR_hyp
243 mrs r3, spsr
244 str r2, [vcpu, #VCPU_PC]
245 str r3, [vcpu, #VCPU_CPSR]
246
247 @ Store other guest registers
248 save_guest_regs_mode svc, #VCPU_SVC_REGS
249 save_guest_regs_mode abt, #VCPU_ABT_REGS
250 save_guest_regs_mode und, #VCPU_UND_REGS
251 save_guest_regs_mode irq, #VCPU_IRQ_REGS
252.endm
253
254/* Reads cp15 registers from hardware and stores them in memory
255 * @store_to_vcpu: If 0, registers are written in-order to the stack,
256 * otherwise to the VCPU struct pointed to by vcpup
257 *
258 * Assumes vcpu pointer in vcpu reg
259 *
260 * Clobbers r2 - r12
261 */
262.macro read_cp15_state store_to_vcpu
263 mrc p15, 0, r2, c1, c0, 0 @ SCTLR
264 mrc p15, 0, r3, c1, c0, 2 @ CPACR
265 mrc p15, 0, r4, c2, c0, 2 @ TTBCR
266 mrc p15, 0, r5, c3, c0, 0 @ DACR
267 mrrc p15, 0, r6, r7, c2 @ TTBR 0
268 mrrc p15, 1, r8, r9, c2 @ TTBR 1
269 mrc p15, 0, r10, c10, c2, 0 @ PRRR
270 mrc p15, 0, r11, c10, c2, 1 @ NMRR
271 mrc p15, 2, r12, c0, c0, 0 @ CSSELR
272
273 .if \store_to_vcpu == 0
274 push {r2-r12} @ Push CP15 registers
275 .else
276 str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
277 str r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
278 str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
279 str r5, [vcpu, #CP15_OFFSET(c3_DACR)]
280 add r2, vcpu, #CP15_OFFSET(c2_TTBR0)
281 strd r6, r7, [r2]
282 add r2, vcpu, #CP15_OFFSET(c2_TTBR1)
283 strd r8, r9, [r2]
284 str r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
285 str r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
286 str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
287 .endif
288
289 mrc p15, 0, r2, c13, c0, 1 @ CID
290 mrc p15, 0, r3, c13, c0, 2 @ TID_URW
291 mrc p15, 0, r4, c13, c0, 3 @ TID_URO
292 mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV
293 mrc p15, 0, r6, c5, c0, 0 @ DFSR
294 mrc p15, 0, r7, c5, c0, 1 @ IFSR
295 mrc p15, 0, r8, c5, c1, 0 @ ADFSR
296 mrc p15, 0, r9, c5, c1, 1 @ AIFSR
297 mrc p15, 0, r10, c6, c0, 0 @ DFAR
298 mrc p15, 0, r11, c6, c0, 2 @ IFAR
299 mrc p15, 0, r12, c12, c0, 0 @ VBAR
300
301 .if \store_to_vcpu == 0
302 push {r2-r12} @ Push CP15 registers
303 .else
304 str r2, [vcpu, #CP15_OFFSET(c13_CID)]
305 str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
306 str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
307 str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
308 str r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
309 str r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
310 str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
311 str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
312 str r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
313 str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
314 str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
315 .endif
316
317 mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
318 mrrc p15, 0, r4, r5, c7 @ PAR
319 mrc p15, 0, r6, c10, c3, 0 @ AMAIR0
320 mrc p15, 0, r7, c10, c3, 1 @ AMAIR1
321
322 .if \store_to_vcpu == 0
323 push {r2,r4-r7}
324 .else
325 str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
326 add r12, vcpu, #CP15_OFFSET(c7_PAR)
327 strd r4, r5, [r12]
328 str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
329 str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
330 .endif
331.endm
332
333/*
334 * Reads cp15 registers from memory and writes them to hardware
335 * @read_from_vcpu: If 0, registers are read in-order from the stack,
336 * otherwise from the VCPU struct pointed to by vcpup
337 *
338 * Assumes vcpu pointer in vcpu reg
339 */
340.macro write_cp15_state read_from_vcpu
341 .if \read_from_vcpu == 0
342 pop {r2,r4-r7}
343 .else
344 ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
345 add r12, vcpu, #CP15_OFFSET(c7_PAR)
346 ldrd r4, r5, [r12]
347 ldr r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
348 ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
349 .endif
350
351 mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
352 mcrr p15, 0, r4, r5, c7 @ PAR
353 mcr p15, 0, r6, c10, c3, 0 @ AMAIR0
354 mcr p15, 0, r7, c10, c3, 1 @ AMAIR1
355
356 .if \read_from_vcpu == 0
357 pop {r2-r12}
358 .else
359 ldr r2, [vcpu, #CP15_OFFSET(c13_CID)]
360 ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
361 ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
362 ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
363 ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
364 ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
365 ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
366 ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
367 ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
368 ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
369 ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
370 .endif
371
372 mcr p15, 0, r2, c13, c0, 1 @ CID
373 mcr p15, 0, r3, c13, c0, 2 @ TID_URW
374 mcr p15, 0, r4, c13, c0, 3 @ TID_URO
375 mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV
376 mcr p15, 0, r6, c5, c0, 0 @ DFSR
377 mcr p15, 0, r7, c5, c0, 1 @ IFSR
378 mcr p15, 0, r8, c5, c1, 0 @ ADFSR
379 mcr p15, 0, r9, c5, c1, 1 @ AIFSR
380 mcr p15, 0, r10, c6, c0, 0 @ DFAR
381 mcr p15, 0, r11, c6, c0, 2 @ IFAR
382 mcr p15, 0, r12, c12, c0, 0 @ VBAR
383
384 .if \read_from_vcpu == 0
385 pop {r2-r12}
386 .else
387 ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
388 ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
389 ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
390 ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)]
391 add r12, vcpu, #CP15_OFFSET(c2_TTBR0)
392 ldrd r6, r7, [r12]
393 add r12, vcpu, #CP15_OFFSET(c2_TTBR1)
394 ldrd r8, r9, [r12]
395 ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
396 ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
397 ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
398 .endif
399
400 mcr p15, 0, r2, c1, c0, 0 @ SCTLR
401 mcr p15, 0, r3, c1, c0, 2 @ CPACR
402 mcr p15, 0, r4, c2, c0, 2 @ TTBCR
403 mcr p15, 0, r5, c3, c0, 0 @ DACR
404 mcrr p15, 0, r6, r7, c2 @ TTBR 0
405 mcrr p15, 1, r8, r9, c2 @ TTBR 1
406 mcr p15, 0, r10, c10, c2, 0 @ PRRR
407 mcr p15, 0, r11, c10, c2, 1 @ NMRR
408 mcr p15, 2, r12, c0, c0, 0 @ CSSELR
409.endm
410
411/*
412 * Save the VGIC CPU state into memory
413 *
414 * Assumes vcpu pointer in vcpu reg
415 */
416.macro save_vgic_state
417 /* Get VGIC VCTRL base into r2 */
418 ldr r2, [vcpu, #VCPU_KVM]
419 ldr r2, [r2, #KVM_VGIC_VCTRL]
420 cmp r2, #0
421 beq 2f
422
423 /* Compute the address of struct vgic_cpu */
424 add r11, vcpu, #VCPU_VGIC_CPU
425
426 /* Save all interesting registers */
427 ldr r4, [r2, #GICH_VMCR]
428 ldr r5, [r2, #GICH_MISR]
429 ldr r6, [r2, #GICH_EISR0]
430 ldr r7, [r2, #GICH_EISR1]
431 ldr r8, [r2, #GICH_ELRSR0]
432 ldr r9, [r2, #GICH_ELRSR1]
433 ldr r10, [r2, #GICH_APR]
434ARM_BE8(rev r4, r4 )
435ARM_BE8(rev r5, r5 )
436ARM_BE8(rev r6, r6 )
437ARM_BE8(rev r7, r7 )
438ARM_BE8(rev r8, r8 )
439ARM_BE8(rev r9, r9 )
440ARM_BE8(rev r10, r10 )
441
442 str r4, [r11, #VGIC_V2_CPU_VMCR]
443 str r5, [r11, #VGIC_V2_CPU_MISR]
444#ifdef CONFIG_CPU_ENDIAN_BE8
445 str r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
446 str r7, [r11, #VGIC_V2_CPU_EISR]
447 str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
448 str r9, [r11, #VGIC_V2_CPU_ELRSR]
449#else
450 str r6, [r11, #VGIC_V2_CPU_EISR]
451 str r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
452 str r8, [r11, #VGIC_V2_CPU_ELRSR]
453 str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
454#endif
455 str r10, [r11, #VGIC_V2_CPU_APR]
456
457 /* Clear GICH_HCR */
458 mov r5, #0
459 str r5, [r2, #GICH_HCR]
460
461 /* Save list registers */
462 add r2, r2, #GICH_LR0
463 add r3, r11, #VGIC_V2_CPU_LR
464 ldr r4, [r11, #VGIC_CPU_NR_LR]
4651: ldr r6, [r2], #4
466ARM_BE8(rev r6, r6 )
467 str r6, [r3], #4
468 subs r4, r4, #1
469 bne 1b
4702:
471.endm
472
473/*
474 * Restore the VGIC CPU state from memory
475 *
476 * Assumes vcpu pointer in vcpu reg
477 */
478.macro restore_vgic_state
479 /* Get VGIC VCTRL base into r2 */
480 ldr r2, [vcpu, #VCPU_KVM]
481 ldr r2, [r2, #KVM_VGIC_VCTRL]
482 cmp r2, #0
483 beq 2f
484
485 /* Compute the address of struct vgic_cpu */
486 add r11, vcpu, #VCPU_VGIC_CPU
487
488 /* We only restore a minimal set of registers */
489 ldr r3, [r11, #VGIC_V2_CPU_HCR]
490 ldr r4, [r11, #VGIC_V2_CPU_VMCR]
491 ldr r8, [r11, #VGIC_V2_CPU_APR]
492ARM_BE8(rev r3, r3 )
493ARM_BE8(rev r4, r4 )
494ARM_BE8(rev r8, r8 )
495
496 str r3, [r2, #GICH_HCR]
497 str r4, [r2, #GICH_VMCR]
498 str r8, [r2, #GICH_APR]
499
500 /* Restore list registers */
501 add r2, r2, #GICH_LR0
502 add r3, r11, #VGIC_V2_CPU_LR
503 ldr r4, [r11, #VGIC_CPU_NR_LR]
5041: ldr r6, [r3], #4
505ARM_BE8(rev r6, r6 )
506 str r6, [r2], #4
507 subs r4, r4, #1
508 bne 1b
5092:
510.endm
511
512#define CNTHCTL_PL1PCTEN (1 << 0)
513#define CNTHCTL_PL1PCEN (1 << 1)
514
515/*
516 * Save the timer state onto the VCPU and allow physical timer/counter access
517 * for the host.
518 *
519 * Assumes vcpu pointer in vcpu reg
520 * Clobbers r2-r5
521 */
522.macro save_timer_state
523 ldr r4, [vcpu, #VCPU_KVM]
524 ldr r2, [r4, #KVM_TIMER_ENABLED]
525 cmp r2, #0
526 beq 1f
527
528 mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
529 str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
530
531 isb
532
533 mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
534 ldr r4, =VCPU_TIMER_CNTV_CVAL
535 add r5, vcpu, r4
536 strd r2, r3, [r5]
537
538 @ Ensure host CNTVCT == CNTPCT
539 mov r2, #0
540 mcrr p15, 4, r2, r2, c14 @ CNTVOFF
541
5421:
543 mov r2, #0 @ Clear ENABLE
544 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
545
546 @ Allow physical timer/counter access for the host
547 mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
548 orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
549 mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
550.endm
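
For reference on the CNTVOFF write above: the architecture defines the virtual counter as CNTVCT = CNTPCT - CNTVOFF, so zeroing CNTVOFF is what makes the host's virtual counter read the same value as the physical counter, as the "Ensure host CNTVCT == CNTPCT" comment intends.
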
551
552/*
553 * Load the timer state from the VCPU and deny physical timer/counter access
554 * for the host.
555 *
556 * Assumes vcpu pointer in vcpu reg
557 * Clobbers r2-r5
558 */
559.macro restore_timer_state
560 @ Disallow physical timer access for the guest
561 @ Physical counter access is allowed
562 mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
563 orr r2, r2, #CNTHCTL_PL1PCTEN
564 bic r2, r2, #CNTHCTL_PL1PCEN
565 mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
566
567 ldr r4, [vcpu, #VCPU_KVM]
568 ldr r2, [r4, #KVM_TIMER_ENABLED]
569 cmp r2, #0
570 beq 1f
571
572 ldr r2, [r4, #KVM_TIMER_CNTVOFF]
573 ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
574 mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF
575
576 ldr r4, =VCPU_TIMER_CNTV_CVAL
577 add r5, vcpu, r4
578 ldrd r2, r3, [r5]
579 mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
580 isb
581
582 ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
583 and r2, r2, #3
584 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
5851:
586.endm
587
588.equ vmentry, 0
589.equ vmexit, 1
590
591/* Configures the HSTR (Hyp System Trap Register) on entry/return
592 * (hardware reset value is 0) */
593.macro set_hstr operation
594 mrc p15, 4, r2, c1, c1, 3
595 ldr r3, =HSTR_T(15)
596 .if \operation == vmentry
597 orr r2, r2, r3 @ Trap CR{15}
598 .else
599 bic r2, r2, r3 @ Don't trap any CRx accesses
600 .endif
601 mcr p15, 4, r2, c1, c1, 3
602.endm
603
604/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
605 * (hardware reset value is 0). Keep previous value in r2.
606 * An ISB is emitted on vmexit/vmtrap, but executed on vmexit only if
607 * VFP wasn't already enabled (always executed on vmtrap).
608 * If a label is specified with vmexit, it is branched to if VFP wasn't
609 * enabled.
610 */
611.macro set_hcptr operation, mask, label = none
612 mrc p15, 4, r2, c1, c1, 2
613 ldr r3, =\mask
614 .if \operation == vmentry
615 orr r3, r2, r3 @ Trap coproc-accesses defined in mask
616 .else
617 bic r3, r2, r3 @ Don't trap defined coproc-accesses
618 .endif
619 mcr p15, 4, r3, c1, c1, 2
620 .if \operation != vmentry
621 .if \operation == vmexit
622 tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
623 beq 1f
624 .endif
625 isb
626 .if \label != none
627 b \label
628 .endif
6291:
630 .endif
631.endm
632
633/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
634 * (hardware reset value is 0) */
635.macro set_hdcr operation
636 mrc p15, 4, r2, c1, c1, 1
637 ldr r3, =(HDCR_TPM|HDCR_TPMCR)
638 .if \operation == vmentry
639 orr r2, r2, r3 @ Trap some perfmon accesses
640 .else
641 bic r2, r2, r3 @ Don't trap any perfmon accesses
642 .endif
643 mcr p15, 4, r2, c1, c1, 1
644.endm
645
646/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
647.macro configure_hyp_role operation
648 .if \operation == vmentry
649 ldr r2, [vcpu, #VCPU_HCR]
650 ldr r3, [vcpu, #VCPU_IRQ_LINES]
651 orr r2, r2, r3
652 .else
653 mov r2, #0
654 .endif
655 mcr p15, 4, r2, c1, c1, 0 @ HCR
656.endm
657
658.macro load_vcpu
659 mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR
660.endm