Diffstat (limited to 'arch/arm/kvm/interrupts.S')
 -rw-r--r--  arch/arm/kvm/interrupts.S | 478
 1 file changed, 478 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
new file mode 100644
index 000000000000..c5400d2e97ca
--- /dev/null
+++ b/arch/arm/kvm/interrupts.S
@@ -0,0 +1,478 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/unified.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/vfpmacros.h>
#include "interrupts_head.S"

        .text

__kvm_hyp_code_start:
        .globl __kvm_hyp_code_start

/********************************************************************
 * Flush per-VMID TLBs
 *
 * void __kvm_tlb_flush_vmid(struct kvm *kvm);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations).  If we come across a non-IS SMP implementation, we'll
 * have to use an IPI based mechanism.  Until then, we stick to the simple
 * hardware assisted version.
 */
ENTRY(__kvm_tlb_flush_vmid)
        push    {r2, r3}

        add     r0, r0, #KVM_VTTBR
        ldrd    r2, r3, [r0]
        mcr     p15, 6, r2, r3, c2      @ Write VTTBR; selects the VMID the
                                        @ following invalidate applies to
        isb
        mcr     p15, 0, r0, c8, c3, 0   @ TLBIALLIS (rt ignored)
        dsb
        isb
        mov     r2, #0
        mov     r3, #0
        mcrr    p15, 6, r2, r3, c2      @ Back to VMID #0
        isb                             @ Not necessary if followed by eret

        pop     {r2, r3}
        bx      lr
ENDPROC(__kvm_tlb_flush_vmid)

/********************************************************************
 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
 * domain, for all VMIDs
 *
 * void __kvm_flush_vm_context(void);
 */
ENTRY(__kvm_flush_vm_context)
        mov     r0, #0                  @ Rt for these CP15 invalidates is ignored (SBZ)

        /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
        mcr     p15, 4, r0, c8, c3, 4
        /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
        mcr     p15, 0, r0, c7, c1, 0
        dsb
        isb                             @ Not necessary if followed by eret

        bx      lr
ENDPROC(__kvm_flush_vm_context)

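/*
 * Both helpers above run in Hyp mode and are reached from the host through
 * kvm_call_hyp() (see below).  A hedged sketch of the host-side calls,
 * assuming call sites along the lines of those in arch/arm/kvm/:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);		// e.g. on VMID rollover
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);	// e.g. after a stage-2 unmap
 */
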
/********************************************************************
 * Hypervisor world-switch code
 *
 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 */
ENTRY(__kvm_vcpu_run)
        @ Save the vcpu pointer
        mcr     p15, 4, vcpu, c13, c0, 2        @ HTPIDR

        save_host_regs

        @ Store hardware CP15 state and load guest state
        read_cp15_state store_to_vcpu = 0
        write_cp15_state read_from_vcpu = 1

        @ If the host kernel has not been configured with VFPv3 support,
        @ then it is safer to deny the guest the use of it as well.
#ifdef CONFIG_VFPv3
        @ Set FPEXC_EN so the guest doesn't trap floating point instructions
        VFPFMRX r2, FPEXC               @ VMRS
        push    {r2}
        orr     r2, r2, #FPEXC_EN
        VFPFMXR FPEXC, r2               @ VMSR
#endif

        @ Configure Hyp-role
        configure_hyp_role vmentry

        @ Trap coprocessor CRx accesses
        set_hstr vmentry
        set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
        set_hdcr vmentry

        @ Write configured ID register into MIDR alias
        ldr     r1, [vcpu, #VCPU_MIDR]
        mcr     p15, 4, r1, c0, c0, 0

        @ Write guest view of MPIDR into VMPIDR
        ldr     r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
        mcr     p15, 4, r1, c0, c0, 5

        @ Set up guest memory translation
        ldr     r1, [vcpu, #VCPU_KVM]           @ r1: the vcpu's struct kvm
        add     r1, r1, #KVM_VTTBR
        ldrd    r2, r3, [r1]
        mcrr    p15, 6, r2, r3, c2      @ Write VTTBR

        @ We're all done, just restore the GPRs and go to the guest
        restore_guest_regs
        clrex                           @ Clear exclusive monitor
        eret

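/*
 * Nothing branches back to the eret above; once the guest traps, control
 * re-enters Hyp mode at __kvm_hyp_vector (see below), and the individual
 * handlers funnel into __kvm_vcpu_return here.
 */
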
__kvm_vcpu_return:
        /*
         * return convention:
         * guest r0, r1, r2 saved on the stack
         * r0: vcpu pointer
         * r1: exception code
         */
        save_guest_regs

        @ Set VMID == 0
        mov     r2, #0
        mov     r3, #0
        mcrr    p15, 6, r2, r3, c2      @ Write VTTBR

        @ Don't trap coprocessor accesses for host kernel
        set_hstr vmexit
        set_hdcr vmexit
        set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))

#ifdef CONFIG_VFPv3
        @ Save the floating point registers if we let the guest use them.
        @ set_hcptr leaves the pre-update HCPTR value in r2: if the cp10/cp11
        @ trap bits were still set, the guest never touched VFP and there is
        @ nothing to switch.
        tst     r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
        bne     after_vfp_restore

        @ Switch VFP/NEON hardware state to the host's
        add     r7, vcpu, #VCPU_VFP_GUEST
        store_vfp_state r7
        add     r7, vcpu, #VCPU_VFP_HOST
        ldr     r7, [r7]
        restore_vfp_state r7

after_vfp_restore:
        @ Restore FPEXC_EN which we clobbered on entry
        pop     {r2}
        VFPFMXR FPEXC, r2
#endif

        @ Reset Hyp-role
        configure_hyp_role vmexit

        @ Let host read hardware MIDR
        mrc     p15, 0, r2, c0, c0, 0
        mcr     p15, 4, r2, c0, c0, 0

        @ Back to hardware MPIDR
        mrc     p15, 0, r2, c0, c0, 5
        mcr     p15, 4, r2, c0, c0, 5

        @ Store guest CP15 state and restore host state
        read_cp15_state store_to_vcpu = 1
        write_cp15_state read_from_vcpu = 0

        restore_host_regs
        clrex                           @ Clear exclusive monitor
        mov     r0, r1                  @ Return the return code
        mov     r1, #0                  @ Clear upper bits of the u64 return value
        bx      lr                      @ return to IOCTL

/********************************************************************
 * Call function in Hyp mode
 *
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C way; care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
 *   rest:    callee save
 */
ENTRY(kvm_call_hyp)
        hvc     #0
        bx      lr

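/*
 * A minimal C-side usage sketch (illustrative only -- the variable names
 * are assumptions, not the kernel's actual call sites):
 *
 *	struct kvm_vcpu *vcpu = ...;
 *	u64 ret;
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);  // vcpu lands in r0
 *	int exit_code = (int)ret;                  // low word comes back in r0
 */
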
/********************************************************************
 * Hypervisor exception vector and handlers
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see
 * hyp_hvc below) when the HVC instruction is called from SVC mode (i.e. a
 * guest or the host kernel) and they cause a trap to the vector page +
 * offset 0x8 when HVC instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0 and if
 *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch
 * and executing guest code in SVC mode (with a VMID != 0).
 */

/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
.macro bad_exception exception_code, panic_str
        push    {r0-r2}
        mrrc    p15, 6, r0, r1, c2      @ Read VTTBR (r1: high word)
        lsr     r1, r1, #16             @ VMID is VTTBR[55:48], i.e. bits
        ands    r1, r1, #0xff           @ [23:16] of the high word
        beq     99f                     @ VMID == 0: we were in the host

        load_vcpu                       @ Load VCPU pointer
        .if \exception_code == ARM_EXCEPTION_DATA_ABORT
        mrc     p15, 4, r2, c5, c2, 0   @ HSR
        mrc     p15, 4, r1, c6, c0, 0   @ HDFAR
        str     r2, [vcpu, #VCPU_HSR]
        str     r1, [vcpu, #VCPU_HxFAR]
        .endif
        .if \exception_code == ARM_EXCEPTION_PREF_ABORT
        mrc     p15, 4, r2, c5, c2, 0   @ HSR
        mrc     p15, 4, r1, c6, c0, 2   @ HIFAR
        str     r2, [vcpu, #VCPU_HSR]
        str     r1, [vcpu, #VCPU_HxFAR]
        .endif
        mov     r1, #\exception_code
        b       __kvm_vcpu_return

        @ We were in the host already. Let's craft a panicking return to SVC.
99:     mrs     r2, cpsr
        bic     r2, r2, #MODE_MASK
        orr     r2, r2, #SVC_MODE
THUMB(  orr     r2, r2, #PSR_T_BIT      )
        msr     spsr_cxsf, r2
        mrs     r1, ELR_hyp             @ r1: faulting address for the %#08x below
        ldr     r2, =BSYM(panic)
        msr     ELR_hyp, r2
        ldr     r0, =\panic_str
        eret                            @ "returns" into panic() in SVC mode
.endm

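/*
 * The crafted exception return above lands in the host's panic() with the
 * AAPCS arguments already in place, roughly equivalent to the C call:
 *
 *	panic("unexpected data abort in Hyp mode at: %#08x", elr_hyp);
 *
 * where elr_hyp stands for the faulting address read from ELR_hyp.
 */
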
        .text

        .align 5
__kvm_hyp_vector:
        .globl __kvm_hyp_vector

        @ Hyp-mode exception vector
        W(b)    hyp_reset
        W(b)    hyp_undef
        W(b)    hyp_svc
        W(b)    hyp_pabt
        W(b)    hyp_dabt
        W(b)    hyp_hvc
        W(b)    hyp_irq
        W(b)    hyp_fiq

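/*
 * Each W(b) above occupies one 32-bit vector slot, so the entries sit at
 * offsets 0x00 (reset) through 0x1c (fiq): hyp_hvc is the Hyp Trap entry
 * at offset 0x14, and hyp_svc doubles as the Hyp Call entry at offset 0x8,
 * matching the ABI comment above.
 */
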
        .align
hyp_reset:
        b       hyp_reset

        .align
hyp_undef:
        bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str

        .align
hyp_svc:
        bad_exception ARM_EXCEPTION_HVC, svc_die_str

        .align
hyp_pabt:
        bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str

        .align
hyp_dabt:
        bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str

        .align
hyp_hvc:
        /*
         * Getting here is either because of a trap from a guest or from
         * calling HVC from the host kernel, which means "switch to Hyp mode".
         */
        push    {r0, r1, r2}

        @ Check syndrome register
        mrc     p15, 4, r1, c5, c2, 0   @ HSR
        lsr     r0, r1, #HSR_EC_SHIFT
#ifdef CONFIG_VFPv3
        cmp     r0, #HSR_EC_CP_0_13
        beq     switch_to_guest_vfp
#endif
        cmp     r0, #HSR_EC_HVC
        bne     guest_trap              @ Not HVC instr.

        /*
         * Let's check if the HVC came from VMID 0 and allow simple
         * switch to Hyp mode
         */
        mrrc    p15, 6, r0, r2, c2      @ Read VTTBR; VMID in bits [23:16] of r2
        lsr     r2, r2, #16
        and     r2, r2, #0xff
        cmp     r2, #0
        bne     guest_trap              @ Guest called HVC

host_switch_to_hyp:
        pop     {r0, r1, r2}

        push    {lr}
        mrs     lr, SPSR
        push    {lr}

        mov     lr, r0                  @ r0 held the HYP function pointer
        mov     r0, r1                  @ shift the (up to three) arguments down
        mov     r1, r2
        mov     r2, r3

THUMB(  orr     lr, #1)
        blx     lr                      @ Call the HYP function

        pop     {lr}
        msr     SPSR_cxsf, lr
        pop     {lr}
        eret

guest_trap:
        load_vcpu                       @ Load VCPU pointer to r0
        str     r1, [vcpu, #VCPU_HSR]

        @ Check if we need the fault information
        lsr     r1, r1, #HSR_EC_SHIFT
        cmp     r1, #HSR_EC_IABT
        mrceq   p15, 4, r2, c6, c0, 2   @ HIFAR
        beq     2f
        cmp     r1, #HSR_EC_DABT
        bne     1f
        mrc     p15, 4, r2, c6, c0, 0   @ HDFAR

2:      str     r2, [vcpu, #VCPU_HxFAR]

        /*
         * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
         *
         * Abort on the stage 2 translation for a memory access from a
         * Non-secure PL1 or PL0 mode:
         *
         * For any Access flag fault or Translation fault, and also for any
         * Permission fault on the stage 2 translation of a memory access
         * made as part of a translation table walk for a stage 1 translation,
         * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
         * is UNKNOWN.
         */

        /* Check for permission fault, and S1PTW */
        mrc     p15, 4, r1, c5, c2, 0   @ HSR
        and     r0, r1, #HSR_FSC_TYPE
        cmp     r0, #FSC_PERM
        tsteq   r1, #(1 << 7)           @ S1PTW
        mrcne   p15, 4, r2, c6, c0, 4   @ HPFAR is valid: read it
        bne     3f

        /* Resolve IPA using the xFAR */
        mcr     p15, 0, r2, c7, c8, 0   @ ATS1CPR: stage 1 translate the xFAR
        isb
        mrrc    p15, 0, r0, r1, c7      @ PAR (r0: low word, r1: high word)
        tst     r0, #1
        bne     4f                      @ Failed translation
        ubfx    r2, r0, #12, #20        @ IPA[31:12] from PAR[31:12]
        lsl     r2, r2, #4              @ ... into HPFAR[23:4]
        orr     r2, r2, r1, lsl #24     @ IPA[39:32] into HPFAR[31:24]

3:      load_vcpu                       @ Load VCPU pointer to r0
        str     r2, [r0, #VCPU_HPFAR]

1:      mov     r1, #ARM_EXCEPTION_HVC
        b       __kvm_vcpu_return

4:      pop     {r0, r1, r2}            @ Failed translation, return to guest
        eret

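/*
 * Worked example of the PAR -> HPFAR conversion above, assuming a stage 1
 * translation result (IPA) of 0x1_23456000: PAR low word [31:12] = 0x23456
 * and PAR high word [7:0] = 0x1, so r2 = (0x23456 << 4) | (0x1 << 24)
 * = 0x01234560, i.e. IPA[39:12] placed in HPFAR[31:4].
 */
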
/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fall back
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
        load_vcpu                       @ Load VCPU pointer to r0
        push    {r3-r7}

        @ NEON/VFP used.  Turn on VFP access.
        set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))

        @ Switch VFP/NEON hardware state to the guest's
        add     r7, r0, #VCPU_VFP_HOST
        ldr     r7, [r7]
        store_vfp_state r7
        add     r7, r0, #VCPU_VFP_GUEST
        restore_vfp_state r7

        pop     {r3-r7}
        pop     {r0-r2}
        eret                            @ Retry the trapped VFP instruction
#endif

        .align
hyp_irq:
        push    {r0, r1, r2}
        mov     r1, #ARM_EXCEPTION_IRQ
        load_vcpu                       @ Load VCPU pointer to r0
        b       __kvm_vcpu_return

        .align
hyp_fiq:
        b       hyp_fiq

        .ltorg

__kvm_hyp_code_end:
        .globl  __kvm_hyp_code_end

        .section ".rodata"

und_die_str:
        .ascii  "unexpected undefined exception in Hyp mode at: %#08x"
pabt_die_str:
        .ascii  "unexpected prefetch abort in Hyp mode at: %#08x"
dabt_die_str:
        .ascii  "unexpected data abort in Hyp mode at: %#08x"
svc_die_str:
        .ascii  "unexpected HVC/SVC trap in Hyp mode at: %#08x"