author		Marc Zyngier <marc.zyngier@arm.com>	2015-10-25 11:51:41 -0400
committer	Marc Zyngier <marc.zyngier@arm.com>	2015-12-14 06:30:43 -0500
commit		1ea66d27e7b01086669ff2abdc3ac89dc90eae51 (patch)
tree		f864e2a46fedf1f683c41c73ee7e175d8ce2ef73
parent		910917bb7db070cc67557a6b3c8fcceaa5c398a7 (diff)
arm64: KVM: Move away from the assembly version of the world switch
This is it. We remove all of the code that has now been rewritten.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
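
[The assembly removed below was superseded by the C world switch added earlier in this series under arch/arm64/kvm/hyp/. As a rough orientation for readers of the diff, the C entry point has approximately the following shape. This is a sketch only; the helper names are approximations drawn from the surrounding series, not part of this patch.]

	/*
	 * Illustrative sketch only -- the rough shape of the C world switch
	 * that replaced the assembly below (arch/arm64/kvm/hyp/ in this
	 * series).  Helper names are approximations, not part of this patch.
	 */
	static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpu_context *host_ctxt;
		struct kvm_cpu_context *guest_ctxt;
		u64 exit_code;

		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		guest_ctxt = &vcpu->arch.ctxt;

		__sysreg_save_state(host_ctxt);		/* was save_host_regs + __save_sysregs */

		__activate_traps(vcpu);			/* was activate_traps */
		__activate_vm(vcpu);			/* was activate_vm */
		__vgic_restore_state(vcpu);		/* was restore_vgic_state */
		__timer_restore_state(vcpu);		/* was restore_timer_state */

		__sysreg_restore_state(guest_ctxt);	/* was the restore_guest_regs path */
		exit_code = __guest_enter(vcpu, host_ctxt);	/* eret into the guest */

		__sysreg_save_state(guest_ctxt);	/* back from a trap or IRQ */
		__timer_save_state(vcpu);
		__vgic_save_state(vcpu);

		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);

		__sysreg_restore_state(host_ctxt);	/* was restore_host_regs */

		return exit_code;
	}

[Each save/restore block in the deleted assembly maps onto one of these helpers, which is what makes the wholesale deletion below possible.]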
-rw-r--r--	arch/arm64/kvm/Makefile	2
-rw-r--r--	arch/arm64/kvm/hyp.S	1081
-rw-r--r--	arch/arm64/kvm/vgic-v2-switch.S	134
-rw-r--r--	arch/arm64/kvm/vgic-v3-switch.S	269
4 files changed, 1 insertion(+), 1485 deletions(-)
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index d31e4e58e961..caee9ee8e12a 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -23,8 +23,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generi
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 86c289832272..0ccdcbbef3c2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,910 +17,7 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
-#include <asm/asm-offsets.h>
 #include <asm/assembler.h>
-#include <asm/cpufeature.h>
-#include <asm/debug-monitors.h>
-#include <asm/esr.h>
-#include <asm/fpsimdmacros.h>
-#include <asm/kvm.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/memory.h>
-
-#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
-#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
-#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
-#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
-
-	.text
-	.pushsection	.hyp.text, "ax"
-	.align	PAGE_SHIFT
-
-.macro save_common_regs
-	// x2: base address for cpu context
-	// x3: tmp register
-
-	add	x3, x2, #CPU_XREG_OFFSET(19)
-	stp	x19, x20, [x3]
-	stp	x21, x22, [x3, #16]
-	stp	x23, x24, [x3, #32]
-	stp	x25, x26, [x3, #48]
-	stp	x27, x28, [x3, #64]
-	stp	x29, lr, [x3, #80]
-
-	mrs	x19, sp_el0
-	mrs	x20, elr_el2		// pc before entering el2
-	mrs	x21, spsr_el2		// pstate before entering el2
-
-	stp	x19, x20, [x3, #96]
-	str	x21, [x3, #112]
-
-	mrs	x22, sp_el1
-	mrs	x23, elr_el1
-	mrs	x24, spsr_el1
-
-	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
-	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
-	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
-.endm
-
-.macro restore_common_regs
-	// x2: base address for cpu context
-	// x3: tmp register
-
-	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
-	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
-	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
-
-	msr	sp_el1, x22
-	msr	elr_el1, x23
-	msr	spsr_el1, x24
-
-	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
-	ldp	x19, x20, [x3]
-	ldr	x21, [x3, #16]
-
-	msr	sp_el0, x19
-	msr	elr_el2, x20		// pc on return from el2
-	msr	spsr_el2, x21		// pstate on return from el2
-
-	add	x3, x2, #CPU_XREG_OFFSET(19)
-	ldp	x19, x20, [x3]
-	ldp	x21, x22, [x3, #16]
-	ldp	x23, x24, [x3, #32]
-	ldp	x25, x26, [x3, #48]
-	ldp	x27, x28, [x3, #64]
-	ldp	x29, lr, [x3, #80]
-.endm
-
-.macro save_host_regs
-	save_common_regs
-.endm
-
-.macro restore_host_regs
-	restore_common_regs
-.endm
-
-.macro save_fpsimd
-	// x2: cpu context address
-	// x3, x4: tmp regs
-	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
-	fpsimd_save x3, 4
-.endm
-
-.macro restore_fpsimd
-	// x2: cpu context address
-	// x3, x4: tmp regs
-	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
-	fpsimd_restore x3, 4
-.endm
-
-.macro save_guest_regs
-	// x0 is the vcpu address
-	// x1 is the return code, do not corrupt!
-	// x2 is the cpu context
-	// x3 is a tmp register
-	// Guest's x0-x3 are on the stack
-
-	// Compute base to save registers
-	add	x3, x2, #CPU_XREG_OFFSET(4)
-	stp	x4, x5, [x3]
-	stp	x6, x7, [x3, #16]
-	stp	x8, x9, [x3, #32]
-	stp	x10, x11, [x3, #48]
-	stp	x12, x13, [x3, #64]
-	stp	x14, x15, [x3, #80]
-	stp	x16, x17, [x3, #96]
-	str	x18, [x3, #112]
-
-	pop	x6, x7			// x2, x3
-	pop	x4, x5			// x0, x1
-
-	add	x3, x2, #CPU_XREG_OFFSET(0)
-	stp	x4, x5, [x3]
-	stp	x6, x7, [x3, #16]
-
-	save_common_regs
-.endm
-
-.macro restore_guest_regs
-	// x0 is the vcpu address.
-	// x2 is the cpu context
-	// x3 is a tmp register
-
-	// Prepare x0-x3 for later restore
-	add	x3, x2, #CPU_XREG_OFFSET(0)
-	ldp	x4, x5, [x3]
-	ldp	x6, x7, [x3, #16]
-	push	x4, x5		// Push x0-x3 on the stack
-	push	x6, x7
-
-	// x4-x18
-	ldp	x4, x5, [x3, #32]
-	ldp	x6, x7, [x3, #48]
-	ldp	x8, x9, [x3, #64]
-	ldp	x10, x11, [x3, #80]
-	ldp	x12, x13, [x3, #96]
-	ldp	x14, x15, [x3, #112]
-	ldp	x16, x17, [x3, #128]
-	ldr	x18, [x3, #144]
-
-	// x19-x29, lr, sp*, elr*, spsr*
-	restore_common_regs
-
-	// Last bits of the 64bit state
-	pop	x2, x3
-	pop	x0, x1
-
-	// Do not touch any register after this!
-.endm
-
-/*
- * Macros to perform system register save/restore.
- *
- * Ordering here is absolutely critical, and must be kept consistent
- * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
- * and in kvm_asm.h.
- *
- * In other words, don't touch any of these unless you know what
- * you are doing.
- */
-.macro save_sysregs
-	// x2: base address for cpu context
-	// x3: tmp register
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
-
-	mrs	x4, vmpidr_el2
-	mrs	x5, csselr_el1
-	mrs	x6, sctlr_el1
-	mrs	x7, actlr_el1
-	mrs	x8, cpacr_el1
-	mrs	x9, ttbr0_el1
-	mrs	x10, ttbr1_el1
-	mrs	x11, tcr_el1
-	mrs	x12, esr_el1
-	mrs	x13, afsr0_el1
-	mrs	x14, afsr1_el1
-	mrs	x15, far_el1
-	mrs	x16, mair_el1
-	mrs	x17, vbar_el1
-	mrs	x18, contextidr_el1
-	mrs	x19, tpidr_el0
-	mrs	x20, tpidrro_el0
-	mrs	x21, tpidr_el1
-	mrs	x22, amair_el1
-	mrs	x23, cntkctl_el1
-	mrs	x24, par_el1
-	mrs	x25, mdscr_el1
-
-	stp	x4, x5, [x3]
-	stp	x6, x7, [x3, #16]
-	stp	x8, x9, [x3, #32]
-	stp	x10, x11, [x3, #48]
-	stp	x12, x13, [x3, #64]
-	stp	x14, x15, [x3, #80]
-	stp	x16, x17, [x3, #96]
-	stp	x18, x19, [x3, #112]
-	stp	x20, x21, [x3, #128]
-	stp	x22, x23, [x3, #144]
-	stp	x24, x25, [x3, #160]
-.endm
-
-.macro save_debug type
-	// x4: pointer to register set
-	// x5: number of registers to skip
-	// x6..x22 trashed
-
-	adr	x22, 1f
-	add	x22, x22, x5, lsl #2
-	br	x22
-1:
-	mrs	x21, \type\()15_el1
-	mrs	x20, \type\()14_el1
-	mrs	x19, \type\()13_el1
-	mrs	x18, \type\()12_el1
-	mrs	x17, \type\()11_el1
-	mrs	x16, \type\()10_el1
-	mrs	x15, \type\()9_el1
-	mrs	x14, \type\()8_el1
-	mrs	x13, \type\()7_el1
-	mrs	x12, \type\()6_el1
-	mrs	x11, \type\()5_el1
-	mrs	x10, \type\()4_el1
-	mrs	x9, \type\()3_el1
-	mrs	x8, \type\()2_el1
-	mrs	x7, \type\()1_el1
-	mrs	x6, \type\()0_el1
-
-	adr	x22, 1f
-	add	x22, x22, x5, lsl #2
-	br	x22
-1:
-	str	x21, [x4, #(15 * 8)]
-	str	x20, [x4, #(14 * 8)]
-	str	x19, [x4, #(13 * 8)]
-	str	x18, [x4, #(12 * 8)]
-	str	x17, [x4, #(11 * 8)]
-	str	x16, [x4, #(10 * 8)]
-	str	x15, [x4, #(9 * 8)]
-	str	x14, [x4, #(8 * 8)]
-	str	x13, [x4, #(7 * 8)]
-	str	x12, [x4, #(6 * 8)]
-	str	x11, [x4, #(5 * 8)]
-	str	x10, [x4, #(4 * 8)]
-	str	x9, [x4, #(3 * 8)]
-	str	x8, [x4, #(2 * 8)]
-	str	x7, [x4, #(1 * 8)]
-	str	x6, [x4, #(0 * 8)]
-.endm
-
-.macro restore_sysregs
-	// x2: base address for cpu context
-	// x3: tmp register
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
-
-	ldp	x4, x5, [x3]
-	ldp	x6, x7, [x3, #16]
-	ldp	x8, x9, [x3, #32]
-	ldp	x10, x11, [x3, #48]
-	ldp	x12, x13, [x3, #64]
-	ldp	x14, x15, [x3, #80]
-	ldp	x16, x17, [x3, #96]
-	ldp	x18, x19, [x3, #112]
-	ldp	x20, x21, [x3, #128]
-	ldp	x22, x23, [x3, #144]
-	ldp	x24, x25, [x3, #160]
-
-	msr	vmpidr_el2, x4
-	msr	csselr_el1, x5
-	msr	sctlr_el1, x6
-	msr	actlr_el1, x7
-	msr	cpacr_el1, x8
-	msr	ttbr0_el1, x9
-	msr	ttbr1_el1, x10
-	msr	tcr_el1, x11
-	msr	esr_el1, x12
-	msr	afsr0_el1, x13
-	msr	afsr1_el1, x14
-	msr	far_el1, x15
-	msr	mair_el1, x16
-	msr	vbar_el1, x17
-	msr	contextidr_el1, x18
-	msr	tpidr_el0, x19
-	msr	tpidrro_el0, x20
-	msr	tpidr_el1, x21
-	msr	amair_el1, x22
-	msr	cntkctl_el1, x23
-	msr	par_el1, x24
-	msr	mdscr_el1, x25
-.endm
-
-.macro restore_debug type
-	// x4: pointer to register set
-	// x5: number of registers to skip
-	// x6..x22 trashed
-
-	adr	x22, 1f
-	add	x22, x22, x5, lsl #2
-	br	x22
-1:
-	ldr	x21, [x4, #(15 * 8)]
-	ldr	x20, [x4, #(14 * 8)]
-	ldr	x19, [x4, #(13 * 8)]
-	ldr	x18, [x4, #(12 * 8)]
-	ldr	x17, [x4, #(11 * 8)]
-	ldr	x16, [x4, #(10 * 8)]
-	ldr	x15, [x4, #(9 * 8)]
-	ldr	x14, [x4, #(8 * 8)]
-	ldr	x13, [x4, #(7 * 8)]
-	ldr	x12, [x4, #(6 * 8)]
-	ldr	x11, [x4, #(5 * 8)]
-	ldr	x10, [x4, #(4 * 8)]
-	ldr	x9, [x4, #(3 * 8)]
-	ldr	x8, [x4, #(2 * 8)]
-	ldr	x7, [x4, #(1 * 8)]
-	ldr	x6, [x4, #(0 * 8)]
-
-	adr	x22, 1f
-	add	x22, x22, x5, lsl #2
-	br	x22
-1:
-	msr	\type\()15_el1, x21
-	msr	\type\()14_el1, x20
-	msr	\type\()13_el1, x19
-	msr	\type\()12_el1, x18
-	msr	\type\()11_el1, x17
-	msr	\type\()10_el1, x16
-	msr	\type\()9_el1, x15
-	msr	\type\()8_el1, x14
-	msr	\type\()7_el1, x13
-	msr	\type\()6_el1, x12
-	msr	\type\()5_el1, x11
-	msr	\type\()4_el1, x10
-	msr	\type\()3_el1, x9
-	msr	\type\()2_el1, x8
-	msr	\type\()1_el1, x7
-	msr	\type\()0_el1, x6
-.endm
-
-.macro skip_32bit_state tmp, target
-	// Skip 32bit state if not needed
-	mrs	\tmp, hcr_el2
-	tbnz	\tmp, #HCR_RW_SHIFT, \target
-.endm
-
-.macro skip_tee_state tmp, target
-	// Skip ThumbEE state if not needed
-	mrs	\tmp, id_pfr0_el1
-	tbz	\tmp, #12, \target
-.endm
-
-.macro skip_debug_state tmp, target
-	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
-	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
-.endm
-
-/*
- * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
- */
-.macro skip_fpsimd_state tmp, target
-	mrs	\tmp, cptr_el2
-	tbnz	\tmp, #CPTR_EL2_TFP_SHIFT, \target
-.endm
-
-.macro compute_debug_state target
-	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
-	// is set, we do a full save/restore cycle and disable trapping.
-	add	x25, x0, #VCPU_CONTEXT
-
-	// Check the state of MDSCR_EL1
-	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
-	and	x26, x25, #DBG_MDSCR_KDE
-	and	x25, x25, #DBG_MDSCR_MDE
-	adds	xzr, x25, x26
-	b.eq	9998f		// Nothing to see there
-
-	// If any interesting bits was set, we must set the flag
-	mov	x26, #KVM_ARM64_DEBUG_DIRTY
-	str	x26, [x0, #VCPU_DEBUG_FLAGS]
-	b	9999f		// Don't skip restore
-
-9998:
-	// Otherwise load the flags from memory in case we recently
-	// trapped
-	skip_debug_state x25, \target
-9999:
-.endm
-
-.macro save_guest_32bit_state
-	skip_32bit_state x3, 1f
-
-	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
-	mrs	x4, spsr_abt
-	mrs	x5, spsr_und
-	mrs	x6, spsr_irq
-	mrs	x7, spsr_fiq
-	stp	x4, x5, [x3]
-	stp	x6, x7, [x3, #16]
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
-	mrs	x4, dacr32_el2
-	mrs	x5, ifsr32_el2
-	stp	x4, x5, [x3]
-
-	skip_fpsimd_state x8, 2f
-	mrs	x6, fpexc32_el2
-	str	x6, [x3, #16]
-2:
-	skip_debug_state x8, 1f
-	mrs	x7, dbgvcr32_el2
-	str	x7, [x3, #24]
-1:
-.endm
-
-.macro restore_guest_32bit_state
-	skip_32bit_state x3, 1f
-
-	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
-	ldp	x4, x5, [x3]
-	ldp	x6, x7, [x3, #16]
-	msr	spsr_abt, x4
-	msr	spsr_und, x5
-	msr	spsr_irq, x6
-	msr	spsr_fiq, x7
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
-	ldp	x4, x5, [x3]
-	msr	dacr32_el2, x4
-	msr	ifsr32_el2, x5
-
-	skip_debug_state x8, 1f
-	ldr	x7, [x3, #24]
-	msr	dbgvcr32_el2, x7
-1:
-.endm
-
-.macro activate_traps
-	ldr	x2, [x0, #VCPU_HCR_EL2]
-
-	/*
-	 * We are about to set CPTR_EL2.TFP to trap all floating point
-	 * register accesses to EL2, however, the ARM ARM clearly states that
-	 * traps are only taken to EL2 if the operation would not otherwise
-	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
-	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
-	 */
-	tbnz	x2, #HCR_RW_SHIFT, 99f	// open code skip_32bit_state
-	mov	x3, #(1 << 30)
-	msr	fpexc32_el2, x3
-	isb
-99:
-	msr	hcr_el2, x2
-	mov	x2, #CPTR_EL2_TTA
-	orr	x2, x2, #CPTR_EL2_TFP
-	msr	cptr_el2, x2
-
-	mov	x2, #(1 << 15)	// Trap CP15 Cr=15
-	msr	hstr_el2, x2
-
-	// Monitor Debug Config - see kvm_arm_setup_debug()
-	ldr	x2, [x0, #VCPU_MDCR_EL2]
-	msr	mdcr_el2, x2
-.endm
-
-.macro deactivate_traps
-	mov	x2, #HCR_RW
-	msr	hcr_el2, x2
-	msr	hstr_el2, xzr
-
-	mrs	x2, mdcr_el2
-	and	x2, x2, #MDCR_EL2_HPMN_MASK
-	msr	mdcr_el2, x2
-.endm
-
-.macro activate_vm
-	ldr	x1, [x0, #VCPU_KVM]
-	kern_hyp_va	x1
-	ldr	x2, [x1, #KVM_VTTBR]
-	msr	vttbr_el2, x2
-.endm
-
-.macro deactivate_vm
-	msr	vttbr_el2, xzr
-.endm
-
-/*
- * Call into the vgic backend for state saving
- */
-.macro save_vgic_state
-alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
-	bl	__save_vgic_v2_state
-alternative_else
-	bl	__save_vgic_v3_state
-alternative_endif
-	mrs	x24, hcr_el2
-	mov	x25, #HCR_INT_OVERRIDE
-	neg	x25, x25
-	and	x24, x24, x25
-	msr	hcr_el2, x24
-.endm
-
-/*
- * Call into the vgic backend for state restoring
- */
-.macro restore_vgic_state
-	mrs	x24, hcr_el2
-	ldr	x25, [x0, #VCPU_IRQ_LINES]
-	orr	x24, x24, #HCR_INT_OVERRIDE
-	orr	x24, x24, x25
-	msr	hcr_el2, x24
-alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
-	bl	__restore_vgic_v2_state
-alternative_else
-	bl	__restore_vgic_v3_state
-alternative_endif
-.endm
-
-.macro save_timer_state
-	// x0: vcpu pointer
-	ldr	x2, [x0, #VCPU_KVM]
-	kern_hyp_va x2
-	ldr	w3, [x2, #KVM_TIMER_ENABLED]
-	cbz	w3, 1f
-
-	mrs	x3, cntv_ctl_el0
-	and	x3, x3, #3
-	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
-
-	isb
-
-	mrs	x3, cntv_cval_el0
-	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]
-
-1:
-	// Disable the virtual timer
-	msr	cntv_ctl_el0, xzr
-
-	// Allow physical timer/counter access for the host
-	mrs	x2, cnthctl_el2
-	orr	x2, x2, #3
-	msr	cnthctl_el2, x2
-
-	// Clear cntvoff for the host
-	msr	cntvoff_el2, xzr
-.endm
-
-.macro restore_timer_state
-	// x0: vcpu pointer
-	// Disallow physical timer access for the guest
-	// Physical counter access is allowed
-	mrs	x2, cnthctl_el2
-	orr	x2, x2, #1
-	bic	x2, x2, #2
-	msr	cnthctl_el2, x2
-
-	ldr	x2, [x0, #VCPU_KVM]
-	kern_hyp_va x2
-	ldr	w3, [x2, #KVM_TIMER_ENABLED]
-	cbz	w3, 1f
-
-	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
-	msr	cntvoff_el2, x3
-	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
-	msr	cntv_cval_el0, x2
-	isb
-
-	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
-	and	x2, x2, #3
-	msr	cntv_ctl_el0, x2
-1:
-.endm
-
-__save_sysregs:
-	save_sysregs
-	ret
-
-__restore_sysregs:
-	restore_sysregs
-	ret
-
-/* Save debug state */
-__save_debug:
-	// x2: ptr to CPU context
-	// x3: ptr to debug reg struct
-	// x4/x5/x6-22/x24-26: trashed
-
-	mrs	x26, id_aa64dfr0_el1
-	ubfx	x24, x26, #12, #4	// Extract BRPs
-	ubfx	x25, x26, #20, #4	// Extract WRPs
-	mov	w26, #15
-	sub	w24, w26, w24		// How many BPs to skip
-	sub	w25, w26, w25		// How many WPs to skip
-
-	mov	x5, x24
-	add	x4, x3, #DEBUG_BCR
-	save_debug dbgbcr
-	add	x4, x3, #DEBUG_BVR
-	save_debug dbgbvr
-
-	mov	x5, x25
-	add	x4, x3, #DEBUG_WCR
-	save_debug dbgwcr
-	add	x4, x3, #DEBUG_WVR
-	save_debug dbgwvr
-
-	mrs	x21, mdccint_el1
-	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
-	ret
-
-/* Restore debug state */
-__restore_debug:
-	// x2: ptr to CPU context
-	// x3: ptr to debug reg struct
-	// x4/x5/x6-22/x24-26: trashed
-
-	mrs	x26, id_aa64dfr0_el1
-	ubfx	x24, x26, #12, #4	// Extract BRPs
-	ubfx	x25, x26, #20, #4	// Extract WRPs
-	mov	w26, #15
-	sub	w24, w26, w24		// How many BPs to skip
-	sub	w25, w26, w25		// How many WPs to skip
-
-	mov	x5, x24
-	add	x4, x3, #DEBUG_BCR
-	restore_debug dbgbcr
-	add	x4, x3, #DEBUG_BVR
-	restore_debug dbgbvr
-
-	mov	x5, x25
-	add	x4, x3, #DEBUG_WCR
-	restore_debug dbgwcr
-	add	x4, x3, #DEBUG_WVR
-	restore_debug dbgwvr
-
-	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
-	msr	mdccint_el1, x21
-
-	ret
-
-__save_fpsimd:
-	skip_fpsimd_state x3, 1f
-	save_fpsimd
-1:	ret
-
-__restore_fpsimd:
-	skip_fpsimd_state x3, 1f
-	restore_fpsimd
-1:	ret
-
-switch_to_guest_fpsimd:
-	push	x4, lr
-
-	mrs	x2, cptr_el2
-	bic	x2, x2, #CPTR_EL2_TFP
-	msr	cptr_el2, x2
-	isb
-
-	mrs	x0, tpidr_el2
-
-	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
-	kern_hyp_va x2
-	bl __save_fpsimd
-
-	add	x2, x0, #VCPU_CONTEXT
-	bl __restore_fpsimd
-
-	skip_32bit_state x3, 1f
-	ldr	x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
-	msr	fpexc32_el2, x4
-1:
-	pop	x4, lr
-	pop	x2, x3
-	pop	x0, x1
-
-	eret
-
-/*
- * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
- *
- * This is the world switch. The first half of the function
- * deals with entering the guest, and anything from __kvm_vcpu_return
- * to the end of the function deals with reentering the host.
- * On the enter path, only x0 (vcpu pointer) must be preserved until
- * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
- * code) must both be preserved until the epilogue.
- * In both cases, x2 points to the CPU context we're saving/restoring from/to.
- */
-ENTRY(__kvm_vcpu_run)
-	kern_hyp_va	x0
-	msr	tpidr_el2, x0	// Save the vcpu register
-
-	// Host context
-	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
-	kern_hyp_va x2
-
-	save_host_regs
-	bl __save_sysregs
-
-	compute_debug_state 1f
-	add	x3, x0, #VCPU_HOST_DEBUG_STATE
-	bl	__save_debug
-1:
-	activate_traps
-	activate_vm
-
-	restore_vgic_state
-	restore_timer_state
-
-	// Guest context
-	add	x2, x0, #VCPU_CONTEXT
-
-	// We must restore the 32-bit state before the sysregs, thanks
-	// to Cortex-A57 erratum #852523.
-	restore_guest_32bit_state
-	bl __restore_sysregs
-
-	skip_debug_state x3, 1f
-	ldr	x3, [x0, #VCPU_DEBUG_PTR]
-	kern_hyp_va x3
-	bl	__restore_debug
-1:
-	restore_guest_regs
-
-	// That's it, no more messing around.
-	eret
-
-__kvm_vcpu_return:
-	// Assume x0 is the vcpu pointer, x1 the return code
-	// Guest's x0-x3 are on the stack
-
-	// Guest context
-	add	x2, x0, #VCPU_CONTEXT
-
-	save_guest_regs
-	bl __save_fpsimd
-	bl __save_sysregs
-
-	skip_debug_state x3, 1f
-	ldr	x3, [x0, #VCPU_DEBUG_PTR]
-	kern_hyp_va x3
-	bl	__save_debug
-1:
-	save_guest_32bit_state
-
-	save_timer_state
-	save_vgic_state
-
-	deactivate_traps
-	deactivate_vm
-
-	// Host context
-	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
-	kern_hyp_va x2
-
-	bl __restore_sysregs
-	bl __restore_fpsimd
-	/* Clear FPSIMD and Trace trapping */
-	msr	cptr_el2, xzr
-
-	skip_debug_state x3, 1f
-	// Clear the dirty flag for the next run, as all the state has
-	// already been saved. Note that we nuke the whole 64bit word.
-	// If we ever add more flags, we'll have to be more careful...
-	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
-	add	x3, x0, #VCPU_HOST_DEBUG_STATE
-	bl	__restore_debug
-1:
-	restore_host_regs
-
-	mov	x0, x1
-	ret
-END(__kvm_vcpu_run)
-
-// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-ENTRY(__kvm_tlb_flush_vmid_ipa)
-	dsb	ishst
-
-	kern_hyp_va x0
-	ldr	x2, [x0, #KVM_VTTBR]
-	msr	vttbr_el2, x2
-	isb
-
-	/*
-	 * We could do so much better if we had the VA as well.
-	 * Instead, we invalidate Stage-2 for this IPA, and the
-	 * whole of Stage-1. Weep...
-	 */
-	lsr	x1, x1, #12
-	tlbi	ipas2e1is, x1
-	/*
-	 * We have to ensure completion of the invalidation at Stage-2,
-	 * since a table walk on another CPU could refill a TLB with a
-	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
-	 * the Stage-1 invalidation happened first.
-	 */
-	dsb	ish
-	tlbi	vmalle1is
-	dsb	ish
-	isb
-
-	msr	vttbr_el2, xzr
-	ret
-ENDPROC(__kvm_tlb_flush_vmid_ipa)
-
-/**
- * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
- * @struct kvm *kvm - pointer to kvm structure
- *
- * Invalidates all Stage 1 and 2 TLB entries for current VMID.
- */
-ENTRY(__kvm_tlb_flush_vmid)
-	dsb	ishst
-
-	kern_hyp_va x0
-	ldr	x2, [x0, #KVM_VTTBR]
-	msr	vttbr_el2, x2
-	isb
-
-	tlbi	vmalls12e1is
-	dsb	ish
-	isb
-
-	msr	vttbr_el2, xzr
-	ret
-ENDPROC(__kvm_tlb_flush_vmid)
-
-ENTRY(__kvm_flush_vm_context)
-	dsb	ishst
-	tlbi	alle1is
-	ic	ialluis
-	dsb	ish
-	ret
-ENDPROC(__kvm_flush_vm_context)
-
-__kvm_hyp_panic:
-	// Stash PAR_EL1 before corrupting it in __restore_sysregs
-	mrs	x0, par_el1
-	push	x0, xzr
-
-	// Guess the context by looking at VTTBR:
-	// If zero, then we're already a host.
-	// Otherwise restore a minimal host context before panicing.
-	mrs	x0, vttbr_el2
-	cbz	x0, 1f
-
-	mrs	x0, tpidr_el2
-
-	deactivate_traps
-	deactivate_vm
-
-	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
-	kern_hyp_va x2
-
-	bl __restore_sysregs
-
-	/*
-	 * Make sure we have a valid host stack, and don't leave junk in the
-	 * frame pointer that will give us a misleading host stack unwinding.
-	 */
-	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
-	msr	sp_el1, x22
-	mov	x29, xzr
-
-1:	adr	x0, __hyp_panic_str
-	adr	x1, 2f
-	ldp	x2, x3, [x1]
-	sub	x0, x0, x2
-	add	x0, x0, x3
-	mrs	x1, spsr_el2
-	mrs	x2, elr_el2
-	mrs	x3, esr_el2
-	mrs	x4, far_el2
-	mrs	x5, hpfar_el2
-	pop	x6, xzr		// active context PAR_EL1
-	mrs	x7, tpidr_el2
-
-	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
-		      PSR_MODE_EL1h)
-	msr	spsr_el2, lr
-	ldr	lr, =panic
-	msr	elr_el2, lr
-	eret
-
-	.align	3
-2:	.quad	HYP_PAGE_OFFSET
-	.quad	PAGE_OFFSET
-ENDPROC(__kvm_hyp_panic)
-
-__hyp_panic_str:
-	.ascii	"HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
-
-	.align	2
 
 /*
  * u64 kvm_call_hyp(void *hypfn, ...);
@@ -934,7 +31,7 @@ __hyp_panic_str:
  * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
  * function pointer can be passed).  The function being called must be mapped
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
- * passed in r0 and r1.
+ * passed in x0.
  *
  * A function pointer with a value of 0 has a special meaning, and is
  * used to implement __hyp_get_vectors in the same way as in
@@ -944,179 +41,3 @@ ENTRY(kvm_call_hyp)
 	hvc	#0
 	ret
 ENDPROC(kvm_call_hyp)
-
-.macro invalid_vector	label, target
-	.align	2
-\label:
-	b \target
-ENDPROC(\label)
-.endm
-
-	/* None of these should ever happen */
-	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
-	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
-	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
-	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
-	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
-	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
-	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
-	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
-	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
-	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
-	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
-	invalid_vector	el1_error_invalid, __kvm_hyp_panic
-
-el1_sync:					// Guest trapped into EL2
-	push	x0, x1
-	push	x2, x3
-
-	mrs	x1, esr_el2
-	lsr	x2, x1, #ESR_ELx_EC_SHIFT
-
-	cmp	x2, #ESR_ELx_EC_HVC64
-	b.ne	el1_trap
-
-	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
-	cbnz	x3, el1_trap		// called HVC
-
-	/* Here, we're pretty sure the host called HVC. */
-	pop	x2, x3
-	pop	x0, x1
-
-	/* Check for __hyp_get_vectors */
-	cbnz	x0, 1f
-	mrs	x0, vbar_el2
-	b	2f
-
-1:	push	lr, xzr
-
-	/*
-	 * Compute the function address in EL2, and shuffle the parameters.
-	 */
-	kern_hyp_va	x0
-	mov	lr, x0
-	mov	x0, x1
-	mov	x1, x2
-	mov	x2, x3
-	blr	lr
-
-	pop	lr, xzr
-2:	eret
-
-el1_trap:
-	/*
-	 * x1: ESR
-	 * x2: ESR_EC
-	 */
-
-	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
-	cmp	x2, #ESR_ELx_EC_FP_ASIMD
-	b.eq	switch_to_guest_fpsimd
-
-	cmp	x2, #ESR_ELx_EC_DABT_LOW
-	mov	x0, #ESR_ELx_EC_IABT_LOW
-	ccmp	x2, x0, #4, ne
-	b.ne	1f		// Not an abort we care about
-
-	/* This is an abort. Check for permission fault */
-alternative_if_not ARM64_WORKAROUND_834220
-	and	x2, x1, #ESR_ELx_FSC_TYPE
-	cmp	x2, #FSC_PERM
-	b.ne	1f		// Not a permission fault
-alternative_else
-	nop			// Use the permission fault path to
-	nop			// check for a valid S1 translation,
-	nop			// regardless of the ESR value.
-alternative_endif
-
-	/*
-	 * Check for Stage-1 page table walk, which is guaranteed
-	 * to give a valid HPFAR_EL2.
-	 */
-	tbnz	x1, #7, 1f	// S1PTW is set
-
-	/* Preserve PAR_EL1 */
-	mrs	x3, par_el1
-	push	x3, xzr
-
-	/*
-	 * Permission fault, HPFAR_EL2 is invalid.
-	 * Resolve the IPA the hard way using the guest VA.
-	 * Stage-1 translation already validated the memory access rights.
-	 * As such, we can use the EL1 translation regime, and don't have
-	 * to distinguish between EL0 and EL1 access.
-	 */
-	mrs	x2, far_el2
-	at	s1e1r, x2
-	isb
-
-	/* Read result */
-	mrs	x3, par_el1
-	pop	x0, xzr			// Restore PAR_EL1 from the stack
-	msr	par_el1, x0
-	tbnz	x3, #0, 3f		// Bail out if we failed the translation
-	ubfx	x3, x3, #12, #36	// Extract IPA
-	lsl	x3, x3, #4		// and present it like HPFAR
-	b	2f
-
-1:	mrs	x3, hpfar_el2
-	mrs	x2, far_el2
-
-2:	mrs	x0, tpidr_el2
-	str	w1, [x0, #VCPU_ESR_EL2]
-	str	x2, [x0, #VCPU_FAR_EL2]
-	str	x3, [x0, #VCPU_HPFAR_EL2]
-
-	mov	x1, #ARM_EXCEPTION_TRAP
-	b	__kvm_vcpu_return
-
-	/*
-	 * Translation failed. Just return to the guest and
-	 * let it fault again. Another CPU is probably playing
-	 * behind our back.
-	 */
-3:	pop	x2, x3
-	pop	x0, x1
-
-	eret
-
-el1_irq:
-	push	x0, x1
-	push	x2, x3
-	mrs	x0, tpidr_el2
-	mov	x1, #ARM_EXCEPTION_IRQ
-	b	__kvm_vcpu_return
-
-	.ltorg
-
-	.align 11
-
-ENTRY(__kvm_hyp_vector)
-	ventry	el2t_sync_invalid	// Synchronous EL2t
-	ventry	el2t_irq_invalid	// IRQ EL2t
-	ventry	el2t_fiq_invalid	// FIQ EL2t
-	ventry	el2t_error_invalid	// Error EL2t
-
-	ventry	el2h_sync_invalid	// Synchronous EL2h
-	ventry	el2h_irq_invalid	// IRQ EL2h
-	ventry	el2h_fiq_invalid	// FIQ EL2h
-	ventry	el2h_error_invalid	// Error EL2h
-
-	ventry	el1_sync		// Synchronous 64-bit EL1
-	ventry	el1_irq			// IRQ 64-bit EL1
-	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
-	ventry	el1_error_invalid	// Error 64-bit EL1
-
-	ventry	el1_sync		// Synchronous 32-bit EL1
-	ventry	el1_irq			// IRQ 32-bit EL1
-	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
-	ventry	el1_error_invalid	// Error 32-bit EL1
-ENDPROC(__kvm_hyp_vector)
-
-
-ENTRY(__kvm_get_mdcr_el2)
-	mrs	x0, mdcr_el2
-	ret
-ENDPROC(__kvm_get_mdcr_el2)
-
-	.popsection
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
deleted file mode 100644
index 3f000712a85d..000000000000
--- a/arch/arm64/kvm/vgic-v2-switch.S
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic.h>
-
-#include <asm/assembler.h>
-#include <asm/memory.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-
-	.text
-	.pushsection	.hyp.text, "ax"
-
-/*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
- */
-ENTRY(__save_vgic_v2_state)
-__save_vgic_v2_state:
-	/* Get VGIC VCTRL base into x2 */
-	ldr	x2, [x0, #VCPU_KVM]
-	kern_hyp_va	x2
-	ldr	x2, [x2, #KVM_VGIC_VCTRL]
-	kern_hyp_va	x2
-	cbz	x2, 2f		// disabled
-
-	/* Compute the address of struct vgic_cpu */
-	add	x3, x0, #VCPU_VGIC_CPU
-
-	/* Save all interesting registers */
-	ldr	w5, [x2, #GICH_VMCR]
-	ldr	w6, [x2, #GICH_MISR]
-	ldr	w7, [x2, #GICH_EISR0]
-	ldr	w8, [x2, #GICH_EISR1]
-	ldr	w9, [x2, #GICH_ELRSR0]
-	ldr	w10, [x2, #GICH_ELRSR1]
-	ldr	w11, [x2, #GICH_APR]
-CPU_BE(	rev	w5, w5 )
-CPU_BE(	rev	w6, w6 )
-CPU_BE(	rev	w7, w7 )
-CPU_BE(	rev	w8, w8 )
-CPU_BE(	rev	w9, w9 )
-CPU_BE(	rev	w10, w10 )
-CPU_BE(	rev	w11, w11 )
-
-	str	w5, [x3, #VGIC_V2_CPU_VMCR]
-	str	w6, [x3, #VGIC_V2_CPU_MISR]
-CPU_LE(	str	w7, [x3, #VGIC_V2_CPU_EISR] )
-CPU_LE(	str	w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
-CPU_LE(	str	w9, [x3, #VGIC_V2_CPU_ELRSR] )
-CPU_LE(	str	w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
-CPU_BE(	str	w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
-CPU_BE(	str	w8, [x3, #VGIC_V2_CPU_EISR] )
-CPU_BE(	str	w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
-CPU_BE(	str	w10, [x3, #VGIC_V2_CPU_ELRSR] )
-	str	w11, [x3, #VGIC_V2_CPU_APR]
-
-	/* Clear GICH_HCR */
-	str	wzr, [x2, #GICH_HCR]
-
-	/* Save list registers */
-	add	x2, x2, #GICH_LR0
-	ldr	w4, [x3, #VGIC_CPU_NR_LR]
-	add	x3, x3, #VGIC_V2_CPU_LR
-1:	ldr	w5, [x2], #4
-CPU_BE(	rev	w5, w5 )
-	str	w5, [x3], #4
-	sub	w4, w4, #1
-	cbnz	w4, 1b
-2:
-	ret
-ENDPROC(__save_vgic_v2_state)
-
-/*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
- */
-ENTRY(__restore_vgic_v2_state)
-__restore_vgic_v2_state:
-	/* Get VGIC VCTRL base into x2 */
-	ldr	x2, [x0, #VCPU_KVM]
-	kern_hyp_va	x2
-	ldr	x2, [x2, #KVM_VGIC_VCTRL]
-	kern_hyp_va	x2
-	cbz	x2, 2f		// disabled
-
-	/* Compute the address of struct vgic_cpu */
-	add	x3, x0, #VCPU_VGIC_CPU
-
-	/* We only restore a minimal set of registers */
-	ldr	w4, [x3, #VGIC_V2_CPU_HCR]
-	ldr	w5, [x3, #VGIC_V2_CPU_VMCR]
-	ldr	w6, [x3, #VGIC_V2_CPU_APR]
-CPU_BE(	rev	w4, w4 )
-CPU_BE(	rev	w5, w5 )
-CPU_BE(	rev	w6, w6 )
-
-	str	w4, [x2, #GICH_HCR]
-	str	w5, [x2, #GICH_VMCR]
-	str	w6, [x2, #GICH_APR]
-
-	/* Restore list registers */
-	add	x2, x2, #GICH_LR0
-	ldr	w4, [x3, #VGIC_CPU_NR_LR]
-	add	x3, x3, #VGIC_V2_CPU_LR
-1:	ldr	w5, [x3], #4
-CPU_BE(	rev	w5, w5 )
-	str	w5, [x2], #4
-	sub	w4, w4, #1
-	cbnz	w4, 1b
-2:
-	ret
-ENDPROC(__restore_vgic_v2_state)
-
-	.popsection
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
deleted file mode 100644
index 3c20730ddff5..000000000000
--- a/arch/arm64/kvm/vgic-v3-switch.S
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic-v3.h>
-
-#include <asm/assembler.h>
-#include <asm/memory.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-
-	.text
-	.pushsection	.hyp.text, "ax"
-
-/*
- * We store LRs in reverse order to let the CPU deal with streaming
- * access. Use this macro to make it look saner...
- */
-#define LR_OFFSET(n)	(VGIC_V3_CPU_LR + (15 - n) * 8)
-
-/*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
- */
-.macro	save_vgic_v3_state
-	// Compute the address of struct vgic_cpu
-	add	x3, x0, #VCPU_VGIC_CPU
-
-	// Make sure stores to the GIC via the memory mapped interface
-	// are now visible to the system register interface
-	dsb	st
-
-	// Save all interesting registers
-	mrs_s	x5, ICH_VMCR_EL2
-	mrs_s	x6, ICH_MISR_EL2
-	mrs_s	x7, ICH_EISR_EL2
-	mrs_s	x8, ICH_ELSR_EL2
-
-	str	w5, [x3, #VGIC_V3_CPU_VMCR]
-	str	w6, [x3, #VGIC_V3_CPU_MISR]
-	str	w7, [x3, #VGIC_V3_CPU_EISR]
-	str	w8, [x3, #VGIC_V3_CPU_ELRSR]
-
-	msr_s	ICH_HCR_EL2, xzr
-
-	mrs_s	x21, ICH_VTR_EL2
-	mvn	w22, w21
-	ubfiz	w23, w22, 2, 4	// w23 = (15 - ListRegs) * 4
-
-	adr	x24, 1f
-	add	x24, x24, x23
-	br	x24
-
-1:
-	mrs_s	x20, ICH_LR15_EL2
-	mrs_s	x19, ICH_LR14_EL2
-	mrs_s	x18, ICH_LR13_EL2
-	mrs_s	x17, ICH_LR12_EL2
-	mrs_s	x16, ICH_LR11_EL2
-	mrs_s	x15, ICH_LR10_EL2
-	mrs_s	x14, ICH_LR9_EL2
-	mrs_s	x13, ICH_LR8_EL2
-	mrs_s	x12, ICH_LR7_EL2
-	mrs_s	x11, ICH_LR6_EL2
-	mrs_s	x10, ICH_LR5_EL2
-	mrs_s	x9, ICH_LR4_EL2
-	mrs_s	x8, ICH_LR3_EL2
-	mrs_s	x7, ICH_LR2_EL2
-	mrs_s	x6, ICH_LR1_EL2
-	mrs_s	x5, ICH_LR0_EL2
-
-	adr	x24, 1f
-	add	x24, x24, x23
-	br	x24
-
-1:
-	str	x20, [x3, #LR_OFFSET(15)]
-	str	x19, [x3, #LR_OFFSET(14)]
-	str	x18, [x3, #LR_OFFSET(13)]
-	str	x17, [x3, #LR_OFFSET(12)]
-	str	x16, [x3, #LR_OFFSET(11)]
-	str	x15, [x3, #LR_OFFSET(10)]
-	str	x14, [x3, #LR_OFFSET(9)]
-	str	x13, [x3, #LR_OFFSET(8)]
-	str	x12, [x3, #LR_OFFSET(7)]
-	str	x11, [x3, #LR_OFFSET(6)]
-	str	x10, [x3, #LR_OFFSET(5)]
-	str	x9, [x3, #LR_OFFSET(4)]
-	str	x8, [x3, #LR_OFFSET(3)]
-	str	x7, [x3, #LR_OFFSET(2)]
-	str	x6, [x3, #LR_OFFSET(1)]
-	str	x5, [x3, #LR_OFFSET(0)]
-
-	tbnz	w21, #29, 6f	// 6 bits
-	tbz	w21, #30, 5f	// 5 bits
-				// 7 bits
-	mrs_s	x20, ICH_AP0R3_EL2
-	str	w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
-	mrs_s	x19, ICH_AP0R2_EL2
-	str	w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
-6:	mrs_s	x18, ICH_AP0R1_EL2
-	str	w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
-5:	mrs_s	x17, ICH_AP0R0_EL2
-	str	w17, [x3, #VGIC_V3_CPU_AP0R]
-
-	tbnz	w21, #29, 6f	// 6 bits
-	tbz	w21, #30, 5f	// 5 bits
-				// 7 bits
-	mrs_s	x20, ICH_AP1R3_EL2
-	str	w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
-	mrs_s	x19, ICH_AP1R2_EL2
-	str	w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
-6:	mrs_s	x18, ICH_AP1R1_EL2
-	str	w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
-5:	mrs_s	x17, ICH_AP1R0_EL2
-	str	w17, [x3, #VGIC_V3_CPU_AP1R]
-
-	// Restore SRE_EL1 access and re-enable SRE at EL1.
-	mrs_s	x5, ICC_SRE_EL2
-	orr	x5, x5, #ICC_SRE_EL2_ENABLE
-	msr_s	ICC_SRE_EL2, x5
-	isb
-	mov	x5, #1
-	msr_s	ICC_SRE_EL1, x5
-.endm
-
-/*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
- */
-.macro	restore_vgic_v3_state
-	// Compute the address of struct vgic_cpu
-	add	x3, x0, #VCPU_VGIC_CPU
-
-	// Restore all interesting registers
-	ldr	w4, [x3, #VGIC_V3_CPU_HCR]
-	ldr	w5, [x3, #VGIC_V3_CPU_VMCR]
-	ldr	w25, [x3, #VGIC_V3_CPU_SRE]
-
-	msr_s	ICC_SRE_EL1, x25
-
-	// make sure SRE is valid before writing the other registers
-	isb
-
-	msr_s	ICH_HCR_EL2, x4
-	msr_s	ICH_VMCR_EL2, x5
-
-	mrs_s	x21, ICH_VTR_EL2
-
-	tbnz	w21, #29, 6f	// 6 bits
-	tbz	w21, #30, 5f	// 5 bits
-				// 7 bits
-	ldr	w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
-	msr_s	ICH_AP1R3_EL2, x20
-	ldr	w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
-	msr_s	ICH_AP1R2_EL2, x19
-6:	ldr	w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
-	msr_s	ICH_AP1R1_EL2, x18
-5:	ldr	w17, [x3, #VGIC_V3_CPU_AP1R]
-	msr_s	ICH_AP1R0_EL2, x17
-
-	tbnz	w21, #29, 6f	// 6 bits
-	tbz	w21, #30, 5f	// 5 bits
-				// 7 bits
-	ldr	w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
-	msr_s	ICH_AP0R3_EL2, x20
-	ldr	w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
-	msr_s	ICH_AP0R2_EL2, x19
-6:	ldr	w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
-	msr_s	ICH_AP0R1_EL2, x18
-5:	ldr	w17, [x3, #VGIC_V3_CPU_AP0R]
-	msr_s	ICH_AP0R0_EL2, x17
-
-	and	w22, w21, #0xf
-	mvn	w22, w21
-	ubfiz	w23, w22, 2, 4	// w23 = (15 - ListRegs) * 4
-
-	adr	x24, 1f
-	add	x24, x24, x23
-	br	x24
-
-1:
-	ldr	x20, [x3, #LR_OFFSET(15)]
-	ldr	x19, [x3, #LR_OFFSET(14)]
-	ldr	x18, [x3, #LR_OFFSET(13)]
-	ldr	x17, [x3, #LR_OFFSET(12)]
-	ldr	x16, [x3, #LR_OFFSET(11)]
-	ldr	x15, [x3, #LR_OFFSET(10)]
-	ldr	x14, [x3, #LR_OFFSET(9)]
-	ldr	x13, [x3, #LR_OFFSET(8)]
-	ldr	x12, [x3, #LR_OFFSET(7)]
-	ldr	x11, [x3, #LR_OFFSET(6)]
-	ldr	x10, [x3, #LR_OFFSET(5)]
-	ldr	x9, [x3, #LR_OFFSET(4)]
-	ldr	x8, [x3, #LR_OFFSET(3)]
-	ldr	x7, [x3, #LR_OFFSET(2)]
-	ldr	x6, [x3, #LR_OFFSET(1)]
-	ldr	x5, [x3, #LR_OFFSET(0)]
-
-	adr	x24, 1f
-	add	x24, x24, x23
-	br	x24
-
-1:
-	msr_s	ICH_LR15_EL2, x20
-	msr_s	ICH_LR14_EL2, x19
-	msr_s	ICH_LR13_EL2, x18
-	msr_s	ICH_LR12_EL2, x17
-	msr_s	ICH_LR11_EL2, x16
-	msr_s	ICH_LR10_EL2, x15
-	msr_s	ICH_LR9_EL2, x14
-	msr_s	ICH_LR8_EL2, x13
-	msr_s	ICH_LR7_EL2, x12
-	msr_s	ICH_LR6_EL2, x11
-	msr_s	ICH_LR5_EL2, x10
-	msr_s	ICH_LR4_EL2, x9
-	msr_s	ICH_LR3_EL2, x8
-	msr_s	ICH_LR2_EL2, x7
-	msr_s	ICH_LR1_EL2, x6
-	msr_s	ICH_LR0_EL2, x5
-
-	// Ensure that the above will have reached the
-	// (re)distributors. This ensure the guest will read
-	// the correct values from the memory-mapped interface.
-	isb
-	dsb	sy
-
-	// Prevent the guest from touching the GIC system registers
-	// if SRE isn't enabled for GICv3 emulation
-	cbnz	x25, 1f
-	mrs_s	x5, ICC_SRE_EL2
-	and	x5, x5, #~ICC_SRE_EL2_ENABLE
-	msr_s	ICC_SRE_EL2, x5
-1:
-.endm
-
-ENTRY(__save_vgic_v3_state)
-	save_vgic_v3_state
-	ret
-ENDPROC(__save_vgic_v3_state)
-
-ENTRY(__restore_vgic_v3_state)
-	restore_vgic_v3_state
-	ret
-ENDPROC(__restore_vgic_v3_state)
-
-ENTRY(__vgic_v3_get_ich_vtr_el2)
-	mrs_s	x0, ICH_VTR_EL2
-	ret
-ENDPROC(__vgic_v3_get_ich_vtr_el2)
-
-	.popsection
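
[As with the world switch, the GICv3 save/restore above moved to C earlier in this series (arch/arm64/kvm/hyp/vgic-v3-sr.c). One detail worth noting: system registers such as ICH_LRn_EL2 cannot be indexed at run time, so the computed-branch trick used above ("br" into a run of mrs_s instructions) becomes a switch with intentional fallthrough in C. A simplified, illustrative sketch, with names approximated rather than taken from this patch:]

	/*
	 * Illustrative sketch only: save as many list registers as the
	 * hardware implements, mirroring the computed branch above.
	 * read_gicreg() and the cpu-interface layout are approximations
	 * of the C code this series introduced, not part of this patch.
	 */
	static void __vgic_v3_save_lrs(struct vgic_v3_cpu_if *cpu_if, int max_lr_idx)
	{
		switch (max_lr_idx) {	/* fallthrough on every case is intentional */
		case 15: cpu_if->vgic_lr[15] = read_gicreg(ICH_LR15_EL2);
		case 14: cpu_if->vgic_lr[14] = read_gicreg(ICH_LR14_EL2);
		/* ... cases 13 down to 1 follow the same pattern ... */
		case 0:  cpu_if->vgic_lr[0]  = read_gicreg(ICH_LR0_EL2);
		}
	}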