author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-10 19:42:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-10 19:42:49 -0400
commit     519f526d391b0ef775aeb04c4b6f632ea6b3ee50 (patch)
tree       36985d7882734c136fc3c9a48e9d9abf9e97c1f1 /arch/arm64/kvm
parent     06ab838c2024db468855118087db16d8fa905ddc (diff)
parent     ba60c41ae392b473a1897faa0b8739fcb8759d69 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more kvm updates from Paolo Bonzini:
 "ARM:
   - Full debug support for arm64
   - Active state switching for timer interrupts
   - Lazy FP/SIMD save/restore for arm64
   - Generic ARMv8 target

  PPC:
   - Book3S: A few bug fixes
   - Book3S: Allow micro-threading on POWER8

  x86:
   - Compiler warnings

  Generic:
   - Adaptive polling for guest halt"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (49 commits)
  kvm: irqchip: fix memory leak
  kvm: move new trace event outside #ifdef CONFIG_KVM_ASYNC_PF
  KVM: trace kvm_halt_poll_ns grow/shrink
  KVM: dynamic halt-polling
  KVM: make halt_poll_ns per-vCPU
  Silence compiler warning in arch/x86/kvm/emulate.c
  kvm: compile process_smi_save_seg_64() only for x86_64
  KVM: x86: avoid uninitialized variable warning
  KVM: PPC: Book3S: Fix typo in top comment about locking
  KVM: PPC: Book3S: Fix size of the PSPB register
  KVM: PPC: Book3S HV: Exit on H_DOORBELL if HOST_IPI is set
  KVM: PPC: Book3S HV: Fix race in starting secondary threads
  KVM: PPC: Book3S: correct width in XER handling
  KVM: PPC: Book3S HV: Fix preempted vcore stolen time calculation
  KVM: PPC: Book3S HV: Fix preempted vcore list locking
  KVM: PPC: Book3S HV: Implement H_CLEAR_REF and H_CLEAR_MOD
  KVM: PPC: Book3S HV: Fix bug in dirty page tracking
  KVM: PPC: Book3S HV: Fix race in reading change bit when removing HPTE
  KVM: PPC: Book3S HV: Implement dynamic micro-threading on POWER8
  KVM: PPC: Book3S HV: Make use of unused threads when running guests
  ...
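The arm64 guest debug support in this pull is driven entirely through the existing KVM_SET_GUEST_DEBUG ioctl. As a hedged illustration (not code from this series; "vcpu_fd" is assumed to be an already-created vcpu file descriptor), a minimal userspace call to turn on single-stepping looks like:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: ask KVM to single-step a vcpu. Returns 0 on success,
     * or -1 with errno == EINVAL if a control flag is unsupported
     * (see the KVM_GUESTDBG_VALID_MASK check in guest.c below). */
    static int enable_single_step(int vcpu_fd)
    {
            struct kvm_guest_debug dbg = {
                    .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
            };

            return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
    }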
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/Makefile               |   2
-rw-r--r--  arch/arm64/kvm/debug.c                | 217
-rw-r--r--  arch/arm64/kvm/guest.c                |  43
-rw-r--r--  arch/arm64/kvm/handle_exit.c          |  44
-rw-r--r--  arch/arm64/kvm/hyp.S                  | 617
-rw-r--r--  arch/arm64/kvm/reset.c                |  20
-rw-r--r--  arch/arm64/kvm/sys_regs.c             | 291
-rw-r--r--  arch/arm64/kvm/sys_regs.h             |   6
-rw-r--r--  arch/arm64/kvm/sys_regs_generic_v8.c  |   2
-rw-r--r--  arch/arm64/kvm/trace.h                | 123
10 files changed, 939 insertions(+), 426 deletions(-)
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index f90f4aa7f88d..1949fe5f5424 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
 
 kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
-kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
+kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
 
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
new file mode 100644
index 000000000000..47e5f0feaee8
--- /dev/null
+++ b/arch/arm64/kvm/debug.c
@@ -0,0 +1,217 @@
+/*
+ * Debug and Guest Debug support
+ *
+ * Copyright (C) 2015 - Linaro Ltd
+ * Author: Alex Bennée <alex.bennee@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/hw_breakpoint.h>
+
+#include <asm/debug-monitors.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_emulate.h>
+
+#include "trace.h"
+
+/* These are the bits of MDSCR_EL1 we may manipulate */
+#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
+				DBG_MDSCR_KDE | \
+				DBG_MDSCR_MDE)
+
+static DEFINE_PER_CPU(u32, mdcr_el2);
+
+/**
+ * save/restore_guest_debug_regs
+ *
+ * For some debug operations we need to tweak some guest registers. As
+ * a result we need to save the state of those registers before we
+ * make those modifications.
+ *
+ * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
+ * after we have restored the preserved value to the main context.
+ */
+static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.guest_debug_preserved.mdscr_el1 = vcpu_sys_reg(vcpu, MDSCR_EL1);
+
+	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
+				vcpu->arch.guest_debug_preserved.mdscr_el1);
+}
+
+static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+	vcpu_sys_reg(vcpu, MDSCR_EL1) = vcpu->arch.guest_debug_preserved.mdscr_el1;
+
+	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
+				vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+/**
+ * kvm_arm_init_debug - grab what we need for debug
+ *
+ * Currently the sole task of this function is to retrieve the initial
+ * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
+ * presumably been set-up by some knowledgeable bootcode.
+ *
+ * It is called once per-cpu during CPU hyp initialisation.
+ */
+
+void kvm_arm_init_debug(void)
+{
+	__this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
+}
+
+/**
+ * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
+ */
+
+void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
+}
+
+/**
+ * kvm_arm_setup_debug - set up debug related stuff
+ *
+ * @vcpu:	the vcpu pointer
+ *
+ * This is called before each entry into the hypervisor to setup any
+ * debug related registers. Currently this just ensures we will trap
+ * access to:
+ *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+ *  - Debug ROM Address (MDCR_EL2_TDRA)
+ *  - OS related registers (MDCR_EL2_TDOSA)
+ *
+ * Additionally, KVM only traps guest accesses to the debug registers if
+ * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
+ * flag on vcpu->arch.debug_flags).  Since the guest must not interfere
+ * with the hardware state when debugging the guest, we must ensure that
+ * trapping is enabled whenever we are debugging the guest using the
+ * debug registers.
+ */
+
+void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+{
+	bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
+
+	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
+
+	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+				MDCR_EL2_TPMCR |
+				MDCR_EL2_TDRA |
+				MDCR_EL2_TDOSA);
+
+	/* Is Guest debugging in effect? */
+	if (vcpu->guest_debug) {
+		/* Route all software debug exceptions to EL2 */
+		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
+
+		/* Save guest debug state */
+		save_guest_debug_regs(vcpu);
+
+		/*
+		 * Single Step (ARM ARM D2.12.3 The software step state
+		 * machine)
+		 *
+		 * If we are doing Single Step we need to manipulate
+		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
+		 * step has occurred the hypervisor will trap the
+		 * debug exception and we return to userspace.
+		 *
+		 * If the guest attempts to single step its userspace
+		 * we would have to deal with a trapped exception
+		 * while in the guest kernel. Because this would be
+		 * hard to unwind we suppress the guest's ability to
+		 * do so by masking MDSCR_EL.SS.
+		 *
+		 * This confuses guest debuggers which use
+		 * single-step behind the scenes but everything
+		 * returns to normal once the host is no longer
+		 * debugging the system.
+		 */
+		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+			*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+			vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_SS;
+		} else {
+			vcpu_sys_reg(vcpu, MDSCR_EL1) &= ~DBG_MDSCR_SS;
+		}
+
+		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
+
+		/*
+		 * HW Breakpoints and watchpoints
+		 *
+		 * We simply switch the debug_ptr to point to our new
+		 * external_debug_state which has been populated by the
+		 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
+		 * mechanism ensures the registers are updated on the
+		 * world switch.
+		 */
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+			/* Enable breakpoints/watchpoints */
+			vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_MDE;
+
+			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
+			vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+			trap_debug = true;
+
+			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+						&vcpu->arch.debug_ptr->dbg_bcr[0],
+						&vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+						&vcpu->arch.debug_ptr->dbg_wcr[0],
+						&vcpu->arch.debug_ptr->dbg_wvr[0]);
+		}
+	}
+
+	BUG_ON(!vcpu->guest_debug &&
+		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
+
+	/* Trap debug register access */
+	if (trap_debug)
+		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
+
+	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
+	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
+{
+	trace_kvm_arm_clear_debug(vcpu->guest_debug);
+
+	if (vcpu->guest_debug) {
+		restore_guest_debug_regs(vcpu);
+
+		/*
+		 * If we were using HW debug we need to restore the
+		 * debug_ptr to the guest debug state.
+		 */
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+			kvm_arm_reset_debug_ptr(vcpu);
+
+			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+						&vcpu->arch.debug_ptr->dbg_bcr[0],
+						&vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+						&vcpu->arch.debug_ptr->dbg_wcr[0],
+						&vcpu->arch.debug_ptr->dbg_wvr[0]);
+		}
+	}
+}
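The software-step state machine described in the comment above surfaces to userspace as KVM_EXIT_DEBUG exits. A minimal sketch of the consuming run loop (assuming "run" is the mmap'ed struct kvm_run for the vcpu and "vcpu_fd" its descriptor; the report_stop() helper is hypothetical):

    for (;;) {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                    break;
            if (run->exit_reason != KVM_EXIT_DEBUG)
                    break;                        /* not a debug stop */
            report_stop(run->debug.arch.hsr);     /* hypothetical: show where we stopped */
            /* looping re-enters the guest for the next single step */
    }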
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 9535bd555d1d..d250160d32bc 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -32,6 +32,8 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 
+#include "trace.h"
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
@@ -293,7 +295,8 @@ int __attribute_const__ kvm_target_cpu(void)
 		break;
 	};
 
-	return -EINVAL;
+	/* Return a default generic target */
+	return KVM_ARM_TARGET_GENERIC_V8;
 }
 
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
@@ -331,3 +334,41 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 {
 	return -EINVAL;
 }
+
+#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
+			    KVM_GUESTDBG_USE_SW_BP | \
+			    KVM_GUESTDBG_USE_HW | \
+			    KVM_GUESTDBG_SINGLESTEP)
+
+/**
+ * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
+ * @kvm:	pointer to the KVM struct
+ * @kvm_guest_debug: the ioctl data buffer
+ *
+ * This sets up and enables the VM for guest debugging. Userspace
+ * passes in a control flag to enable different debug types and
+ * potentially other architecture specific information in the rest of
+ * the structure.
+ */
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
+{
+	trace_kvm_set_guest_debug(vcpu, dbg->control);
+
+	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
+		return -EINVAL;
+
+	if (dbg->control & KVM_GUESTDBG_ENABLE) {
+		vcpu->guest_debug = dbg->control;
+
+		/* Hardware assisted Break and Watch points */
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+			vcpu->arch.external_debug_state = dbg->arch;
+		}
+
+	} else {
+		/* If not enabled clear all flags */
+		vcpu->guest_debug = 0;
+	}
+	return 0;
+}
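kvm_arch_vcpu_ioctl_set_guest_debug() copies dbg->arch verbatim into external_debug_state, so userspace is responsible for encoding the DBGBCR/DBGBVR values itself. A hedged sketch of arming one hardware breakpoint follows; the E/PMC/BAS field layout is transcribed from the ARM ARM and is an assumption here, not something this patch defines:

    unsigned long breakpoint_addr = 0xffff000000081000UL;  /* assumed target */
    struct kvm_guest_debug dbg = {
            .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW,
    };

    dbg.arch.dbg_bvr[0] = breakpoint_addr;                 /* address to match */
    dbg.arch.dbg_bcr[0] = (0xf << 5) | (0x3 << 1) | 1;     /* BAS=0xf, PMC=0b11, E=1 (assumed encoding) */

    if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
            perror("KVM_SET_GUEST_DEBUG");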
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 524fa25671fc..68a0759b1375 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -82,6 +82,45 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
+/**
+ * kvm_handle_guest_debug - handle a debug exception instruction
+ *
+ * @vcpu:	the vcpu pointer
+ * @run:	access to the kvm_run structure for results
+ *
+ * We route all debug exceptions through the same handler. If both the
+ * guest and host are using the same debug facilities it will be up to
+ * userspace to re-inject the correct exception for guest delivery.
+ *
+ * @return: 0 (while setting run->exit_reason), -1 for error
+ */
+static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int ret = 0;
+
+	run->exit_reason = KVM_EXIT_DEBUG;
+	run->debug.arch.hsr = hsr;
+
+	switch (hsr >> ESR_ELx_EC_SHIFT) {
+	case ESR_ELx_EC_WATCHPT_LOW:
+		run->debug.arch.far = vcpu->arch.fault.far_el2;
+		/* fall through */
+	case ESR_ELx_EC_SOFTSTP_LOW:
+	case ESR_ELx_EC_BREAKPT_LOW:
+	case ESR_ELx_EC_BKPT32:
+	case ESR_ELx_EC_BRK64:
+		break;
+	default:
+		kvm_err("%s: un-handled case hsr: %#08x\n",
+			__func__, (unsigned int) hsr);
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
 	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
@@ -96,6 +135,11 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
 	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
 	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
+	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
+	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
+	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
+	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
+	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
 };
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
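Since kvm_handle_guest_debug() forwards the raw HSR in run->debug.arch.hsr, userspace distinguishes stop reasons by the exception class field. A sketch, with the EC constants transcribed from the ARM ARM (treat the values and helper functions as assumptions, they are not part of this diff):

    #define EC_SHIFT        26
    #define EC_BREAKPT_LOW  0x30    /* HW breakpoint */
    #define EC_SOFTSTP_LOW  0x32    /* single step completed */
    #define EC_WATCHPT_LOW  0x34    /* watchpoint; far holds the address */
    #define EC_BRK64        0x3c    /* brk instruction */

    switch (run->debug.arch.hsr >> EC_SHIFT) {
    case EC_WATCHPT_LOW:
            handle_watchpoint(run->debug.arch.far);  /* hypothetical helper */
            break;
    case EC_BREAKPT_LOW:
    case EC_SOFTSTP_LOW:
    case EC_BRK64:
            handle_breakpoint();                     /* hypothetical helper */
            break;
    }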
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 10915aaf0b01..37c89ea2c572 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -230,199 +230,52 @@
 	stp	x24, x25, [x3, #160]
 .endm
 
-.macro save_debug
-	// x2: base address for cpu context
-	// x3: tmp register
-
-	mrs	x26, id_aa64dfr0_el1
-	ubfx	x24, x26, #12, #4	// Extract BRPs
-	ubfx	x25, x26, #20, #4	// Extract WRPs
-	mov	w26, #15
-	sub	w24, w26, w24	// How many BPs to skip
-	sub	w25, w26, w25	// How many WPs to skip
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
-
-	adr	x26, 1f
-	add	x26, x26, x24, lsl #2
-	br	x26
-1:
-	mrs	x20, dbgbcr15_el1
-	mrs	x19, dbgbcr14_el1
-	mrs	x18, dbgbcr13_el1
-	mrs	x17, dbgbcr12_el1
-	mrs	x16, dbgbcr11_el1
-	mrs	x15, dbgbcr10_el1
-	mrs	x14, dbgbcr9_el1
-	mrs	x13, dbgbcr8_el1
-	mrs	x12, dbgbcr7_el1
-	mrs	x11, dbgbcr6_el1
-	mrs	x10, dbgbcr5_el1
-	mrs	x9, dbgbcr4_el1
-	mrs	x8, dbgbcr3_el1
-	mrs	x7, dbgbcr2_el1
-	mrs	x6, dbgbcr1_el1
-	mrs	x5, dbgbcr0_el1
-
-	adr	x26, 1f
-	add	x26, x26, x24, lsl #2
-	br	x26
-
-1:
-	str	x20, [x3, #(15 * 8)]
-	str	x19, [x3, #(14 * 8)]
-	str	x18, [x3, #(13 * 8)]
-	str	x17, [x3, #(12 * 8)]
-	str	x16, [x3, #(11 * 8)]
-	str	x15, [x3, #(10 * 8)]
-	str	x14, [x3, #(9 * 8)]
-	str	x13, [x3, #(8 * 8)]
-	str	x12, [x3, #(7 * 8)]
-	str	x11, [x3, #(6 * 8)]
-	str	x10, [x3, #(5 * 8)]
-	str	x9, [x3, #(4 * 8)]
-	str	x8, [x3, #(3 * 8)]
-	str	x7, [x3, #(2 * 8)]
-	str	x6, [x3, #(1 * 8)]
-	str	x5, [x3, #(0 * 8)]
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
-	adr	x26, 1f
-	add	x26, x26, x24, lsl #2
-	br	x26
+.macro save_debug type
+	// x4: pointer to register set
+	// x5: number of registers to skip
+	// x6..x22 trashed
+
+	adr	x22, 1f
+	add	x22, x22, x5, lsl #2
+	br	x22
 1:
-	mrs	x20, dbgbvr15_el1
-	mrs	x19, dbgbvr14_el1
-	mrs	x18, dbgbvr13_el1
-	mrs	x17, dbgbvr12_el1
-	mrs	x16, dbgbvr11_el1
-	mrs	x15, dbgbvr10_el1
-	mrs	x14, dbgbvr9_el1
-	mrs	x13, dbgbvr8_el1
-	mrs	x12, dbgbvr7_el1
-	mrs	x11, dbgbvr6_el1
-	mrs	x10, dbgbvr5_el1
-	mrs	x9, dbgbvr4_el1
-	mrs	x8, dbgbvr3_el1
-	mrs	x7, dbgbvr2_el1
-	mrs	x6, dbgbvr1_el1
-	mrs	x5, dbgbvr0_el1
+	mrs	x21, \type\()15_el1
+	mrs	x20, \type\()14_el1
+	mrs	x19, \type\()13_el1
+	mrs	x18, \type\()12_el1
+	mrs	x17, \type\()11_el1
+	mrs	x16, \type\()10_el1
+	mrs	x15, \type\()9_el1
+	mrs	x14, \type\()8_el1
+	mrs	x13, \type\()7_el1
+	mrs	x12, \type\()6_el1
+	mrs	x11, \type\()5_el1
+	mrs	x10, \type\()4_el1
+	mrs	x9, \type\()3_el1
+	mrs	x8, \type\()2_el1
+	mrs	x7, \type\()1_el1
+	mrs	x6, \type\()0_el1
 
-	adr	x26, 1f
-	add	x26, x26, x24, lsl #2
-	br	x26
-
-1:
-	str	x20, [x3, #(15 * 8)]
-	str	x19, [x3, #(14 * 8)]
-	str	x18, [x3, #(13 * 8)]
-	str	x17, [x3, #(12 * 8)]
-	str	x16, [x3, #(11 * 8)]
-	str	x15, [x3, #(10 * 8)]
-	str	x14, [x3, #(9 * 8)]
-	str	x13, [x3, #(8 * 8)]
-	str	x12, [x3, #(7 * 8)]
-	str	x11, [x3, #(6 * 8)]
-	str	x10, [x3, #(5 * 8)]
-	str	x9, [x3, #(4 * 8)]
-	str	x8, [x3, #(3 * 8)]
-	str	x7, [x3, #(2 * 8)]
-	str	x6, [x3, #(1 * 8)]
-	str	x5, [x3, #(0 * 8)]
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
-	adr	x26, 1f
-	add	x26, x26, x25, lsl #2
-	br	x26
-1:
-	mrs	x20, dbgwcr15_el1
-	mrs	x19, dbgwcr14_el1
-	mrs	x18, dbgwcr13_el1
-	mrs	x17, dbgwcr12_el1
-	mrs	x16, dbgwcr11_el1
-	mrs	x15, dbgwcr10_el1
-	mrs	x14, dbgwcr9_el1
-	mrs	x13, dbgwcr8_el1
-	mrs	x12, dbgwcr7_el1
-	mrs	x11, dbgwcr6_el1
-	mrs	x10, dbgwcr5_el1
-	mrs	x9, dbgwcr4_el1
-	mrs	x8, dbgwcr3_el1
-	mrs	x7, dbgwcr2_el1
-	mrs	x6, dbgwcr1_el1
-	mrs	x5, dbgwcr0_el1
-
-	adr	x26, 1f
-	add	x26, x26, x25, lsl #2
-	br	x26
-
-1:
-	str	x20, [x3, #(15 * 8)]
-	str	x19, [x3, #(14 * 8)]
-	str	x18, [x3, #(13 * 8)]
-	str	x17, [x3, #(12 * 8)]
-	str	x16, [x3, #(11 * 8)]
-	str	x15, [x3, #(10 * 8)]
-	str	x14, [x3, #(9 * 8)]
-	str	x13, [x3, #(8 * 8)]
-	str	x12, [x3, #(7 * 8)]
-	str	x11, [x3, #(6 * 8)]
-	str	x10, [x3, #(5 * 8)]
-	str	x9, [x3, #(4 * 8)]
-	str	x8, [x3, #(3 * 8)]
-	str	x7, [x3, #(2 * 8)]
-	str	x6, [x3, #(1 * 8)]
-	str	x5, [x3, #(0 * 8)]
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
-	adr	x26, 1f
-	add	x26, x26, x25, lsl #2
-	br	x26
-1:
-	mrs	x20, dbgwvr15_el1
-	mrs	x19, dbgwvr14_el1
-	mrs	x18, dbgwvr13_el1
-	mrs	x17, dbgwvr12_el1
-	mrs	x16, dbgwvr11_el1
-	mrs	x15, dbgwvr10_el1
-	mrs	x14, dbgwvr9_el1
-	mrs	x13, dbgwvr8_el1
-	mrs	x12, dbgwvr7_el1
-	mrs	x11, dbgwvr6_el1
-	mrs	x10, dbgwvr5_el1
-	mrs	x9, dbgwvr4_el1
-	mrs	x8, dbgwvr3_el1
-	mrs	x7, dbgwvr2_el1
-	mrs	x6, dbgwvr1_el1
-	mrs	x5, dbgwvr0_el1
-
-	adr	x26, 1f
-	add	x26, x26, x25, lsl #2
-	br	x26
-
+	adr	x22, 1f
+	add	x22, x22, x5, lsl #2
+	br	x22
 1:
-	str	x20, [x3, #(15 * 8)]
-	str	x19, [x3, #(14 * 8)]
-	str	x18, [x3, #(13 * 8)]
-	str	x17, [x3, #(12 * 8)]
-	str	x16, [x3, #(11 * 8)]
-	str	x15, [x3, #(10 * 8)]
-	str	x14, [x3, #(9 * 8)]
-	str	x13, [x3, #(8 * 8)]
-	str	x12, [x3, #(7 * 8)]
-	str	x11, [x3, #(6 * 8)]
-	str	x10, [x3, #(5 * 8)]
-	str	x9, [x3, #(4 * 8)]
-	str	x8, [x3, #(3 * 8)]
-	str	x7, [x3, #(2 * 8)]
-	str	x6, [x3, #(1 * 8)]
-	str	x5, [x3, #(0 * 8)]
-
-	mrs	x21, mdccint_el1
-	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+	str	x21, [x4, #(15 * 8)]
+	str	x20, [x4, #(14 * 8)]
+	str	x19, [x4, #(13 * 8)]
+	str	x18, [x4, #(12 * 8)]
+	str	x17, [x4, #(11 * 8)]
+	str	x16, [x4, #(10 * 8)]
+	str	x15, [x4, #(9 * 8)]
+	str	x14, [x4, #(8 * 8)]
+	str	x13, [x4, #(7 * 8)]
+	str	x12, [x4, #(6 * 8)]
+	str	x11, [x4, #(5 * 8)]
+	str	x10, [x4, #(4 * 8)]
+	str	x9, [x4, #(3 * 8)]
+	str	x8, [x4, #(2 * 8)]
+	str	x7, [x4, #(1 * 8)]
+	str	x6, [x4, #(0 * 8)]
 .endm
 
 .macro restore_sysregs
@@ -467,195 +320,52 @@
 	msr	mdscr_el1, x25
 .endm
 
-.macro restore_debug
-	// x2: base address for cpu context
-	// x3: tmp register
-
-	mrs	x26, id_aa64dfr0_el1
-	ubfx	x24, x26, #12, #4	// Extract BRPs
-	ubfx	x25, x26, #20, #4	// Extract WRPs
-	mov	w26, #15
-	sub	w24, w26, w24	// How many BPs to skip
-	sub	w25, w26, w25	// How many WPs to skip
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+.macro restore_debug type
+	// x4: pointer to register set
+	// x5: number of registers to skip
+	// x6..x22 trashed
 
-	adr	x26, 1f
-	add	x26, x26, x24, lsl #2
-	br	x26
-1:
-	ldr	x20, [x3, #(15 * 8)]
-	ldr	x19, [x3, #(14 * 8)]
-	ldr	x18, [x3, #(13 * 8)]
-	ldr	x17, [x3, #(12 * 8)]
-	ldr	x16, [x3, #(11 * 8)]
-	ldr	x15, [x3, #(10 * 8)]
-	ldr	x14, [x3, #(9 * 8)]
-	ldr	x13, [x3, #(8 * 8)]
-	ldr	x12, [x3, #(7 * 8)]
-	ldr	x11, [x3, #(6 * 8)]
-	ldr	x10, [x3, #(5 * 8)]
-	ldr	x9, [x3, #(4 * 8)]
-	ldr	x8, [x3, #(3 * 8)]
-	ldr	x7, [x3, #(2 * 8)]
-	ldr	x6, [x3, #(1 * 8)]
-	ldr	x5, [x3, #(0 * 8)]
-
-	adr	x26, 1f
-	add	x26, x26, x24, lsl #2
-	br	x26
+	adr	x22, 1f
+	add	x22, x22, x5, lsl #2
+	br	x22
 1:
-	msr	dbgbcr15_el1, x20
-	msr	dbgbcr14_el1, x19
-	msr	dbgbcr13_el1, x18
-	msr	dbgbcr12_el1, x17
-	msr	dbgbcr11_el1, x16
-	msr	dbgbcr10_el1, x15
-	msr	dbgbcr9_el1, x14
-	msr	dbgbcr8_el1, x13
-	msr	dbgbcr7_el1, x12
-	msr	dbgbcr6_el1, x11
-	msr	dbgbcr5_el1, x10
-	msr	dbgbcr4_el1, x9
-	msr	dbgbcr3_el1, x8
-	msr	dbgbcr2_el1, x7
-	msr	dbgbcr1_el1, x6
-	msr	dbgbcr0_el1, x5
+	ldr	x21, [x4, #(15 * 8)]
+	ldr	x20, [x4, #(14 * 8)]
+	ldr	x19, [x4, #(13 * 8)]
+	ldr	x18, [x4, #(12 * 8)]
+	ldr	x17, [x4, #(11 * 8)]
+	ldr	x16, [x4, #(10 * 8)]
+	ldr	x15, [x4, #(9 * 8)]
+	ldr	x14, [x4, #(8 * 8)]
+	ldr	x13, [x4, #(7 * 8)]
+	ldr	x12, [x4, #(6 * 8)]
+	ldr	x11, [x4, #(5 * 8)]
+	ldr	x10, [x4, #(4 * 8)]
+	ldr	x9, [x4, #(3 * 8)]
+	ldr	x8, [x4, #(2 * 8)]
+	ldr	x7, [x4, #(1 * 8)]
+	ldr	x6, [x4, #(0 * 8)]
 
-	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
-	adr	x26, 1f
-	add	x26, x26, x24, lsl #2
-	br	x26
+	adr	x22, 1f
+	add	x22, x22, x5, lsl #2
+	br	x22
 1:
-	ldr	x20, [x3, #(15 * 8)]
-	ldr	x19, [x3, #(14 * 8)]
-	ldr	x18, [x3, #(13 * 8)]
-	ldr	x17, [x3, #(12 * 8)]
-	ldr	x16, [x3, #(11 * 8)]
-	ldr	x15, [x3, #(10 * 8)]
-	ldr	x14, [x3, #(9 * 8)]
-	ldr	x13, [x3, #(8 * 8)]
-	ldr	x12, [x3, #(7 * 8)]
-	ldr	x11, [x3, #(6 * 8)]
-	ldr	x10, [x3, #(5 * 8)]
-	ldr	x9, [x3, #(4 * 8)]
-	ldr	x8, [x3, #(3 * 8)]
-	ldr	x7, [x3, #(2 * 8)]
-	ldr	x6, [x3, #(1 * 8)]
-	ldr	x5, [x3, #(0 * 8)]
-
-	adr	x26, 1f
-	add	x26, x26, x24, lsl #2
-	br	x26
-1:
-	msr	dbgbvr15_el1, x20
-	msr	dbgbvr14_el1, x19
-	msr	dbgbvr13_el1, x18
-	msr	dbgbvr12_el1, x17
-	msr	dbgbvr11_el1, x16
-	msr	dbgbvr10_el1, x15
-	msr	dbgbvr9_el1, x14
-	msr	dbgbvr8_el1, x13
-	msr	dbgbvr7_el1, x12
-	msr	dbgbvr6_el1, x11
-	msr	dbgbvr5_el1, x10
-	msr	dbgbvr4_el1, x9
-	msr	dbgbvr3_el1, x8
-	msr	dbgbvr2_el1, x7
-	msr	dbgbvr1_el1, x6
-	msr	dbgbvr0_el1, x5
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
-	adr	x26, 1f
-	add	x26, x26, x25, lsl #2
-	br	x26
-1:
-	ldr	x20, [x3, #(15 * 8)]
-	ldr	x19, [x3, #(14 * 8)]
-	ldr	x18, [x3, #(13 * 8)]
-	ldr	x17, [x3, #(12 * 8)]
-	ldr	x16, [x3, #(11 * 8)]
-	ldr	x15, [x3, #(10 * 8)]
-	ldr	x14, [x3, #(9 * 8)]
-	ldr	x13, [x3, #(8 * 8)]
-	ldr	x12, [x3, #(7 * 8)]
-	ldr	x11, [x3, #(6 * 8)]
-	ldr	x10, [x3, #(5 * 8)]
-	ldr	x9, [x3, #(4 * 8)]
-	ldr	x8, [x3, #(3 * 8)]
-	ldr	x7, [x3, #(2 * 8)]
-	ldr	x6, [x3, #(1 * 8)]
-	ldr	x5, [x3, #(0 * 8)]
-
-	adr	x26, 1f
-	add	x26, x26, x25, lsl #2
-	br	x26
-1:
-	msr	dbgwcr15_el1, x20
-	msr	dbgwcr14_el1, x19
-	msr	dbgwcr13_el1, x18
-	msr	dbgwcr12_el1, x17
-	msr	dbgwcr11_el1, x16
-	msr	dbgwcr10_el1, x15
-	msr	dbgwcr9_el1, x14
-	msr	dbgwcr8_el1, x13
-	msr	dbgwcr7_el1, x12
-	msr	dbgwcr6_el1, x11
-	msr	dbgwcr5_el1, x10
-	msr	dbgwcr4_el1, x9
-	msr	dbgwcr3_el1, x8
-	msr	dbgwcr2_el1, x7
-	msr	dbgwcr1_el1, x6
-	msr	dbgwcr0_el1, x5
-
-	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
-	adr	x26, 1f
-	add	x26, x26, x25, lsl #2
-	br	x26
-1:
-	ldr	x20, [x3, #(15 * 8)]
-	ldr	x19, [x3, #(14 * 8)]
-	ldr	x18, [x3, #(13 * 8)]
-	ldr	x17, [x3, #(12 * 8)]
-	ldr	x16, [x3, #(11 * 8)]
-	ldr	x15, [x3, #(10 * 8)]
-	ldr	x14, [x3, #(9 * 8)]
-	ldr	x13, [x3, #(8 * 8)]
-	ldr	x12, [x3, #(7 * 8)]
-	ldr	x11, [x3, #(6 * 8)]
-	ldr	x10, [x3, #(5 * 8)]
-	ldr	x9, [x3, #(4 * 8)]
-	ldr	x8, [x3, #(3 * 8)]
-	ldr	x7, [x3, #(2 * 8)]
-	ldr	x6, [x3, #(1 * 8)]
-	ldr	x5, [x3, #(0 * 8)]
-
-	adr	x26, 1f
-	add	x26, x26, x25, lsl #2
-	br	x26
-1:
-	msr	dbgwvr15_el1, x20
-	msr	dbgwvr14_el1, x19
-	msr	dbgwvr13_el1, x18
-	msr	dbgwvr12_el1, x17
-	msr	dbgwvr11_el1, x16
-	msr	dbgwvr10_el1, x15
-	msr	dbgwvr9_el1, x14
-	msr	dbgwvr8_el1, x13
-	msr	dbgwvr7_el1, x12
-	msr	dbgwvr6_el1, x11
-	msr	dbgwvr5_el1, x10
-	msr	dbgwvr4_el1, x9
-	msr	dbgwvr3_el1, x8
-	msr	dbgwvr2_el1, x7
-	msr	dbgwvr1_el1, x6
-	msr	dbgwvr0_el1, x5
-
-	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
-	msr	mdccint_el1, x21
+	msr	\type\()15_el1, x21
+	msr	\type\()14_el1, x20
+	msr	\type\()13_el1, x19
+	msr	\type\()12_el1, x18
+	msr	\type\()11_el1, x17
+	msr	\type\()10_el1, x16
+	msr	\type\()9_el1, x15
+	msr	\type\()8_el1, x14
+	msr	\type\()7_el1, x13
+	msr	\type\()6_el1, x12
+	msr	\type\()5_el1, x11
+	msr	\type\()4_el1, x10
+	msr	\type\()3_el1, x9
+	msr	\type\()2_el1, x8
+	msr	\type\()1_el1, x7
+	msr	\type\()0_el1, x6
 .endm
 
 .macro skip_32bit_state tmp, target
@@ -675,6 +385,14 @@
 	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
 .endm
 
+/*
+ * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
+ */
+.macro skip_fpsimd_state tmp, target
+	mrs	\tmp, cptr_el2
+	tbnz	\tmp, #CPTR_EL2_TFP_SHIFT, \target
+.endm
+
 .macro compute_debug_state target
 	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
 	// is set, we do a full save/restore cycle and disable trapping.
@@ -713,10 +431,12 @@
 	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
 	mrs	x4, dacr32_el2
 	mrs	x5, ifsr32_el2
-	mrs	x6, fpexc32_el2
 	stp	x4, x5, [x3]
-	str	x6, [x3, #16]
 
+	skip_fpsimd_state x8, 3f
+	mrs	x6, fpexc32_el2
+	str	x6, [x3, #16]
+3:
 	skip_debug_state x8, 2f
 	mrs	x7, dbgvcr32_el2
 	str	x7, [x3, #24]
@@ -743,10 +463,8 @@
 
 	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
 	ldp	x4, x5, [x3]
-	ldr	x6, [x3, #16]
 	msr	dacr32_el2, x4
 	msr	ifsr32_el2, x5
-	msr	fpexc32_el2, x6
 
 	skip_debug_state x8, 2f
 	ldr	x7, [x3, #24]
@@ -763,31 +481,35 @@
 
 .macro activate_traps
 	ldr	x2, [x0, #VCPU_HCR_EL2]
+
+	/*
+	 * We are about to set CPTR_EL2.TFP to trap all floating point
+	 * register accesses to EL2, however, the ARM ARM clearly states that
+	 * traps are only taken to EL2 if the operation would not otherwise
+	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
+	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
+	 */
+	tbnz	x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state
+	mov	x3, #(1 << 30)
+	msr	fpexc32_el2, x3
+	isb
+99:
 	msr	hcr_el2, x2
 	mov	x2, #CPTR_EL2_TTA
+	orr	x2, x2, #CPTR_EL2_TFP
 	msr	cptr_el2, x2
 
 	mov	x2, #(1 << 15)	// Trap CP15 Cr=15
 	msr	hstr_el2, x2
 
-	mrs	x2, mdcr_el2
-	and	x2, x2, #MDCR_EL2_HPMN_MASK
-	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
-	orr	x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
-
-	// Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
-	// if not dirty.
-	ldr	x3, [x0, #VCPU_DEBUG_FLAGS]
-	tbnz	x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
-	orr	x2, x2, #MDCR_EL2_TDA
-1:
+	// Monitor Debug Config - see kvm_arm_setup_debug()
+	ldr	x2, [x0, #VCPU_MDCR_EL2]
 	msr	mdcr_el2, x2
 .endm
 
 .macro deactivate_traps
 	mov	x2, #HCR_RW
 	msr	hcr_el2, x2
-	msr	cptr_el2, xzr
 	msr	hstr_el2, xzr
 
 	mrs	x2, mdcr_el2
@@ -900,21 +622,101 @@ __restore_sysregs:
 	restore_sysregs
 	ret
 
+/* Save debug state */
 __save_debug:
-	save_debug
+	// x2: ptr to CPU context
+	// x3: ptr to debug reg struct
+	// x4/x5/x6-22/x24-26: trashed
+
+	mrs	x26, id_aa64dfr0_el1
+	ubfx	x24, x26, #12, #4	// Extract BRPs
+	ubfx	x25, x26, #20, #4	// Extract WRPs
+	mov	w26, #15
+	sub	w24, w26, w24		// How many BPs to skip
+	sub	w25, w26, w25		// How many WPs to skip
+
+	mov	x5, x24
+	add	x4, x3, #DEBUG_BCR
+	save_debug dbgbcr
+	add	x4, x3, #DEBUG_BVR
+	save_debug dbgbvr
+
+	mov	x5, x25
+	add	x4, x3, #DEBUG_WCR
+	save_debug dbgwcr
+	add	x4, x3, #DEBUG_WVR
+	save_debug dbgwvr
+
+	mrs	x21, mdccint_el1
+	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
 	ret
 
+/* Restore debug state */
 __restore_debug:
-	restore_debug
+	// x2: ptr to CPU context
+	// x3: ptr to debug reg struct
+	// x4/x5/x6-22/x24-26: trashed
+
+	mrs	x26, id_aa64dfr0_el1
+	ubfx	x24, x26, #12, #4	// Extract BRPs
+	ubfx	x25, x26, #20, #4	// Extract WRPs
+	mov	w26, #15
+	sub	w24, w26, w24		// How many BPs to skip
+	sub	w25, w26, w25		// How many WPs to skip
+
+	mov	x5, x24
+	add	x4, x3, #DEBUG_BCR
+	restore_debug dbgbcr
+	add	x4, x3, #DEBUG_BVR
+	restore_debug dbgbvr
+
+	mov	x5, x25
+	add	x4, x3, #DEBUG_WCR
+	restore_debug dbgwcr
+	add	x4, x3, #DEBUG_WVR
+	restore_debug dbgwvr
+
+	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+	msr	mdccint_el1, x21
+
 	ret
 
 __save_fpsimd:
+	skip_fpsimd_state x3, 1f
 	save_fpsimd
-	ret
+1:	ret
 
 __restore_fpsimd:
+	skip_fpsimd_state x3, 1f
 	restore_fpsimd
-	ret
+1:	ret
+
+switch_to_guest_fpsimd:
+	push	x4, lr
+
+	mrs	x2, cptr_el2
+	bic	x2, x2, #CPTR_EL2_TFP
+	msr	cptr_el2, x2
+	isb
+
+	mrs	x0, tpidr_el2
+
+	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
+	kern_hyp_va x2
+	bl __save_fpsimd
+
+	add	x2, x0, #VCPU_CONTEXT
+	bl __restore_fpsimd
+
+	skip_32bit_state x3, 1f
+	ldr	x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
+	msr	fpexc32_el2, x4
+1:
+	pop	x4, lr
+	pop	x2, x3
+	pop	x0, x1
+
+	eret
 
 /*
  * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
@@ -936,10 +738,10 @@ ENTRY(__kvm_vcpu_run)
 	kern_hyp_va x2
 
 	save_host_regs
-	bl __save_fpsimd
 	bl __save_sysregs
 
 	compute_debug_state 1f
+	add	x3, x0, #VCPU_HOST_DEBUG_STATE
 	bl	__save_debug
 1:
 	activate_traps
@@ -952,9 +754,10 @@ ENTRY(__kvm_vcpu_run)
 	add	x2, x0, #VCPU_CONTEXT
 
 	bl __restore_sysregs
-	bl __restore_fpsimd
 
 	skip_debug_state x3, 1f
+	ldr	x3, [x0, #VCPU_DEBUG_PTR]
+	kern_hyp_va x3
 	bl	__restore_debug
 1:
 	restore_guest_32bit_state
@@ -975,6 +778,8 @@ __kvm_vcpu_return:
 	bl __save_sysregs
 
 	skip_debug_state x3, 1f
+	ldr	x3, [x0, #VCPU_DEBUG_PTR]
+	kern_hyp_va x3
 	bl	__save_debug
 1:
 	save_guest_32bit_state
@@ -991,12 +796,15 @@ __kvm_vcpu_return:
 
 	bl __restore_sysregs
 	bl __restore_fpsimd
+	/* Clear FPSIMD and Trace trapping */
+	msr	cptr_el2, xzr
 
 	skip_debug_state x3, 1f
 	// Clear the dirty flag for the next run, as all the state has
 	// already been saved. Note that we nuke the whole 64bit word.
 	// If we ever add more flags, we'll have to be more careful...
 	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
+	add	x3, x0, #VCPU_HOST_DEBUG_STATE
 	bl	__restore_debug
 1:
 	restore_host_regs
@@ -1199,6 +1007,11 @@ el1_trap:
 	 * x1: ESR
 	 * x2: ESR_EC
 	 */
+
+	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
+	cmp	x2, #ESR_ELx_EC_FP_ASIMD
+	b.eq	switch_to_guest_fpsimd
+
 	cmp	x2, #ESR_ELx_EC_DABT_LOW
 	mov	x0, #ESR_ELx_EC_IABT_LOW
 	ccmp	x2, x0, #4, ne
@@ -1293,4 +1106,10 @@ ENTRY(__kvm_hyp_vector)
 	ventry	el1_error_invalid	// Error 32-bit EL1
 ENDPROC(__kvm_hyp_vector)
 
+
+ENTRY(__kvm_get_mdcr_el2)
+	mrs	x0, mdcr_el2
+	ret
+ENDPROC(__kvm_get_mdcr_el2)
+
 	.popsection
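The assembly above is easier to follow as pseudo-C: entry no longer saves FP/SIMD eagerly, it just sets CPTR_EL2.TFP; the first guest FP/SIMD instruction then traps to el1_trap, which branches to switch_to_guest_fpsimd to do the deferred switch. A sketch under those assumptions (the register and context objects are stand-ins, not kernel code):

    #define CPTR_EL2_TFP (1UL << 10)            /* CPTR_EL2_TFP_SHIFT == 10 */

    static unsigned long cptr_el2;              /* stand-in for the EL2 register */
    struct fpsimd_state { unsigned long vregs[64]; };
    static struct fpsimd_state host_fp, guest_fp;

    static void save_fpsimd(struct fpsimd_state *s)    { (void)s; /* stp q0..q31 */ }
    static void restore_fpsimd(struct fpsimd_state *s) { (void)s; /* ldp q0..q31 */ }

    /* activate_traps: arm the trap, leave host FP state live in hardware */
    static void vcpu_entry(void)
    {
            cptr_el2 |= CPTR_EL2_TFP;
    }

    /* switch_to_guest_fpsimd: runs only if the guest touches FP/SIMD */
    static void fpsimd_trap(void)
    {
            cptr_el2 &= ~CPTR_EL2_TFP;  /* further accesses run natively */
            save_fpsimd(&host_fp);      /* pay the save/restore cost once */
            restore_fpsimd(&guest_fp);
            /* eret: the guest re-executes the trapped instruction */
    }

A guest that never touches the FP/SIMD registers during a run now skips both the save and the restore entirely.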
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 0b4326578985..91cf5350b328 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
+#include <linux/hw_breakpoint.h>
 
 #include <kvm/arm_arch_timer.h>
 
@@ -56,6 +57,12 @@ static bool cpu_has_32bit_el1(void)
 	return !!(pfr0 & 0x20);
 }
 
+/**
+ * kvm_arch_dev_ioctl_check_extension
+ *
+ * We currently assume that the number of HW registers is uniform
+ * across all CPUs (see cpuinfo_sanity_check).
+ */
 int kvm_arch_dev_ioctl_check_extension(long ext)
 {
 	int r;
@@ -64,6 +71,15 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_ARM_EL1_32BIT:
 		r = cpu_has_32bit_el1();
 		break;
+	case KVM_CAP_GUEST_DEBUG_HW_BPS:
+		r = get_num_brps();
+		break;
+	case KVM_CAP_GUEST_DEBUG_HW_WPS:
+		r = get_num_wrps();
+		break;
+	case KVM_CAP_SET_GUEST_DEBUG:
+		r = 1;
+		break;
 	default:
 		r = 0;
 	}
@@ -105,7 +121,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	kvm_reset_sys_regs(vcpu);
 
 	/* Reset timer */
-	kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
-
-	return 0;
+	return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
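Userspace can size its breakpoint arrays from the new capabilities before issuing the debug ioctl; a minimal sketch ("kvm_fd" is assumed to be the open /dev/kvm descriptor):

    int nbps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_BPS);
    int nwps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_WPS);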
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c370b4014799..b41607d270ac 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -38,6 +38,8 @@
 
 #include "sys_regs.h"
 
+#include "trace.h"
+
 /*
  * All of this file is extremly similar to the ARM coproc.c, but the
  * types are different. My gut feeling is that it should be pretty
@@ -208,9 +210,217 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
 		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
 	}
 
+	trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+
+	return true;
+}
+
+/*
+ * reg_to_dbg/dbg_to_reg
+ *
+ * A 32 bit write to a debug register leave top bits alone
+ * A 32 bit read from a debug register only returns the bottom bits
+ *
+ * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
+ * hyp.S code switches between host and guest values in future.
+ */
+static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
+			      const struct sys_reg_params *p,
+			      u64 *dbg_reg)
+{
+	u64 val = *vcpu_reg(vcpu, p->Rt);
+
+	if (p->is_32bit) {
+		val &= 0xffffffffUL;
+		val |= ((*dbg_reg >> 32) << 32);
+	}
+
+	*dbg_reg = val;
+	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+}
+
+static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
+			      const struct sys_reg_params *p,
+			      u64 *dbg_reg)
+{
+	u64 val = *dbg_reg;
+
+	if (p->is_32bit)
+		val &= 0xffffffffUL;
+
+	*vcpu_reg(vcpu, p->Rt) = val;
+}
+
+static inline bool trap_bvr(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *rd)
+{
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+	if (p->is_write)
+		reg_to_dbg(vcpu, p, dbg_reg);
+	else
+		dbg_to_reg(vcpu, p, dbg_reg);
+
+	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+	return true;
+}
+
+static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+		const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+	const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static inline void reset_bvr(struct kvm_vcpu *vcpu,
+			     const struct sys_reg_desc *rd)
+{
+	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_bcr(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *rd)
+{
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+	if (p->is_write)
+		reg_to_dbg(vcpu, p, dbg_reg);
+	else
+		dbg_to_reg(vcpu, p, dbg_reg);
+
+	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+	return true;
+}
+
+static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+		const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+	const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static inline void reset_bcr(struct kvm_vcpu *vcpu,
+			     const struct sys_reg_desc *rd)
+{
+	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wvr(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *rd)
+{
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+	if (p->is_write)
+		reg_to_dbg(vcpu, p, dbg_reg);
+	else
+		dbg_to_reg(vcpu, p, dbg_reg);
+
+	trace_trap_reg(__func__, rd->reg, p->is_write,
+		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+
 	return true;
 }
 
+static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+		const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+	const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static inline void reset_wvr(struct kvm_vcpu *vcpu,
+			     const struct sys_reg_desc *rd)
+{
+	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wcr(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *rd)
+{
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+	if (p->is_write)
+		reg_to_dbg(vcpu, p, dbg_reg);
+	else
+		dbg_to_reg(vcpu, p, dbg_reg);
+
+	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+	return true;
+}
+
+static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+		const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+	if (copy_from_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+	const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static inline void reset_wcr(struct kvm_vcpu *vcpu,
+			     const struct sys_reg_desc *rd)
+{
+	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+}
+
 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 amair;
@@ -240,16 +450,16 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	/* DBGBVRn_EL1 */						\
 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
-	  trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 },		\
+	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
 	/* DBGBCRn_EL1 */						\
 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
-	  trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 },		\
+	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
 	/* DBGWVRn_EL1 */						\
 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
-	  trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 },		\
+	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
 	/* DBGWCRn_EL1 */						\
 	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
-	  trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
+	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
 
 /*
  * Architected system registers.
@@ -516,28 +726,57 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-#define DBG_BCR_BVR_WCR_WVR(n)					\
-	/* DBGBVRn */						\
-	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32,	\
-	  NULL, (cp14_DBGBVR0 + (n) * 2) },			\
-	/* DBGBCRn */						\
-	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32,	\
-	  NULL, (cp14_DBGBCR0 + (n) * 2) },			\
-	/* DBGWVRn */						\
-	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32,	\
-	  NULL, (cp14_DBGWVR0 + (n) * 2) },			\
-	/* DBGWCRn */						\
-	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32,	\
-	  NULL, (cp14_DBGWCR0 + (n) * 2) }
-
-#define DBGBXVR(n)						\
-	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32,	\
-	  NULL, cp14_DBGBXVR0 + n * 2 }
+/* AArch32 debug register mappings
+ *
+ * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
+ * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
+ *
+ * All control registers and watchpoint value registers are mapped to
+ * the lower 32 bits of their AArch64 equivalents. We share the trap
+ * handlers with the above AArch64 code which checks what mode the
+ * system is in.
+ */
+
+static inline bool trap_xvr(struct kvm_vcpu *vcpu,
+			    const struct sys_reg_params *p,
+			    const struct sys_reg_desc *rd)
+{
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+	if (p->is_write) {
+		u64 val = *dbg_reg;
+
+		val &= 0xffffffffUL;
+		val |= *vcpu_reg(vcpu, p->Rt) << 32;
+		*dbg_reg = val;
+
+		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+	} else {
+		*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+	}
+
+	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+	return true;
+}
+
+#define DBG_BCR_BVR_WCR_WVR(n)						\
+	/* DBGBVRn */							\
+	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
+	/* DBGBCRn */							\
+	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
+	/* DBGWVRn */							\
+	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
+	/* DBGWCRn */							\
+	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
+
+#define DBGBXVR(n)							\
+	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
 
 /*
  * Trapped cp14 registers. We generally ignore most of the external
  * debug, on the principle that they don't really make sense to a
- * guest. Revisit this one day, whould this principle change.
+ * guest. Revisit this one day, would this principle change.
  */
 static const struct sys_reg_desc cp14_regs[] = {
 	/* DBGIDR */
@@ -999,6 +1238,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
 
+	trace_kvm_handle_sys_reg(esr);
+
 	params.is_aarch32 = false;
 	params.is_32bit = false;
 	params.Op0 = (esr >> 20) & 3;
@@ -1303,6 +1544,9 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
 	if (!r)
 		return get_invariant_sys_reg(reg->id, uaddr);
 
+	if (r->get_user)
+		return (r->get_user)(vcpu, r, reg, uaddr);
+
 	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
 }
 
@@ -1321,6 +1565,9 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
 	if (!r)
 		return set_invariant_sys_reg(reg->id, uaddr);
 
+	if (r->set_user)
+		return (r->set_user)(vcpu, r, reg, uaddr);
+
 	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
 }
 
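With the get_user/set_user hooks in place, the debug registers are reachable through the ONE_REG interface like any other system register. A sketch reading DBGBVR0_EL1 from userspace (ARM64_SYS_REG() is the uapi helper from <linux/kvm.h>; the Op0/Op1/CRn/CRm/Op2 values mirror the table entry above, and "vcpu_fd" is assumed):

    __u64 val;
    struct kvm_one_reg reg = {
            .id   = ARM64_SYS_REG(2, 0, 0, 0, 4),  /* DBGBVR0_EL1 */
            .addr = (__u64)&val,
    };

    if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
            perror("KVM_GET_ONE_REG");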
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index d411e251412c..eaa324e4db4d 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -55,6 +55,12 @@ struct sys_reg_desc {
 
 	/* Value (usually reset value) */
 	u64 val;
+
+	/* Custom get/set_user functions, fallback to generic if NULL */
+	int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+			const struct kvm_one_reg *reg, void __user *uaddr);
+	int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+			const struct kvm_one_reg *reg, void __user *uaddr);
 };
 
 static inline void print_sys_reg_instr(const struct sys_reg_params *p)
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 475fd2929310..1e4576824165 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -94,6 +94,8 @@ static int __init sys_reg_genericv8_init(void)
 					  &genericv8_target_table);
 	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
 					  &genericv8_target_table);
+	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_GENERIC_V8,
+					  &genericv8_target_table);
 
 	return 0;
 }
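With the generic v8 target registered, a VMM no longer has to match the host MIDR against a known-CPU list; it can ask the kernel for its preferred target and feed that straight back. Sketch ("vm_fd" and "vcpu_fd" are assumed descriptors):

    struct kvm_vcpu_init init;

    if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) == 0)
            ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);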
diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h
index 157416e963f2..7fb0008c4fa3 100644
--- a/arch/arm64/kvm/trace.h
+++ b/arch/arm64/kvm/trace.h
@@ -44,6 +44,129 @@ TRACE_EVENT(kvm_hvc_arm64,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_arm_setup_debug,
+	TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+	TP_ARGS(vcpu, guest_debug),
+
+	TP_STRUCT__entry(
+		__field(struct kvm_vcpu *, vcpu)
+		__field(__u32, guest_debug)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu = vcpu;
+		__entry->guest_debug = guest_debug;
+	),
+
+	TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_clear_debug,
+	TP_PROTO(__u32 guest_debug),
+	TP_ARGS(guest_debug),
+
+	TP_STRUCT__entry(
+		__field(__u32, guest_debug)
+	),
+
+	TP_fast_assign(
+		__entry->guest_debug = guest_debug;
+	),
+
+	TP_printk("flags: 0x%08x", __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_set_dreg32,
+	TP_PROTO(const char *name, __u32 value),
+	TP_ARGS(name, value),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(__u32, value)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->value = value;
+	),
+
+	TP_printk("%s: 0x%08x", __entry->name, __entry->value)
+);
+
+TRACE_EVENT(kvm_arm_set_regset,
+	TP_PROTO(const char *type, int len, __u64 *control, __u64 *value),
+	TP_ARGS(type, len, control, value),
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, len)
+		__array(u64, ctrls, 16)
+		__array(u64, values, 16)
+	),
+	TP_fast_assign(
+		__entry->name = type;
+		__entry->len = len;
+		memcpy(__entry->ctrls, control, len << 3);
+		memcpy(__entry->values, value, len << 3);
+	),
+	TP_printk("%d %s CTRL:%s VALUE:%s", __entry->len, __entry->name,
+		__print_array(__entry->ctrls, __entry->len, sizeof(__u64)),
+		__print_array(__entry->values, __entry->len, sizeof(__u64)))
+);
+
+TRACE_EVENT(trap_reg,
+	TP_PROTO(const char *fn, int reg, bool is_write, u64 write_value),
+	TP_ARGS(fn, reg, is_write, write_value),
+
+	TP_STRUCT__entry(
+		__field(const char *, fn)
+		__field(int, reg)
+		__field(bool, is_write)
+		__field(u64, write_value)
+	),
+
+	TP_fast_assign(
+		__entry->fn = fn;
+		__entry->reg = reg;
+		__entry->is_write = is_write;
+		__entry->write_value = write_value;
+	),
+
+	TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+);
+
+TRACE_EVENT(kvm_handle_sys_reg,
+	TP_PROTO(unsigned long hsr),
+	TP_ARGS(hsr),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, hsr)
+	),
+
+	TP_fast_assign(
+		__entry->hsr = hsr;
+	),
+
+	TP_printk("HSR 0x%08lx", __entry->hsr)
+);
+
+TRACE_EVENT(kvm_set_guest_debug,
+	TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+	TP_ARGS(vcpu, guest_debug),
+
+	TP_STRUCT__entry(
+		__field(struct kvm_vcpu *, vcpu)
+		__field(__u32, guest_debug)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu = vcpu;
+		__entry->guest_debug = guest_debug;
+	),
+
+	TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+
 #endif /* _TRACE_ARM64_KVM_H */
 
 #undef TRACE_INCLUDE_PATH