diff options
author | Marc Zyngier <marc.zyngier@arm.com> | 2012-12-10 11:15:34 -0500 |
---|---|---|
committer | Marc Zyngier <marc.zyngier@arm.com> | 2013-06-07 09:03:36 -0400 |
commit | 7c8c5e6a9101ea57a1c2c9faff0917e79251a21e (patch) | |
tree | 9cfd6c3a7f2c1235911b0473bec8ce00b4f3fca0 | |
parent | 54f81d0eb93896da73d1636bca84cf90f52cabdf (diff) |
arm64: KVM: system register handling
Provide 64bit system register handling, modeled after the cp15
handling for ARM.
Reviewed-by: Christopher Covington <cov@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-rw-r--r-- | arch/arm64/include/asm/kvm_coproc.h | 51 | ||||
-rw-r--r-- | arch/arm64/include/uapi/asm/kvm.h | 29 | ||||
-rw-r--r-- | arch/arm64/kvm/sys_regs.c | 883 | ||||
-rw-r--r-- | arch/arm64/kvm/sys_regs.h | 138 | ||||
-rw-r--r-- | include/uapi/linux/kvm.h | 1 |
5 files changed, 1102 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h new file mode 100644 index 000000000000..9b4477acb554 --- /dev/null +++ b/arch/arm64/include/asm/kvm_coproc.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/include/asm/kvm_coproc.h | ||
6 | * Copyright (C) 2012 Rusty Russell IBM Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
#ifndef __ARM64_KVM_COPROC_H__
#define __ARM64_KVM_COPROC_H__

#include <linux/kvm_host.h>

/* Reset all of a vcpu's system registers to their architected reset values. */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

/* A sys_reg descriptor table together with its entry count. */
struct kvm_sys_reg_table {
	const struct sys_reg_desc *table;
	size_t num;
};

/* Per-target override tables, searched before the generic table. */
struct kvm_sys_reg_target_table {
	struct kvm_sys_reg_table table64;
};

/* Install the override table for one CPU target (called at init time). */
void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table);

/* Exit handler for trapped MSR/MRS accesses from the guest. */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);

/* Shared entry point name used by arch-generic code. */
#define kvm_coproc_table_init kvm_sys_reg_table_init
void kvm_sys_reg_table_init(void);

/* KVM_{GET,SET}_ONE_REG plumbing for sys_regs. */
struct kvm_one_reg;
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_COPROC_H__ */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 4e64570a20c9..ebac919dc0ca 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -92,6 +92,35 @@ struct kvm_sync_regs { | |||
92 | struct kvm_arch_memory_slot { | 92 | struct kvm_arch_memory_slot { |
93 | }; | 93 | }; |
94 | 94 | ||
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK		0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT	16

/* Normal registers are mapped as coprocessor 16. */
#define KVM_REG_ARM_CORE		(0x0010 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_CORE_REG(name)	(offsetof(struct kvm_regs, name) / sizeof(__u32))

/* Some registers need more space to represent values. */
#define KVM_REG_ARM_DEMUX		(0x0011 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_DEMUX_ID_MASK	0x000000000000FF00
#define KVM_REG_ARM_DEMUX_ID_SHIFT	8
#define KVM_REG_ARM_DEMUX_ID_CCSIDR	(0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
#define KVM_REG_ARM_DEMUX_VAL_MASK	0x00000000000000FF
#define KVM_REG_ARM_DEMUX_VAL_SHIFT	0

/*
 * AArch64 system registers: the index encodes the Op0/Op1/CRn/CRm/Op2
 * tuple in the low 16 bits, field layout given by the masks below.
 */
#define KVM_REG_ARM64_SYSREG		(0x0013 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM64_SYSREG_OP0_MASK	0x000000000000c000
#define KVM_REG_ARM64_SYSREG_OP0_SHIFT	14
#define KVM_REG_ARM64_SYSREG_OP1_MASK	0x0000000000003800
#define KVM_REG_ARM64_SYSREG_OP1_SHIFT	11
#define KVM_REG_ARM64_SYSREG_CRN_MASK	0x0000000000000780
#define KVM_REG_ARM64_SYSREG_CRN_SHIFT	7
#define KVM_REG_ARM64_SYSREG_CRM_MASK	0x0000000000000078
#define KVM_REG_ARM64_SYSREG_CRM_SHIFT	3
#define KVM_REG_ARM64_SYSREG_OP2_MASK	0x0000000000000007
#define KVM_REG_ARM64_SYSREG_OP2_SHIFT	0
95 | /* KVM_IRQ_LINE irq field index values */ | 124 | /* KVM_IRQ_LINE irq field index values */ |
96 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 125 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
97 | #define KVM_ARM_IRQ_TYPE_MASK 0xff | 126 | #define KVM_ARM_IRQ_TYPE_MASK 0xff |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c new file mode 100644 index 000000000000..52fff0ae3442 --- /dev/null +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -0,0 +1,883 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/kvm/coproc.c: | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Authors: Rusty Russell <rusty@rustcorp.com.au> | ||
8 | * Christoffer Dall <c.dall@virtualopensystems.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License, version 2, as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
21 | */ | ||
22 | |||
23 | #include <linux/mm.h> | ||
24 | #include <linux/kvm_host.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <asm/kvm_arm.h> | ||
27 | #include <asm/kvm_host.h> | ||
28 | #include <asm/kvm_emulate.h> | ||
29 | #include <asm/kvm_coproc.h> | ||
30 | #include <asm/cacheflush.h> | ||
31 | #include <asm/cputype.h> | ||
32 | #include <trace/events/kvm.h> | ||
33 | |||
34 | #include "sys_regs.h" | ||
35 | |||
36 | /* | ||
 * All of this file is extremely similar to the ARM coproc.c, but the
38 | * types are different. My gut feeling is that it should be pretty | ||
39 | * easy to merge, but that would be an ABI breakage -- again. VFP | ||
40 | * would also need to be abstracted. | ||
41 | */ | ||
42 | |||
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/*
 * Which cache CCSIDR represents depends on CSSELR value.
 * Select the cache via the host's CSSELR_EL1 and read back CCSIDR_EL1.
 */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();	/* make the CSSELR write visible before reading CCSIDR */
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
65 | |||
/* DC CISW: clean and invalidate data cache line by set/way. */
static void do_dc_cisw(u32 val)
{
	asm volatile("dc cisw, %x0" : : "r" (val));
	dsb();	/* wait for the maintenance operation to complete */
}
71 | |||
/* DC CSW: clean data cache line by set/way. */
static void do_dc_csw(u32 val)
{
	asm volatile("dc csw, %x0" : : "r" (val));
	dsb();	/* wait for the maintenance operation to complete */
}
77 | |||
/*
 * See note at ARM ARM B1.14.4.
 *
 * Emulate guest set/way cache maintenance: perform the operation on the
 * current physical CPU and mark all other CPUs as needing a dcache flush
 * (via require_dcache_flush) before they next run this vcpu.
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	unsigned long val;
	int cpu;

	/* Set/way operations are write-only. */
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	/* Operand is the guest's Rt register value. */
	val = *vcpu_reg(vcpu, p->Rt);

	switch (p->CRm) {
	case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14: /* DCCISW */
		do_dc_cisw(val);
		break;

	case 10: /* DCCSW */
		do_dc_csw(val);
		break;
	}

done:
	put_cpu();

	return true;
}
118 | |||
119 | /* | ||
120 | * We could trap ID_DFR0 and tell the guest we don't support performance | ||
121 | * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was | ||
122 | * NAKed, so it will read the PMCR anyway. | ||
123 | * | ||
124 | * Therefore we tell the guest we have 0 counters. Unfortunately, we | ||
125 | * must always support PMCCNTR (the cycle counter): we just RAZ/WI for | ||
126 | * all PM registers, which doesn't crash the guest kernel at least. | ||
127 | */ | ||
128 | static bool pm_fake(struct kvm_vcpu *vcpu, | ||
129 | const struct sys_reg_params *p, | ||
130 | const struct sys_reg_desc *r) | ||
131 | { | ||
132 | if (p->is_write) | ||
133 | return ignore_write(vcpu, p); | ||
134 | else | ||
135 | return read_zero(vcpu, p); | ||
136 | } | ||
137 | |||
/* Reset AMAIR_EL1 for this vcpu to the host's current AMAIR_EL1 value. */
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}
145 | |||
/* Reset MPIDR_EL1: synthesize an affinity value from the vcpu id. */
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/*
	 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
	 * Bit 31 is RES1 in MPIDR_EL1 format, hence the (1UL << 31).
	 */
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}
153 | |||
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 * (find_reg does a linear scan, but the ordering is part of the
 * userspace-visible register enumeration).
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  NULL, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  NULL, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, FAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  pm_fake },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  pm_fake },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  NULL, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },
	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  NULL, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMU registers: all RAZ/WI via pm_fake (no counters advertised). */
	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  pm_fake },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  pm_fake },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  pm_fake },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  pm_fake },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  pm_fake },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  pm_fake },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  pm_fake },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  pm_fake },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  pm_fake },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  pm_fake },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  pm_fake },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  pm_fake },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  pm_fake },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },
};
280 | |||
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

/*
 * Install the override table for one CPU target.
 * NOTE(review): no locking here — presumably only called during
 * single-threaded init; confirm against callers.
 */
void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}
289 | |||
290 | /* Get specific register table for this target. */ | ||
291 | static const struct sys_reg_desc *get_target_table(unsigned target, size_t *num) | ||
292 | { | ||
293 | struct kvm_sys_reg_target_table *table; | ||
294 | |||
295 | table = target_tables[target]; | ||
296 | *num = table->table64.num; | ||
297 | return table->table64.table; | ||
298 | } | ||
299 | |||
300 | static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, | ||
301 | const struct sys_reg_desc table[], | ||
302 | unsigned int num) | ||
303 | { | ||
304 | unsigned int i; | ||
305 | |||
306 | for (i = 0; i < num; i++) { | ||
307 | const struct sys_reg_desc *r = &table[i]; | ||
308 | |||
309 | if (params->Op0 != r->Op0) | ||
310 | continue; | ||
311 | if (params->Op1 != r->Op1) | ||
312 | continue; | ||
313 | if (params->CRn != r->CRn) | ||
314 | continue; | ||
315 | if (params->CRm != r->CRm) | ||
316 | continue; | ||
317 | if (params->Op2 != r->Op2) | ||
318 | continue; | ||
319 | |||
320 | return r; | ||
321 | } | ||
322 | return NULL; | ||
323 | } | ||
324 | |||
/*
 * Emulate one trapped sys_reg access: look the register up in the
 * target-specific table, then the generic table, and invoke its access
 * handler. Injects an UNDEF into the guest for unknown registers or
 * failed accesses. Always returns 1 (handled, resume guest).
 */
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}
361 | |||
362 | static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, | ||
363 | const struct sys_reg_desc *table, size_t num) | ||
364 | { | ||
365 | unsigned long i; | ||
366 | |||
367 | for (i = 0; i < num; i++) | ||
368 | if (table[i].reset) | ||
369 | table[i].reset(vcpu, &table[i]); | ||
370 | } | ||
371 | |||
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 *
 * Decodes the trapped instruction's fields out of the exception
 * syndrome (ISS encoding for trapped MSR/MRS — see the shifts below;
 * confirm against the ARMv8 ARM ESR_EL2 description) and hands off to
 * emulate_sys_reg().
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);

	params.Op0 = (esr >> 20) & 3;		/* bits [21:20] */
	params.Op1 = (esr >> 14) & 0x7;		/* bits [16:14] */
	params.CRn = (esr >> 10) & 0xf;		/* bits [13:10] */
	params.CRm = (esr >> 1) & 0xf;		/* bits [4:1] */
	params.Op2 = (esr >> 17) & 0x7;		/* bits [19:17] */
	params.Rt = (esr >> 5) & 0x1f;		/* bits [9:5] */
	params.is_write = !(esr & 1);		/* bit 0: 0 = write (MSR) */

	return emulate_sys_reg(vcpu, &params);
}
392 | |||
393 | /****************************************************************************** | ||
394 | * Userspace API | ||
395 | *****************************************************************************/ | ||
396 | |||
397 | static bool index_to_params(u64 id, struct sys_reg_params *params) | ||
398 | { | ||
399 | switch (id & KVM_REG_SIZE_MASK) { | ||
400 | case KVM_REG_SIZE_U64: | ||
401 | /* Any unused index bits means it's not valid. */ | ||
402 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | ||
403 | | KVM_REG_ARM_COPROC_MASK | ||
404 | | KVM_REG_ARM64_SYSREG_OP0_MASK | ||
405 | | KVM_REG_ARM64_SYSREG_OP1_MASK | ||
406 | | KVM_REG_ARM64_SYSREG_CRN_MASK | ||
407 | | KVM_REG_ARM64_SYSREG_CRM_MASK | ||
408 | | KVM_REG_ARM64_SYSREG_OP2_MASK)) | ||
409 | return false; | ||
410 | params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) | ||
411 | >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); | ||
412 | params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) | ||
413 | >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); | ||
414 | params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) | ||
415 | >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); | ||
416 | params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) | ||
417 | >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); | ||
418 | params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) | ||
419 | >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); | ||
420 | return true; | ||
421 | default: | ||
422 | return false; | ||
423 | } | ||
424 | } | ||
425 | |||
/*
 * Decode an index value, and find the sys_reg_desc entry.
 * Returns NULL for non-SYSREG indices, undecodable indices, unknown
 * registers, and registers with no slot in the vcpu sys_reg array
 * (r->reg == 0) — the latter are handled as invariant registers instead.
 */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	/* Target-specific table takes precedence over the generic one. */
	table = get_target_table(vcpu->arch.target, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}
452 | |||
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

/*
 * Generate a get_<reg>() helper that reads the host's <reg> and caches
 * it in the (nominally const) descriptor's ->val field.
 */
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
491 | |||
/*
 * ->val is filled in by kvm_sys_reg_table_init().
 * Same ascending Op0/Op1/CRn/CRm/Op2 ordering as sys_reg_descs.
 */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};
533 | |||
534 | static int reg_from_user(void *val, const void __user *uaddr, u64 id) | ||
535 | { | ||
536 | /* This Just Works because we are little endian. */ | ||
537 | if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) | ||
538 | return -EFAULT; | ||
539 | return 0; | ||
540 | } | ||
541 | |||
542 | static int reg_to_user(void __user *uaddr, const void *val, u64 id) | ||
543 | { | ||
544 | /* This Just Works because we are little endian. */ | ||
545 | if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) | ||
546 | return -EFAULT; | ||
547 | return 0; | ||
548 | } | ||
549 | |||
/*
 * KVM_GET_ONE_REG path for invariant registers: return the cached host
 * value for @id, or -ENOENT if it isn't an invariant register.
 */
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}
564 | |||
/*
 * KVM_SET_ONE_REG path for invariant registers: accept the write only
 * if it matches the cached host value (-EINVAL otherwise).
 */
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
588 | |||
589 | static bool is_valid_cache(u32 val) | ||
590 | { | ||
591 | u32 level, ctype; | ||
592 | |||
593 | if (val >= CSSELR_MAX) | ||
594 | return -ENOENT; | ||
595 | |||
596 | /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ | ||
597 | level = (val >> 1); | ||
598 | ctype = (cache_levels >> (level * 3)) & 7; | ||
599 | |||
600 | switch (ctype) { | ||
601 | case 0: /* No cache */ | ||
602 | return false; | ||
603 | case 1: /* Instruction cache only */ | ||
604 | return (val & 1); | ||
605 | case 2: /* Data cache only */ | ||
606 | case 4: /* Unified cache */ | ||
607 | return !(val & 1); | ||
608 | case 3: /* Separate instruction and data caches */ | ||
609 | return true; | ||
610 | default: /* Reserved: we can't know instruction or data. */ | ||
611 | return false; | ||
612 | } | ||
613 | } | ||
614 | |||
/*
 * KVM_GET_ONE_REG for demultiplexed (KVM_REG_ARM_DEMUX) registers:
 * currently only CCSIDR, keyed by CSSELR value in the id's VAL field.
 */
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		/* CCSIDR entries are 32 bits wide. */
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
639 | |||
/*
 * KVM_SET_ONE_REG for demultiplexed registers: CCSIDR values are
 * host-invariant, so a write only succeeds if it matches the host.
 */
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		/* CCSIDR entries are 32 bits wide. */
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
670 | |||
/*
 * KVM_GET_ONE_REG entry point for sys_regs: dispatches to the demux
 * handler, the per-vcpu sys_reg array, or the invariant table.
 */
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	/* All non-demux sys_regs are 64 bits wide. */
	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
688 | |||
/*
 * KVM_SET_ONE_REG entry point for sys_regs: mirror of the get path;
 * invariant registers only accept writes matching the host value.
 */
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	/* All non-demux sys_regs are 64 bits wide. */
	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
706 | |||
707 | static unsigned int num_demux_regs(void) | ||
708 | { | ||
709 | unsigned int i, count = 0; | ||
710 | |||
711 | for (i = 0; i < CSSELR_MAX; i++) | ||
712 | if (is_valid_cache(i)) | ||
713 | count++; | ||
714 | |||
715 | return count; | ||
716 | } | ||
717 | |||
718 | static int write_demux_regids(u64 __user *uindices) | ||
719 | { | ||
720 | u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; | ||
721 | unsigned int i; | ||
722 | |||
723 | val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; | ||
724 | for (i = 0; i < CSSELR_MAX; i++) { | ||
725 | if (!is_valid_cache(i)) | ||
726 | continue; | ||
727 | if (put_user(val | i, uindices)) | ||
728 | return -EFAULT; | ||
729 | uindices++; | ||
730 | } | ||
731 | return 0; | ||
732 | } | ||
733 | |||
734 | static u64 sys_reg_to_index(const struct sys_reg_desc *reg) | ||
735 | { | ||
736 | return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | | ||
737 | KVM_REG_ARM64_SYSREG | | ||
738 | (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | | ||
739 | (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | | ||
740 | (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | | ||
741 | (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | | ||
742 | (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); | ||
743 | } | ||
744 | |||
745 | static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) | ||
746 | { | ||
747 | if (!*uind) | ||
748 | return true; | ||
749 | |||
750 | if (put_user(sys_reg_to_index(reg), *uind)) | ||
751 | return false; | ||
752 | |||
753 | (*uind)++; | ||
754 | return true; | ||
755 | } | ||
756 | |||
/*
 * Merge-walk the target-specific and generic sys_reg tables, reporting
 * every saved register to userspace via copy_reg_to_user() (which only
 * counts when uind is NULL).  Returns the number of registers emitted,
 * or -EFAULT if a userspace write faulted.
 */
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		/*
		 * cmp_sys_reg() treats a NULL side as "greater", so an
		 * exhausted table stops contributing entries.
		 */
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		/*
		 * Advance whichever side(s) matched; on cmp == 0 both
		 * advance, so the overridden generic entry is skipped.
		 */
		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
799 | |||
800 | unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) | ||
801 | { | ||
802 | return ARRAY_SIZE(invariant_sys_regs) | ||
803 | + num_demux_regs() | ||
804 | + walk_sys_regs(vcpu, (u64 __user *)NULL); | ||
805 | } | ||
806 | |||
807 | int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | ||
808 | { | ||
809 | unsigned int i; | ||
810 | int err; | ||
811 | |||
812 | /* Then give them all the invariant registers' indices. */ | ||
813 | for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { | ||
814 | if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) | ||
815 | return -EFAULT; | ||
816 | uindices++; | ||
817 | } | ||
818 | |||
819 | err = walk_sys_regs(vcpu, uindices); | ||
820 | if (err < 0) | ||
821 | return err; | ||
822 | uindices += err; | ||
823 | |||
824 | return write_demux_regids(uindices); | ||
825 | } | ||
826 | |||
/*
 * One-time init: sanity-check the generic table's ordering, snapshot
 * the invariant registers from the host CPU, and compute the
 * cache_levels bitmap used by is_valid_cache().
 */
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
		BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 * If software reads the Cache Type fields from Ctype1
	 * upwards, once it has seen a value of 0b000, no caches
	 * exist at further-out levels of the hierarchy. So, for
	 * example, if Ctype3 is the first Cache Type field with a
	 * value of 0b000, the values of Ctype4 to Ctype7 must be
	 * ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	/* Each Ctype field is 3 bits wide; stop at the first 0b000. */
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
858 | |||
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_sys_reg_descs(vcpu, table, num);

	/*
	 * A register still holding the 0x42 poison pattern was reset by
	 * neither table: panic rather than run the guest with junk state.
	 */
	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h new file mode 100644 index 000000000000..d50d3722998e --- /dev/null +++ b/arch/arm64/kvm/sys_regs.h | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from arch/arm/kvm/coproc.h | ||
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
7 | * Authors: Christoffer Dall <c.dall@virtualopensystems.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License, version 2, as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__ | ||
23 | #define __ARM64_KVM_SYS_REGS_LOCAL_H__ | ||
24 | |||
/*
 * A decoded system register access trapped from the guest: the
 * instruction's encoding fields, the general purpose register
 * involved, and the direction of the access.
 */
struct sys_reg_params {
	u8 Op0;
	u8 Op1;
	u8 CRn;
	u8 CRm;
	u8 Op2;
	u8 Rt;		/* Index of the guest GPR used by the access. */
	bool is_write;	/* true for a write (MSR), false for a read (MRS). */
};
34 | |||
/*
 * One entry in a sys_reg trap table: the encoding of a system
 * register, what to do when the guest accesses it, and how to reset
 * its shadow copy.
 */
struct sys_reg_desc {
	/* MRS/MSR instruction which accesses it. */
	u8 Op0;
	u8 Op1;
	u8 CRn;
	u8 CRm;
	u8 Op2;

	/* Trapped access from guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *,
		       const struct sys_reg_params *,
		       const struct sys_reg_desc *);

	/* Initialization for vcpu. */
	void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);

	/* Index into sys_reg[], or 0 if we don't need to save it. */
	int reg;

	/* Value (usually reset value) */
	u64 val;
};
57 | |||
/* Dump a trapped access, formatted as a ready-to-paste trap-table entry. */
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
	/* Look, we even formatted it for you to paste into the table! */
	kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
		      p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
}
64 | |||
/* Access handler that silently discards guest writes (WI behaviour). */
static inline bool ignore_write(struct kvm_vcpu *vcpu,
				const struct sys_reg_params *p)
{
	return true;
}
70 | |||
/* Access handler that makes the guest read zero (RAZ behaviour). */
static inline bool read_zero(struct kvm_vcpu *vcpu,
			     const struct sys_reg_params *p)
{
	*vcpu_reg(vcpu, p->Rt) = 0;
	return true;
}
77 | |||
/*
 * Handler for a guest write to a read-only register: log the access
 * (formatted for easy addition to a trap table) and return false.
 */
static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
				      const struct sys_reg_params *params)
{
	kvm_debug("sys_reg write to read-only register at: %lx\n",
		  *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	return false;
}
86 | |||
87 | static inline bool read_from_write_only(struct kvm_vcpu *vcpu, | ||
88 | const struct sys_reg_params *params) | ||
89 | { | ||
90 | kvm_debug("sys_reg read to write-only register at: %lx\n", | ||
91 | *vcpu_pc(vcpu)); | ||
92 | print_sys_reg_instr(params); | ||
93 | return false; | ||
94 | } | ||
95 | |||
/* Reset functions */
/*
 * Fill the shadow register with a distinctive poison value.  The
 * descriptor must name a saved register (reg != 0) within range.
 */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
				 const struct sys_reg_desc *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
	vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
}
104 | |||
/* Reset the shadow register to the constant value in the descriptor. */
static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
	vcpu_sys_reg(vcpu, r->reg) = r->val;
}
111 | |||
/*
 * Compare two descriptors by encoding (Op0, Op1, CRn, CRm, Op2,
 * lexicographically), returning <0, 0 or >0 comparator-style.  A NULL
 * descriptor compares greater than anything, which lets a caller merge
 * two tables and keep going once one side is exhausted.
 */
static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
			      const struct sys_reg_desc *i2)
{
	/* Identical pointers (including both NULL) are a caller bug. */
	BUG_ON(i1 == i2);
	if (!i1)
		return 1;
	else if (!i2)
		return -1;
	if (i1->Op0 != i2->Op0)
		return i1->Op0 - i2->Op0;
	if (i1->Op1 != i2->Op1)
		return i1->Op1 - i2->Op1;
	if (i1->CRn != i2->CRn)
		return i1->CRn - i2->CRn;
	if (i1->CRm != i2->CRm)
		return i1->CRm - i2->CRm;
	return i1->Op2 - i2->Op2;
}
130 | |||
131 | |||
/* Shorthands for designated initializers in the sys_reg trap tables. */
#define Op0(_x) .Op0 = _x
#define Op1(_x) .Op1 = _x
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
#define Op2(_x) .Op2 = _x
137 | |||
138 | #endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */ | ||
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a5c86fc34a37..2d1bcb891468 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h | |||
@@ -783,6 +783,7 @@ struct kvm_dirty_tlb { | |||
783 | #define KVM_REG_IA64 0x3000000000000000ULL | 783 | #define KVM_REG_IA64 0x3000000000000000ULL |
784 | #define KVM_REG_ARM 0x4000000000000000ULL | 784 | #define KVM_REG_ARM 0x4000000000000000ULL |
785 | #define KVM_REG_S390 0x5000000000000000ULL | 785 | #define KVM_REG_S390 0x5000000000000000ULL |
786 | #define KVM_REG_ARM64 0x6000000000000000ULL | ||
786 | 787 | ||
787 | #define KVM_REG_SIZE_SHIFT 52 | 788 | #define KVM_REG_SIZE_SHIFT 52 |
788 | #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL | 789 | #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL |