aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/kvm/coproc.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/kvm/coproc.c')
-rw-r--r--arch/arm/kvm/coproc.c360
1 files changed, 360 insertions, 0 deletions
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 0c433558591c..722efe3b1675 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -16,8 +16,368 @@
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
19#include <linux/mm.h>
19#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
21#include <asm/kvm_arm.h>
22#include <asm/kvm_host.h>
23#include <asm/kvm_emulate.h>
24#include <asm/kvm_coproc.h>
25#include <asm/cacheflush.h>
26#include <asm/cputype.h>
27#include <trace/events/kvm.h>
20 28
29#include "trace.h"
30#include "coproc.h"
31
32
33/******************************************************************************
34 * Co-processor emulation
35 *****************************************************************************/
36
/*
 * Guest access to the CP10 ID registers is not supported: deliver an
 * undefined-instruction exception to the guest and keep running.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;	/* trap handled; resume the guest */
}
42
/*
 * Handle a trapped access to coprocessors 0..13.
 *
 * We can get here if the host was built without VFPv3 support but the
 * guest attempted a floating point operation; reflect an undefined
 * instruction exception back to the guest.
 */
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;	/* trap handled; resume the guest */
}
52
/*
 * CP14 (debug) load/store accesses are not emulated: inject an
 * undefined-instruction exception into the guest.
 */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;	/* trap handled; resume the guest */
}
58
/*
 * CP14 (debug) register accesses are not emulated: inject an
 * undefined-instruction exception into the guest.
 */
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;	/* trap handled; resume the guest */
}
64
65/* See note at ARM ARM B1.14.4 */
66static bool access_dcsw(struct kvm_vcpu *vcpu,
67 const struct coproc_params *p,
68 const struct coproc_reg *r)
69{
70 u32 val;
71 int cpu;
72
73 cpu = get_cpu();
74
75 if (!p->is_write)
76 return read_from_write_only(vcpu, p);
77
78 cpumask_setall(&vcpu->arch.require_dcache_flush);
79 cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
80
81 /* If we were already preempted, take the long way around */
82 if (cpu != vcpu->arch.last_pcpu) {
83 flush_cache_all();
84 goto done;
85 }
86
87 val = *vcpu_reg(vcpu, p->Rt1);
88
89 switch (p->CRm) {
90 case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
91 case 14: /* DCCISW */
92 asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
93 break;
94
95 case 10: /* DCCSW */
96 asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
97 break;
98 }
99
100done:
101 put_cpu();
102
103 return true;
104}
105
106/*
107 * We could trap ID_DFR0 and tell the guest we don't support performance
108 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
109 * NAKed, so it will read the PMCR anyway.
110 *
111 * Therefore we tell the guest we have 0 counters. Unfortunately, we
112 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
113 * all PM registers, which doesn't crash the guest kernel at least.
114 */
115static bool pm_fake(struct kvm_vcpu *vcpu,
116 const struct coproc_params *p,
117 const struct coproc_reg *r)
118{
119 if (p->is_write)
120 return ignore_write(vcpu, p);
121 else
122 return read_zero(vcpu, p);
123}
124
/* All PMU register accessors share the same RAZ/WI fake implementation. */
#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake
138
/* Architected CP15 registers.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
 *
 * Entries with a NULL access function are context-switched by assembly
 * (interrupt.S) rather than trapped; the reset hook initializes the
 * corresponding vcpu->arch.cp15[] slot.
 */
static const struct coproc_reg cp15_regs[] = {
	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* TTBR0/TTBR1: swapped by interrupt.S. */
	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },

	/* TTBCR: swapped by interrupt.S. */
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c2_TTBCR, 0x00000000 },

	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c6_IFAR },
	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * Dummy performance monitor implementation (RAZ/WI, see pm_fake).
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c10_NMRR},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },
};
217
/* Target specific emulation tables */
/* Indexed by KVM target id; entries are installed at module init time. */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
220
/*
 * Register a target-specific coproc register table.
 * NOTE(review): table->target is used unchecked as an index into
 * target_tables[] — callers are trusted to pass a valid target id
 * (< KVM_ARM_NUM_TARGETS); confirm all registration sites.
 */
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	target_tables[table->target] = table;
}
225
226/* Get specific register table for this target. */
227static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
228{
229 struct kvm_coproc_target_table *table;
230
231 table = target_tables[target];
232 *num = table->num;
233 return table->table;
234}
235
236static const struct coproc_reg *find_reg(const struct coproc_params *params,
237 const struct coproc_reg table[],
238 unsigned int num)
239{
240 unsigned int i;
241
242 for (i = 0; i < num; i++) {
243 const struct coproc_reg *r = &table[i];
244
245 if (params->is_64bit != r->is_64)
246 continue;
247 if (params->CRn != r->CRn)
248 continue;
249 if (params->CRm != r->CRm)
250 continue;
251 if (params->Op1 != r->Op1)
252 continue;
253 if (params->Op2 != r->Op2)
254 continue;
255
256 return r;
257 }
258 return NULL;
259}
260
/*
 * Emulate a trapped CP15 access: find the matching register entry
 * (target-specific table first, then the architected cp15_regs table)
 * and invoke its accessor. On success the trapped instruction is
 * skipped; on any failure an undefined-instruction exception is
 * injected into the guest. Always returns 1 (resume guest).
 */
static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			/* NOTE(review): hsr bit 25 appears to be HSR.IL
			 * (16- vs 32-bit instruction) — confirm against
			 * the ARMv7 HSR encoding. */
			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08x\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	/* No match or accessor failed: reflect an undef to the guest. */
	kvm_inject_undefined(vcpu);
	return 1;
}
295
296/**
297 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
298 * @vcpu: The VCPU pointer
299 * @run: The kvm_run struct
300 */
301int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
302{
303 struct coproc_params params;
304
305 params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
306 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
307 params.is_write = ((vcpu->arch.hsr & 1) == 0);
308 params.is_64bit = true;
309
310 params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
311 params.Op2 = 0;
312 params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
313 params.CRn = 0;
314
315 return emulate_cp15(vcpu, &params);
316}
317
318static void reset_coproc_regs(struct kvm_vcpu *vcpu,
319 const struct coproc_reg *table, size_t num)
320{
321 unsigned long i;
322
323 for (i = 0; i < num; i++)
324 if (table[i].reset)
325 table[i].reset(vcpu, &table[i]);
326}
327
328/**
329 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
330 * @vcpu: The VCPU pointer
331 * @run: The kvm_run struct
332 */
333int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
334{
335 struct coproc_params params;
336
337 params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
338 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
339 params.is_write = ((vcpu->arch.hsr & 1) == 0);
340 params.is_64bit = false;
341
342 params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
343 params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
344 params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
345 params.Rt2 = 0;
346
347 return emulate_cp15(vcpu, &params);
348}
349
350void kvm_coproc_table_init(void)
351{
352 unsigned int i;
353
354 /* Make sure tables are unique and in order. */
355 for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
356 BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
357}
358
359/**
360 * kvm_reset_coprocs - sets cp15 registers to reset value
361 * @vcpu: The VCPU pointer
362 *
363 * This function finds the right table above and sets the registers on the
364 * virtual CPU struct to their architecturally defined reset values.
365 */
21void kvm_reset_coprocs(struct kvm_vcpu *vcpu) 366void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
22{ 367{
368 size_t num;
369 const struct coproc_reg *table;
370
371 /* Catch someone adding a register without putting in reset entry. */
372 memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));
373
374 /* Generic chip reset first (so target could override). */
375 reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
376
377 table = get_target_table(vcpu->arch.target, &num);
378 reset_coproc_regs(vcpu, table, num);
379
380 for (num = 1; num < NR_CP15_REGS; num++)
381 if (vcpu->arch.cp15[num] == 0x42424242)
382 panic("Didn't reset vcpu->arch.cp15[%zi]", num);
23} 383}