authorLinus Torvalds <torvalds@linux-foundation.org>2013-05-10 10:48:05 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-05-10 10:48:05 -0400
commitdaf799cca8abbf7f3e253ecf1d41d244070773d7 (patch)
tree6fb27ff60b820ae0eeb906c8a5d8d7f93f89cd8b /arch/mips/kvm
parent6019958d146a4f127dae727a930f902c92531e6e (diff)
parentb22d1b6a91ca4260f869e349179ae53f18c664db (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:

 - More work on DT support for various platforms

 - Various fixes that were too late to make it straight into 3.9

 - Improved platform support, in particular the Netlogic XLR and
   BCM63xx, and the SEAD3 and Malta eval boards.

 - Support for several Ralink SOC families.

 - Complete support for the microMIPS ASE which basically reencodes the
   existing MIPS32/MIPS64 ISA to use non-constant size instructions.

 - Some fallout from LTO work which removes old cruft and will generally
   make the MIPS kernel easier to maintain and resistant to compiler
   optimization, even in absence of LTO.

 - KVM support. While MIPS has announced hardware virtualization
   extensions, this KVM extension uses trap and emulate mode for
   virtualization of MIPS32. More KVM work to add support for VZ
   hardware virtualization extensions and MIPS64 will probably already
   be merged for 3.11.

Most of this has been sitting in -next for a long time. All defconfigs
have been build or run time tested except three for which fixes are
being sent by other maintainers.

Semantic conflict with kvm updates done as per Ralf

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (118 commits)
  MIPS: Add new GIC clockevent driver.
  MIPS: Formatting clean-ups for clocksources.
  MIPS: Refactor GIC clocksource code.
  MIPS: Move 'gic_frequency' to common location.
  MIPS: Move 'gic_present' to common location.
  MIPS: MIPS16e: Add unaligned access support.
  MIPS: MIPS16e: Support handling of delay slots.
  MIPS: MIPS16e: Add instruction formats.
  MIPS: microMIPS: Optimise 'strnlen' core library function.
  MIPS: microMIPS: Optimise 'strlen' core library function.
  MIPS: microMIPS: Optimise 'strncpy' core library function.
  MIPS: microMIPS: Optimise 'memset' core library function.
  MIPS: microMIPS: Add configuration option for microMIPS kernel.
  MIPS: microMIPS: Disable LL/SC and fix linker bug.
  MIPS: microMIPS: Add vdso support.
  MIPS: microMIPS: Add unaligned access support.
  MIPS: microMIPS: Support handling of delay slots.
  MIPS: microMIPS: Add support for exception handling.
  MIPS: microMIPS: Floating point support.
  MIPS: microMIPS: Fix macro naming in micro-assembler.
  ...
Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--   arch/mips/kvm/00README.txt            31
-rw-r--r--   arch/mips/kvm/Kconfig                 49
-rw-r--r--   arch/mips/kvm/Makefile                13
-rw-r--r--   arch/mips/kvm/kvm_cb.c                14
-rw-r--r--   arch/mips/kvm/kvm_locore.S           650
-rw-r--r--   arch/mips/kvm/kvm_mips.c             958
-rw-r--r--   arch/mips/kvm/kvm_mips_comm.h         23
-rw-r--r--   arch/mips/kvm/kvm_mips_commpage.c     37
-rw-r--r--   arch/mips/kvm/kvm_mips_dyntrans.c    149
-rw-r--r--   arch/mips/kvm/kvm_mips_emul.c       1826
-rw-r--r--   arch/mips/kvm/kvm_mips_int.c         243
-rw-r--r--   arch/mips/kvm/kvm_mips_int.h          49
-rw-r--r--   arch/mips/kvm/kvm_mips_opcode.h       24
-rw-r--r--   arch/mips/kvm/kvm_mips_stats.c        82
-rw-r--r--   arch/mips/kvm/kvm_tlb.c              928
-rw-r--r--   arch/mips/kvm/kvm_trap_emul.c        482
-rw-r--r--   arch/mips/kvm/trace.h                 46
17 files changed, 5604 insertions, 0 deletions
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644
index 000000000000..51617e481aa3
--- /dev/null
+++ b/arch/mips/kvm/00README.txt
@@ -0,0 +1,31 @@
1KVM/MIPS Trap & Emulate Release Notes
2=====================================
3
4(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
5 Malta Board with FPGA based 34K
6 Sigma Designs TangoX board with a 24K based 8654 SoC.
7 Malta Board with 74K @ 1GHz
8
9(2) Both Guest kernel and Guest Userspace execute in UM.
10 Guest User address space: 0x00000000 -> 0x40000000
11 Guest Kernel Unmapped: 0x40000000 -> 0x60000000
12 Guest Kernel Mapped: 0x60000000 -> 0x80000000
13
14 Guest Usermode virtual memory is limited to 1GB.
15
16(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
17 Note that due to cache aliasing issues, 4K page sizes are NOT supported.
18
19(4) No HugeTLB Support
20 Both the host kernel and Guest kernel should have the page size set to 16K.
21 This will be implemented in a future release.
22
23(5) KVM/MIPS does not have support for SMP Guests
24 Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers:
25 LL/TLBP/SC. Since the TLBP instruction causes a trap the reservation gets cleared
26 when we ERET back to the guest. This causes the guest to hang in an infinite loop.
27 This will be fixed in a future release.
28
29(6) Use Host FPU
30 Currently KVM/MIPS emulates a 24K CPU without an FPU.
31 This will be fixed in a future release.
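
As a rough illustration of the guest address map in note (2) above, the stand-alone
sketch below (plain user-space C; the helper and enum names are hypothetical and not
part of the KVM sources) classifies a guest virtual address into the three regions:

    #include <stdio.h>

    enum guest_seg { GUEST_USER, GUEST_KSEG_UNMAPPED, GUEST_KSEG_MAPPED, GUEST_INVALID };

    /* Boundaries taken from the README above. */
    static enum guest_seg classify_gva(unsigned long gva)
    {
            if (gva < 0x40000000UL)
                    return GUEST_USER;           /* guest user space (1GB)       */
            if (gva < 0x60000000UL)
                    return GUEST_KSEG_UNMAPPED;  /* guest kernel unmapped region */
            if (gva < 0x80000000UL)
                    return GUEST_KSEG_MAPPED;    /* guest kernel mapped region   */
            return GUEST_INVALID;                /* not a guest virtual address  */
    }

    int main(void)
    {
            printf("%d %d %d\n", classify_gva(0x00400000UL),
                   classify_gva(0x40001000UL), classify_gva(0x70000000UL));
            return 0;
    }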
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 000000000000..2c15590e55f7
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,49 @@
1#
2# KVM configuration
3#
4source "virt/kvm/Kconfig"
5
6menuconfig VIRTUALIZATION
7 bool "Virtualization"
8 depends on HAVE_KVM
9 ---help---
10 Say Y here to get to see options for using your Linux host to run
11 other operating systems inside virtual machines (guests).
12 This option alone does not add any kernel code.
13
14 If you say N, all options in this submenu will be skipped and disabled.
15
16if VIRTUALIZATION
17
18config KVM
19 tristate "Kernel-based Virtual Machine (KVM) support"
20 depends on HAVE_KVM
21 select PREEMPT_NOTIFIERS
22 select ANON_INODES
23 select KVM_MMIO
24 ---help---
25 Support for hosting Guest kernels.
26 Currently supported on MIPS32 processors.
27
28config KVM_MIPS_DYN_TRANS
29 bool "KVM/MIPS: Dynamic binary translation to reduce traps"
30 depends on KVM
31 ---help---
32 When running in Trap & Emulate mode, patch privileged
33 instructions to reduce the number of traps.
34
35 If unsure, say Y.
36
37config KVM_MIPS_DEBUG_COP0_COUNTERS
38 bool "Maintain counters for COP0 accesses"
39 depends on KVM
40 ---help---
41 Maintain statistics for Guest COP0 accesses.
42 A histogram of COP0 accesses is printed when the VM is
43 shut down.
44
45 If unsure, say N.
46
47source drivers/vhost/Kconfig
48
49endif # VIRTUALIZATION
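
The options above map onto a kernel configuration in the usual way; a plausible
.config fragment that builds the trap-and-emulate KVM as a module, with dynamic
translation on and the COP0 counters off (illustrative only, using just the
symbols defined in this Kconfig), would be:

    CONFIG_VIRTUALIZATION=y
    CONFIG_KVM=m
    CONFIG_KVM_MIPS_DYN_TRANS=y
    # CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS is not set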
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 000000000000..78d87bbc99db
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,13 @@
1# Makefile for KVM support for MIPS
2#
3
4common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
5
6EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
7
8kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
9 kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
10 kvm_mips_dyntrans.o kvm_trap_emul.o
11
12obj-$(CONFIG_KVM) += kvm.o
13obj-y += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644
index 000000000000..313c2e37b978
--- /dev/null
+++ b/arch/mips/kvm/kvm_cb.c
@@ -0,0 +1,14 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 * Authors: Yann Le Du <ledu@kymasys.com>
8 */
9
10#include <linux/export.h>
11#include <linux/kvm_host.h>
12
13struct kvm_mips_callbacks *kvm_mips_callbacks;
14EXPORT_SYMBOL(kvm_mips_callbacks);
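
kvm_mips_callbacks is the single hook through which an emulation backend (here,
trap and emulate) plugs into the arch code; kvm_arch_init() in kvm_mips.c later
in this diff fills it via kvm_mips_emulation_init(). The pattern can be sketched
in stand-alone C (hypothetical struct and function names; the real callback table
is declared in the KVM headers):

    #include <stdio.h>

    /* Stand-in for struct kvm_mips_callbacks: a table of backend entry points. */
    struct demo_callbacks {
            int (*vm_init)(void);
            int (*vcpu_setup)(void);
    };

    static int te_vm_init(void)    { puts("trap & emulate: vm_init");    return 0; }
    static int te_vcpu_setup(void) { puts("trap & emulate: vcpu_setup"); return 0; }

    static struct demo_callbacks trap_emul_callbacks = {
            .vm_init    = te_vm_init,
            .vcpu_setup = te_vcpu_setup,
    };

    /* Mirrors kvm_mips_callbacks: one global pointer the backend installs once. */
    static struct demo_callbacks *demo_mips_callbacks;

    int main(void)
    {
            demo_mips_callbacks = &trap_emul_callbacks;  /* what the init hook would do */
            demo_mips_callbacks->vm_init();
            demo_mips_callbacks->vcpu_setup();
            return 0;
    }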
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644
index 000000000000..dca2aa665993
--- /dev/null
+++ b/arch/mips/kvm/kvm_locore.S
@@ -0,0 +1,650 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Main entry point for the guest, exception handling.
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/regdef.h>
15#include <asm/mipsregs.h>
16#include <asm/stackframe.h>
17#include <asm/asm-offsets.h>
18
19
20#define _C_LABEL(x) x
21#define MIPSX(name) mips32_ ## name
22#define CALLFRAME_SIZ 32
23
24/*
25 * VECTOR
26 * exception vector entrypoint
27 */
28#define VECTOR(x, regmask) \
29 .ent _C_LABEL(x),0; \
30 EXPORT(x);
31
32#define VECTOR_END(x) \
33 EXPORT(x);
34
35/* Overload, Danger Will Robinson!! */
36#define PT_HOST_ASID PT_BVADDR
37#define PT_HOST_USERLOCAL PT_EPC
38
39#define CP0_DDATA_LO $28,3
40#define CP0_EBASE $15,1
41
42#define CP0_INTCTL $12,1
43#define CP0_SRSCTL $12,2
44#define CP0_SRSMAP $12,3
45#define CP0_HWRENA $7,0
46
47/* Resume Flags */
48#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
49
50#define RESUME_GUEST 0
51#define RESUME_HOST RESUME_FLAG_HOST
52
53/*
54 * __kvm_mips_vcpu_run: entry point to the guest
55 * a0: run
56 * a1: vcpu
57 */
58
59FEXPORT(__kvm_mips_vcpu_run)
60 .set push
61 .set noreorder
62 .set noat
63
64 /* k0/k1 not being used in host kernel context */
65 addiu k1,sp, -PT_SIZE
66 LONG_S $0, PT_R0(k1)
67 LONG_S $1, PT_R1(k1)
68 LONG_S $2, PT_R2(k1)
69 LONG_S $3, PT_R3(k1)
70
71 LONG_S $4, PT_R4(k1)
72 LONG_S $5, PT_R5(k1)
73 LONG_S $6, PT_R6(k1)
74 LONG_S $7, PT_R7(k1)
75
76 LONG_S $8, PT_R8(k1)
77 LONG_S $9, PT_R9(k1)
78 LONG_S $10, PT_R10(k1)
79 LONG_S $11, PT_R11(k1)
80 LONG_S $12, PT_R12(k1)
81 LONG_S $13, PT_R13(k1)
82 LONG_S $14, PT_R14(k1)
83 LONG_S $15, PT_R15(k1)
84 LONG_S $16, PT_R16(k1)
85 LONG_S $17, PT_R17(k1)
86
87 LONG_S $18, PT_R18(k1)
88 LONG_S $19, PT_R19(k1)
89 LONG_S $20, PT_R20(k1)
90 LONG_S $21, PT_R21(k1)
91 LONG_S $22, PT_R22(k1)
92 LONG_S $23, PT_R23(k1)
93 LONG_S $24, PT_R24(k1)
94 LONG_S $25, PT_R25(k1)
95
96 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
97
98 LONG_S $28, PT_R28(k1)
99 LONG_S $29, PT_R29(k1)
100 LONG_S $30, PT_R30(k1)
101 LONG_S $31, PT_R31(k1)
102
103 /* Save hi/lo */
104 mflo v0
105 LONG_S v0, PT_LO(k1)
106 mfhi v1
107 LONG_S v1, PT_HI(k1)
108
109 /* Save host status */
110 mfc0 v0, CP0_STATUS
111 LONG_S v0, PT_STATUS(k1)
112
113 /* Save host ASID, shove it into the BVADDR location */
114 mfc0 v1,CP0_ENTRYHI
115 andi v1, 0xff
116 LONG_S v1, PT_HOST_ASID(k1)
117
118 /* Save DDATA_LO, will be used to store pointer to vcpu */
119 mfc0 v1, CP0_DDATA_LO
120 LONG_S v1, PT_HOST_USERLOCAL(k1)
121
122 /* DDATA_LO has pointer to vcpu */
123 mtc0 a1,CP0_DDATA_LO
124
125 /* Offset into vcpu->arch */
126 addiu k1, a1, VCPU_HOST_ARCH
127
128 /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
129 LONG_S sp, VCPU_HOST_STACK(k1)
130
131 /* Save the kernel gp as well */
132 LONG_S gp, VCPU_HOST_GP(k1)
133
134 /* Setup status register for running the guest in UM, interrupts are disabled */
135 li k0,(ST0_EXL | KSU_USER| ST0_BEV)
136 mtc0 k0,CP0_STATUS
137 ehb
138
139 /* load up the new EBASE */
140 LONG_L k0, VCPU_GUEST_EBASE(k1)
141 mtc0 k0,CP0_EBASE
142
143 /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
144 * but make sure that timer interrupts are enabled
145 */
146 li k0,(ST0_EXL | KSU_USER | ST0_IE)
147 andi v0, v0, ST0_IM
148 or k0, k0, v0
149 mtc0 k0,CP0_STATUS
150 ehb
151
152
153 /* Set Guest EPC */
154 LONG_L t0, VCPU_PC(k1)
155 mtc0 t0, CP0_EPC
156
157FEXPORT(__kvm_mips_load_asid)
158 /* Set the ASID for the Guest Kernel */
159 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
160 /* addresses shift to 0x80000000 */
161 bltz t0, 1f /* If kernel */
162 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
163 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
1641:
165 /* t1: contains the base of the ASID array, need to get the cpu id */
166 LONG_L t2, TI_CPU($28) /* smp_processor_id */
167 sll t2, t2, 2 /* x4 */
168 addu t3, t1, t2
169 LONG_L k0, (t3)
170 andi k0, k0, 0xff
171 mtc0 k0,CP0_ENTRYHI
172 ehb
173
174 /* Disable RDHWR access */
175 mtc0 zero, CP0_HWRENA
176
177 /* Now load up the Guest Context from VCPU */
178 LONG_L $1, VCPU_R1(k1)
179 LONG_L $2, VCPU_R2(k1)
180 LONG_L $3, VCPU_R3(k1)
181
182 LONG_L $4, VCPU_R4(k1)
183 LONG_L $5, VCPU_R5(k1)
184 LONG_L $6, VCPU_R6(k1)
185 LONG_L $7, VCPU_R7(k1)
186
187 LONG_L $8, VCPU_R8(k1)
188 LONG_L $9, VCPU_R9(k1)
189 LONG_L $10, VCPU_R10(k1)
190 LONG_L $11, VCPU_R11(k1)
191 LONG_L $12, VCPU_R12(k1)
192 LONG_L $13, VCPU_R13(k1)
193 LONG_L $14, VCPU_R14(k1)
194 LONG_L $15, VCPU_R15(k1)
195 LONG_L $16, VCPU_R16(k1)
196 LONG_L $17, VCPU_R17(k1)
197 LONG_L $18, VCPU_R18(k1)
198 LONG_L $19, VCPU_R19(k1)
199 LONG_L $20, VCPU_R20(k1)
200 LONG_L $21, VCPU_R21(k1)
201 LONG_L $22, VCPU_R22(k1)
202 LONG_L $23, VCPU_R23(k1)
203 LONG_L $24, VCPU_R24(k1)
204 LONG_L $25, VCPU_R25(k1)
205
206 /* k0/k1 loaded up later */
207
208 LONG_L $28, VCPU_R28(k1)
209 LONG_L $29, VCPU_R29(k1)
210 LONG_L $30, VCPU_R30(k1)
211 LONG_L $31, VCPU_R31(k1)
212
213 /* Restore hi/lo */
214 LONG_L k0, VCPU_LO(k1)
215 mtlo k0
216
217 LONG_L k0, VCPU_HI(k1)
218 mthi k0
219
220FEXPORT(__kvm_mips_load_k0k1)
221 /* Restore the guest's k0/k1 registers */
222 LONG_L k0, VCPU_R26(k1)
223 LONG_L k1, VCPU_R27(k1)
224
225 /* Jump to guest */
226 eret
227 .set pop
228
229VECTOR(MIPSX(exception), unknown)
230/*
231 * Find out what mode we came from and jump to the proper handler.
232 */
233 .set push
234 .set noat
235 .set noreorder
236 mtc0 k0, CP0_ERROREPC #01: Save guest k0
237 ehb #02:
238
239 mfc0 k0, CP0_EBASE #02: Get EBASE
240 srl k0, k0, 10 #03: Get rid of CPUNum
241 sll k0, k0, 10 #04
242 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
243 addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
244 j k0 #07: jump to the function
245 nop #08: branch delay slot
246 .set pop
247VECTOR_END(MIPSX(exceptionEnd))
248.end MIPSX(exception)
249
250/*
251 * Generic Guest exception handler. We end up here when the guest
252 * does something that causes a trap to kernel mode.
253 *
254 */
255NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
256 .set push
257 .set noat
258 .set noreorder
259
260 /* Get the VCPU pointer from DDATA_LO */
261 mfc0 k1, CP0_DDATA_LO
262 addiu k1, k1, VCPU_HOST_ARCH
263
264 /* Start saving Guest context to VCPU */
265 LONG_S $0, VCPU_R0(k1)
266 LONG_S $1, VCPU_R1(k1)
267 LONG_S $2, VCPU_R2(k1)
268 LONG_S $3, VCPU_R3(k1)
269 LONG_S $4, VCPU_R4(k1)
270 LONG_S $5, VCPU_R5(k1)
271 LONG_S $6, VCPU_R6(k1)
272 LONG_S $7, VCPU_R7(k1)
273 LONG_S $8, VCPU_R8(k1)
274 LONG_S $9, VCPU_R9(k1)
275 LONG_S $10, VCPU_R10(k1)
276 LONG_S $11, VCPU_R11(k1)
277 LONG_S $12, VCPU_R12(k1)
278 LONG_S $13, VCPU_R13(k1)
279 LONG_S $14, VCPU_R14(k1)
280 LONG_S $15, VCPU_R15(k1)
281 LONG_S $16, VCPU_R16(k1)
282 LONG_S $17,VCPU_R17(k1)
283 LONG_S $18, VCPU_R18(k1)
284 LONG_S $19, VCPU_R19(k1)
285 LONG_S $20, VCPU_R20(k1)
286 LONG_S $21, VCPU_R21(k1)
287 LONG_S $22, VCPU_R22(k1)
288 LONG_S $23, VCPU_R23(k1)
289 LONG_S $24, VCPU_R24(k1)
290 LONG_S $25, VCPU_R25(k1)
291
292 /* Guest k0/k1 saved later */
293
294 LONG_S $28, VCPU_R28(k1)
295 LONG_S $29, VCPU_R29(k1)
296 LONG_S $30, VCPU_R30(k1)
297 LONG_S $31, VCPU_R31(k1)
298
299 /* We need to save hi/lo and restore them on
300 * the way out
301 */
302 mfhi t0
303 LONG_S t0, VCPU_HI(k1)
304
305 mflo t0
306 LONG_S t0, VCPU_LO(k1)
307
308 /* Finally save guest k0/k1 to VCPU */
309 mfc0 t0, CP0_ERROREPC
310 LONG_S t0, VCPU_R26(k1)
311
312 /* Get GUEST k1 and save it in VCPU */
313 la t1, ~0x2ff
314 mfc0 t0, CP0_EBASE
315 and t0, t0, t1
316 LONG_L t0, 0x3000(t0)
317 LONG_S t0, VCPU_R27(k1)
318
319 /* Now that context has been saved, we can use other registers */
320
321 /* Restore vcpu */
322 mfc0 a1, CP0_DDATA_LO
323 move s1, a1
324
325 /* Restore run (vcpu->run) */
326 LONG_L a0, VCPU_RUN(a1)
327 /* Save pointer to run in s0, will be saved by the compiler */
328 move s0, a0
329
330
331 /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
332 mfc0 k0,CP0_EPC
333 LONG_S k0, VCPU_PC(k1)
334
335 mfc0 k0, CP0_BADVADDR
336 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
337
338 mfc0 k0, CP0_CAUSE
339 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
340
341 mfc0 k0, CP0_ENTRYHI
342 LONG_S k0, VCPU_HOST_ENTRYHI(k1)
343
344 /* Now restore the host state just enough to run the handlers */
345
346 /* Switch EBASE to the one used by Linux */
347 /* load up the host EBASE */
348 mfc0 v0, CP0_STATUS
349
350 .set at
351 or k0, v0, ST0_BEV
352 .set noat
353
354 mtc0 k0, CP0_STATUS
355 ehb
356
357 LONG_L k0, VCPU_HOST_EBASE(k1)
358 mtc0 k0,CP0_EBASE
359
360
361 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
362 .set at
363 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
364 or v0, v0, ST0_CU0
365 .set noat
366 mtc0 v0, CP0_STATUS
367 ehb
368
369 /* Load up host GP */
370 LONG_L gp, VCPU_HOST_GP(k1)
371
372 /* Need a stack before we can jump to "C" */
373 LONG_L sp, VCPU_HOST_STACK(k1)
374
375 /* Saved host state */
376 addiu sp,sp, -PT_SIZE
377
378 /* XXXKYMA do we need to load the host ASID, maybe not because the
379 * kernel entries are marked GLOBAL, need to verify
380 */
381
382 /* Restore host DDATA_LO */
383 LONG_L k0, PT_HOST_USERLOCAL(sp)
384 mtc0 k0, CP0_DDATA_LO
385
386 /* Restore RDHWR access */
387 la k0, 0x2000000F
388 mtc0 k0, CP0_HWRENA
389
390 /* Jump to handler */
391FEXPORT(__kvm_mips_jump_to_handler)
392 /* XXXKYMA: not sure if this is safe, how large is the stack?? */
393 /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
394 la t9,kvm_mips_handle_exit
395 jalr.hb t9
396 addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */
397
398 /* Return from handler. Make sure interrupts are disabled */
399 di
400 ehb
401
402 /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
403 * while we were handling the exception from the guest, reload k1
404 */
405 move k1, s1
406 addiu k1, k1, VCPU_HOST_ARCH
407
408 /* Check return value, should tell us if we are returning to the host (handle I/O etc)
409 * or resuming the guest
410 */
411 andi t0, v0, RESUME_HOST
412 bnez t0, __kvm_mips_return_to_host
413 nop
414
415__kvm_mips_return_to_guest:
416 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
417 mtc0 s1, CP0_DDATA_LO
418
419 /* Load up the Guest EBASE to minimize the window where BEV is set */
420 LONG_L t0, VCPU_GUEST_EBASE(k1)
421
422 /* Switch EBASE back to the one used by KVM */
423 mfc0 v1, CP0_STATUS
424 .set at
425 or k0, v1, ST0_BEV
426 .set noat
427 mtc0 k0, CP0_STATUS
428 ehb
429 mtc0 t0,CP0_EBASE
430
431 /* Setup status register for running guest in UM */
432 .set at
433 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
434 and v1, v1, ~ST0_CU0
435 .set noat
436 mtc0 v1, CP0_STATUS
437 ehb
438
439
440 /* Set Guest EPC */
441 LONG_L t0, VCPU_PC(k1)
442 mtc0 t0, CP0_EPC
443
444 /* Set the ASID for the Guest Kernel */
445 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
446 /* addresses shift to 0x80000000 */
447 bltz t0, 1f /* If kernel */
448 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
449 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
4501:
451 /* t1: contains the base of the ASID array, need to get the cpu id */
452 LONG_L t2, TI_CPU($28) /* smp_processor_id */
453 sll t2, t2, 2 /* x4 */
454 addu t3, t1, t2
455 LONG_L k0, (t3)
456 andi k0, k0, 0xff
457 mtc0 k0,CP0_ENTRYHI
458 ehb
459
460 /* Disable RDHWR access */
461 mtc0 zero, CP0_HWRENA
462
463 /* load the guest context from VCPU and return */
464 LONG_L $0, VCPU_R0(k1)
465 LONG_L $1, VCPU_R1(k1)
466 LONG_L $2, VCPU_R2(k1)
467 LONG_L $3, VCPU_R3(k1)
468 LONG_L $4, VCPU_R4(k1)
469 LONG_L $5, VCPU_R5(k1)
470 LONG_L $6, VCPU_R6(k1)
471 LONG_L $7, VCPU_R7(k1)
472 LONG_L $8, VCPU_R8(k1)
473 LONG_L $9, VCPU_R9(k1)
474 LONG_L $10, VCPU_R10(k1)
475 LONG_L $11, VCPU_R11(k1)
476 LONG_L $12, VCPU_R12(k1)
477 LONG_L $13, VCPU_R13(k1)
478 LONG_L $14, VCPU_R14(k1)
479 LONG_L $15, VCPU_R15(k1)
480 LONG_L $16, VCPU_R16(k1)
481 LONG_L $17, VCPU_R17(k1)
482 LONG_L $18, VCPU_R18(k1)
483 LONG_L $19, VCPU_R19(k1)
484 LONG_L $20, VCPU_R20(k1)
485 LONG_L $21, VCPU_R21(k1)
486 LONG_L $22, VCPU_R22(k1)
487 LONG_L $23, VCPU_R23(k1)
488 LONG_L $24, VCPU_R24(k1)
489 LONG_L $25, VCPU_R25(k1)
490
491 /* k0/k1 loaded later */
492 LONG_L $28, VCPU_R28(k1)
493 LONG_L $29, VCPU_R29(k1)
494 LONG_L $30, VCPU_R30(k1)
495 LONG_L $31, VCPU_R31(k1)
496
497FEXPORT(__kvm_mips_skip_guest_restore)
498 LONG_L k0, VCPU_HI(k1)
499 mthi k0
500
501 LONG_L k0, VCPU_LO(k1)
502 mtlo k0
503
504 LONG_L k0, VCPU_R26(k1)
505 LONG_L k1, VCPU_R27(k1)
506
507 eret
508
509__kvm_mips_return_to_host:
510 /* EBASE is already pointing to Linux */
511 LONG_L k1, VCPU_HOST_STACK(k1)
512 addiu k1,k1, -PT_SIZE
513
514 /* Restore host DDATA_LO */
515 LONG_L k0, PT_HOST_USERLOCAL(k1)
516 mtc0 k0, CP0_DDATA_LO
517
518 /* Restore host ASID */
519 LONG_L k0, PT_HOST_ASID(sp)
520 andi k0, 0xff
521 mtc0 k0,CP0_ENTRYHI
522 ehb
523
524 /* Load context saved on the host stack */
525 LONG_L $0, PT_R0(k1)
526 LONG_L $1, PT_R1(k1)
527
528 /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
529 sra k0, v0, 2
530 move $2, k0
531
532 LONG_L $3, PT_R3(k1)
533 LONG_L $4, PT_R4(k1)
534 LONG_L $5, PT_R5(k1)
535 LONG_L $6, PT_R6(k1)
536 LONG_L $7, PT_R7(k1)
537 LONG_L $8, PT_R8(k1)
538 LONG_L $9, PT_R9(k1)
539 LONG_L $10, PT_R10(k1)
540 LONG_L $11, PT_R11(k1)
541 LONG_L $12, PT_R12(k1)
542 LONG_L $13, PT_R13(k1)
543 LONG_L $14, PT_R14(k1)
544 LONG_L $15, PT_R15(k1)
545 LONG_L $16, PT_R16(k1)
546 LONG_L $17, PT_R17(k1)
547 LONG_L $18, PT_R18(k1)
548 LONG_L $19, PT_R19(k1)
549 LONG_L $20, PT_R20(k1)
550 LONG_L $21, PT_R21(k1)
551 LONG_L $22, PT_R22(k1)
552 LONG_L $23, PT_R23(k1)
553 LONG_L $24, PT_R24(k1)
554 LONG_L $25, PT_R25(k1)
555
556 /* Host k0/k1 were not saved */
557
558 LONG_L $28, PT_R28(k1)
559 LONG_L $29, PT_R29(k1)
560 LONG_L $30, PT_R30(k1)
561
562 LONG_L k0, PT_HI(k1)
563 mthi k0
564
565 LONG_L k0, PT_LO(k1)
566 mtlo k0
567
568 /* Restore RDHWR access */
569 la k0, 0x2000000F
570 mtc0 k0, CP0_HWRENA
571
572
573 /* Restore RA, which is the address we will return to */
574 LONG_L ra, PT_R31(k1)
575 j ra
576 nop
577
578 .set pop
579VECTOR_END(MIPSX(GuestExceptionEnd))
580.end MIPSX(GuestException)
581
582MIPSX(exceptions):
583 ####
584 ##### The exception handlers.
585 #####
586 .word _C_LABEL(MIPSX(GuestException)) # 0
587 .word _C_LABEL(MIPSX(GuestException)) # 1
588 .word _C_LABEL(MIPSX(GuestException)) # 2
589 .word _C_LABEL(MIPSX(GuestException)) # 3
590 .word _C_LABEL(MIPSX(GuestException)) # 4
591 .word _C_LABEL(MIPSX(GuestException)) # 5
592 .word _C_LABEL(MIPSX(GuestException)) # 6
593 .word _C_LABEL(MIPSX(GuestException)) # 7
594 .word _C_LABEL(MIPSX(GuestException)) # 8
595 .word _C_LABEL(MIPSX(GuestException)) # 9
596 .word _C_LABEL(MIPSX(GuestException)) # 10
597 .word _C_LABEL(MIPSX(GuestException)) # 11
598 .word _C_LABEL(MIPSX(GuestException)) # 12
599 .word _C_LABEL(MIPSX(GuestException)) # 13
600 .word _C_LABEL(MIPSX(GuestException)) # 14
601 .word _C_LABEL(MIPSX(GuestException)) # 15
602 .word _C_LABEL(MIPSX(GuestException)) # 16
603 .word _C_LABEL(MIPSX(GuestException)) # 17
604 .word _C_LABEL(MIPSX(GuestException)) # 18
605 .word _C_LABEL(MIPSX(GuestException)) # 19
606 .word _C_LABEL(MIPSX(GuestException)) # 20
607 .word _C_LABEL(MIPSX(GuestException)) # 21
608 .word _C_LABEL(MIPSX(GuestException)) # 22
609 .word _C_LABEL(MIPSX(GuestException)) # 23
610 .word _C_LABEL(MIPSX(GuestException)) # 24
611 .word _C_LABEL(MIPSX(GuestException)) # 25
612 .word _C_LABEL(MIPSX(GuestException)) # 26
613 .word _C_LABEL(MIPSX(GuestException)) # 27
614 .word _C_LABEL(MIPSX(GuestException)) # 28
615 .word _C_LABEL(MIPSX(GuestException)) # 29
616 .word _C_LABEL(MIPSX(GuestException)) # 30
617 .word _C_LABEL(MIPSX(GuestException)) # 31
618
619
620/* This routine makes changes to the instruction stream effective to the hardware.
621 * It should be called after the instruction stream is written.
622 * On return, the new instructions are effective.
623 * Inputs:
624 * a0 = Start address of new instruction stream
625 * a1 = Size, in bytes, of new instruction stream
626 */
627
628#define HW_SYNCI_Step $1
629LEAF(MIPSX(SyncICache))
630 .set push
631 .set mips32r2
632 beq a1, zero, 20f
633 nop
634 addu a1, a0, a1
635 rdhwr v0, HW_SYNCI_Step
636 beq v0, zero, 20f
637 nop
638
63910:
640 synci 0(a0)
641 addu a0, a0, v0
642 sltu v1, a0, a1
643 bne v1, zero, 10b
644 nop
645 sync
64620:
647 jr.hb ra
648 nop
649 .set pop
650END(MIPSX(SyncICache))
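
Callers later in this series invoke this routine (as mips32_SyncICache) after
writing instructions; for example, kvm_arch_vcpu_create() in kvm_mips.c flushes
the freshly copied exception handlers with:

    mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));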
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644
index 000000000000..e0dad0289797
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips.c
@@ -0,0 +1,958 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16#include <linux/fs.h>
17#include <linux/bootmem.h>
18#include <asm/page.h>
19#include <asm/cacheflush.h>
20#include <asm/mmu_context.h>
21
22#include <linux/kvm_host.h>
23
24#include "kvm_mips_int.h"
25#include "kvm_mips_comm.h"
26
27#define CREATE_TRACE_POINTS
28#include "trace.h"
29
30#ifndef VECTORSPACING
31#define VECTORSPACING 0x100 /* for EI/VI mode */
32#endif
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35struct kvm_stats_debugfs_item debugfs_entries[] = {
36 { "wait", VCPU_STAT(wait_exits) },
37 { "cache", VCPU_STAT(cache_exits) },
38 { "signal", VCPU_STAT(signal_exits) },
39 { "interrupt", VCPU_STAT(int_exits) },
40 { "cop_unusable", VCPU_STAT(cop_unusable_exits) },
41 { "tlbmod", VCPU_STAT(tlbmod_exits) },
42 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
43 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
44 { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
45 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
46 { "syscall", VCPU_STAT(syscall_exits) },
47 { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
48 { "break_inst", VCPU_STAT(break_inst_exits) },
49 { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
50 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
51 {NULL}
52};
53
54static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
55{
56 int i;
57 for_each_possible_cpu(i) {
58 vcpu->arch.guest_kernel_asid[i] = 0;
59 vcpu->arch.guest_user_asid[i] = 0;
60 }
61 return 0;
62}
63
64gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
65{
66 return gfn;
67}
68
69/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
70 * are "runnable" if interrupts are pending
71 */
72int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
73{
74 return !!(vcpu->arch.pending_exceptions);
75}
76
77int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
78{
79 return 1;
80}
81
82int kvm_arch_hardware_enable(void *garbage)
83{
84 return 0;
85}
86
87void kvm_arch_hardware_disable(void *garbage)
88{
89}
90
91int kvm_arch_hardware_setup(void)
92{
93 return 0;
94}
95
96void kvm_arch_hardware_unsetup(void)
97{
98}
99
100void kvm_arch_check_processor_compat(void *rtn)
101{
102 int *r = (int *)rtn;
103 *r = 0;
104 return;
105}
106
107static void kvm_mips_init_tlbs(struct kvm *kvm)
108{
109 unsigned long wired;
110
111 /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
112 wired = read_c0_wired();
113 write_c0_wired(wired + 1);
114 mtc0_tlbw_hazard();
115 kvm->arch.commpage_tlb = wired;
116
117 kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
118 kvm->arch.commpage_tlb);
119}
120
121static void kvm_mips_init_vm_percpu(void *arg)
122{
123 struct kvm *kvm = (struct kvm *)arg;
124
125 kvm_mips_init_tlbs(kvm);
126 kvm_mips_callbacks->vm_init(kvm);
127
128}
129
130int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
131{
132 if (atomic_inc_return(&kvm_mips_instance) == 1) {
133 kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
134 __func__);
135 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
136 }
137
138
139 return 0;
140}
141
142void kvm_mips_free_vcpus(struct kvm *kvm)
143{
144 unsigned int i;
145 struct kvm_vcpu *vcpu;
146
147 /* Put the pages we reserved for the guest pmap */
148 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
149 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
150 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
151 }
152
153 if (kvm->arch.guest_pmap)
154 kfree(kvm->arch.guest_pmap);
155
156 kvm_for_each_vcpu(i, vcpu, kvm) {
157 kvm_arch_vcpu_free(vcpu);
158 }
159
160 mutex_lock(&kvm->lock);
161
162 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
163 kvm->vcpus[i] = NULL;
164
165 atomic_set(&kvm->online_vcpus, 0);
166
167 mutex_unlock(&kvm->lock);
168}
169
170void kvm_arch_sync_events(struct kvm *kvm)
171{
172}
173
174static void kvm_mips_uninit_tlbs(void *arg)
175{
176 /* Restore wired count */
177 write_c0_wired(0);
178 mtc0_tlbw_hazard();
179 /* Clear out all the TLBs */
180 kvm_local_flush_tlb_all();
181}
182
183void kvm_arch_destroy_vm(struct kvm *kvm)
184{
185 kvm_mips_free_vcpus(kvm);
186
187 /* If this is the last instance, restore wired count */
188 if (atomic_dec_return(&kvm_mips_instance) == 0) {
189 kvm_info("%s: last KVM instance, restoring TLB parameters\n",
190 __func__);
191 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
192 }
193}
194
195long
196kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
197{
198 return -EINVAL;
199}
200
201void kvm_arch_free_memslot(struct kvm_memory_slot *free,
202 struct kvm_memory_slot *dont)
203{
204}
205
206int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
207{
208 return 0;
209}
210
211int kvm_arch_prepare_memory_region(struct kvm *kvm,
212 struct kvm_memory_slot *memslot,
213 struct kvm_userspace_memory_region *mem,
214 enum kvm_mr_change change)
215{
216 return 0;
217}
218
219void kvm_arch_commit_memory_region(struct kvm *kvm,
220 struct kvm_userspace_memory_region *mem,
221 const struct kvm_memory_slot *old,
222 enum kvm_mr_change change)
223{
224 unsigned long npages = 0;
225 int i, err = 0;
226
227 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
228 __func__, kvm, mem->slot, mem->guest_phys_addr,
229 mem->memory_size, mem->userspace_addr);
230
231 /* Setup Guest PMAP table */
232 if (!kvm->arch.guest_pmap) {
233 if (mem->slot == 0)
234 npages = mem->memory_size >> PAGE_SHIFT;
235
236 if (npages) {
237 kvm->arch.guest_pmap_npages = npages;
238 kvm->arch.guest_pmap =
239 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
240
241 if (!kvm->arch.guest_pmap) {
242 kvm_err("Failed to allocate guest PMAP");
243 err = -ENOMEM;
244 goto out;
245 }
246
247 kvm_info
248 ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
249 npages, kvm->arch.guest_pmap);
250
251 /* Now setup the page table */
252 for (i = 0; i < npages; i++) {
253 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
254 }
255 }
256 }
257out:
258 return;
259}
260
261void kvm_arch_flush_shadow_all(struct kvm *kvm)
262{
263}
264
265void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
266 struct kvm_memory_slot *slot)
267{
268}
269
270void kvm_arch_flush_shadow(struct kvm *kvm)
271{
272}
273
274struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
275{
276 extern char mips32_exception[], mips32_exceptionEnd[];
277 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
278 int err, size, offset;
279 void *gebase;
280 int i;
281
282 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
283
284 if (!vcpu) {
285 err = -ENOMEM;
286 goto out;
287 }
288
289 err = kvm_vcpu_init(vcpu, kvm, id);
290
291 if (err)
292 goto out_free_cpu;
293
294 kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
295
296 /* Allocate space for host mode exception handlers that handle
297 * guest mode exits
298 */
299 if (cpu_has_veic || cpu_has_vint) {
300 size = 0x200 + VECTORSPACING * 64;
301 } else {
302 size = 0x200;
303 }
304
305 /* Save Linux EBASE */
306 vcpu->arch.host_ebase = (void *)read_c0_ebase();
307
308 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
309
310 if (!gebase) {
311 err = -ENOMEM;
312 goto out_free_cpu;
313 }
314 kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
315 ALIGN(size, PAGE_SIZE), gebase);
316
317 /* Save new ebase */
318 vcpu->arch.guest_ebase = gebase;
319
320 /* Copy L1 Guest Exception handler to correct offset */
321
322 /* TLB Refill, EXL = 0 */
323 memcpy(gebase, mips32_exception,
324 mips32_exceptionEnd - mips32_exception);
325
326 /* General Exception Entry point */
327 memcpy(gebase + 0x180, mips32_exception,
328 mips32_exceptionEnd - mips32_exception);
329
330 /* For vectored interrupts poke the exception code @ all offsets 0-7 */
331 for (i = 0; i < 8; i++) {
332 kvm_debug("L1 Vectored handler @ %p\n",
333 gebase + 0x200 + (i * VECTORSPACING));
334 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
335 mips32_exceptionEnd - mips32_exception);
336 }
337
338 /* General handler, relocate to unmapped space for sanity's sake */
339 offset = 0x2000;
340 kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
341 gebase + offset,
342 mips32_GuestExceptionEnd - mips32_GuestException);
343
344 memcpy(gebase + offset, mips32_GuestException,
345 mips32_GuestExceptionEnd - mips32_GuestException);
346
347 /* Invalidate the icache for these ranges */
348 mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
349
350 /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
351 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
352
353 if (!vcpu->arch.kseg0_commpage) {
354 err = -ENOMEM;
355 goto out_free_gebase;
356 }
357
358 kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
359 kvm_mips_commpage_init(vcpu);
360
361 /* Init */
362 vcpu->arch.last_sched_cpu = -1;
363
364 /* Start off the timer */
365 kvm_mips_emulate_count(vcpu);
366
367 return vcpu;
368
369out_free_gebase:
370 kfree(gebase);
371
372out_free_cpu:
373 kfree(vcpu);
374
375out:
376 return ERR_PTR(err);
377}
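
Putting the memcpy()s above together, the per-vcpu handler area (gebase) ends up
laid out roughly as follows; the sketch below just prints that map (offsets taken
from this function and from the exception stub in kvm_locore.S, VECTORSPACING
assumed to be the 0x100 defined earlier):

    #include <stdio.h>

    #define VECTORSPACING 0x100  /* EI/VI vector spacing, as in kvm_mips.c */

    int main(void)
    {
            int i;

            printf("0x0000: TLB refill entry (copy of mips32_exception)\n");
            printf("0x0180: general exception entry (copy of mips32_exception)\n");
            for (i = 0; i < 8; i++)
                    printf("0x%04x: vectored interrupt %d entry\n",
                           0x200 + i * VECTORSPACING, i);
            printf("0x2000: mips32_GuestException, the real exit handler\n");
            printf("0x3000: scratch slot where the entry stub saves guest k1\n");
            return 0;
    }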
378
379void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
380{
381 hrtimer_cancel(&vcpu->arch.comparecount_timer);
382
383 kvm_vcpu_uninit(vcpu);
384
385 kvm_mips_dump_stats(vcpu);
386
387 if (vcpu->arch.guest_ebase)
388 kfree(vcpu->arch.guest_ebase);
389
390 if (vcpu->arch.kseg0_commpage)
391 kfree(vcpu->arch.kseg0_commpage);
392
393}
394
395void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
396{
397 kvm_arch_vcpu_free(vcpu);
398}
399
400int
401kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
402 struct kvm_guest_debug *dbg)
403{
404 return -EINVAL;
405}
406
407int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
408{
409 int r = 0;
410 sigset_t sigsaved;
411
412 if (vcpu->sigset_active)
413 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
414
415 if (vcpu->mmio_needed) {
416 if (!vcpu->mmio_is_write)
417 kvm_mips_complete_mmio_load(vcpu, run);
418 vcpu->mmio_needed = 0;
419 }
420
421 /* Check if we have any exceptions/interrupts pending */
422 kvm_mips_deliver_interrupts(vcpu,
423 kvm_read_c0_guest_cause(vcpu->arch.cop0));
424
425 local_irq_disable();
426 kvm_guest_enter();
427
428 r = __kvm_mips_vcpu_run(run, vcpu);
429
430 kvm_guest_exit();
431 local_irq_enable();
432
433 if (vcpu->sigset_active)
434 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
435
436 return r;
437}
438
439int
440kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
441{
442 int intr = (int)irq->irq;
443 struct kvm_vcpu *dvcpu = NULL;
444
445 if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
446 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
447 (int)intr);
448
449 if (irq->cpu == -1)
450 dvcpu = vcpu;
451 else
452 dvcpu = vcpu->kvm->vcpus[irq->cpu];
453
454 if (intr == 2 || intr == 3 || intr == 4) {
455 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
456
457 } else if (intr == -2 || intr == -3 || intr == -4) {
458 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
459 } else {
460 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
461 irq->cpu, irq->irq);
462 return -EINVAL;
463 }
464
465 dvcpu->arch.wait = 0;
466
467 if (waitqueue_active(&dvcpu->wq)) {
468 wake_up_interruptible(&dvcpu->wq);
469 }
470
471 return 0;
472}
473
474int
475kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
476 struct kvm_mp_state *mp_state)
477{
478 return -EINVAL;
479}
480
481int
482kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
483 struct kvm_mp_state *mp_state)
484{
485 return -EINVAL;
486}
487
488long
489kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
490{
491 struct kvm_vcpu *vcpu = filp->private_data;
492 void __user *argp = (void __user *)arg;
493 long r;
494 int intr;
495
496 switch (ioctl) {
497 case KVM_NMI:
498 /* Treat the NMI as a CPU reset */
499 r = kvm_mips_reset_vcpu(vcpu);
500 break;
501 case KVM_INTERRUPT:
502 {
503 struct kvm_mips_interrupt irq;
504 r = -EFAULT;
505 if (copy_from_user(&irq, argp, sizeof(irq)))
506 goto out;
507
508 intr = (int)irq.irq;
509
510 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
511 irq.irq);
512
513 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
514 break;
515 }
516 default:
517 r = -EINVAL;
518 }
519
520out:
521 return r;
522}
523
524/*
525 * Get (and clear) the dirty memory log for a memory slot.
526 */
527int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
528{
529 struct kvm_memory_slot *memslot;
530 unsigned long ga, ga_end;
531 int is_dirty = 0;
532 int r;
533 unsigned long n;
534
535 mutex_lock(&kvm->slots_lock);
536
537 r = kvm_get_dirty_log(kvm, log, &is_dirty);
538 if (r)
539 goto out;
540
541 /* If nothing is dirty, don't bother messing with page tables. */
542 if (is_dirty) {
543 memslot = &kvm->memslots->memslots[log->slot];
544
545 ga = memslot->base_gfn << PAGE_SHIFT;
546 ga_end = ga + (memslot->npages << PAGE_SHIFT);
547
548 printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
549 ga_end);
550
551 n = kvm_dirty_bitmap_bytes(memslot);
552 memset(memslot->dirty_bitmap, 0, n);
553 }
554
555 r = 0;
556out:
557 mutex_unlock(&kvm->slots_lock);
558 return r;
559
560}
561
562long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
563{
564 long r;
565
566 switch (ioctl) {
567 default:
568 r = -EINVAL;
569 }
570
571 return r;
572}
573
574int kvm_arch_init(void *opaque)
575{
576 int ret;
577
578 if (kvm_mips_callbacks) {
579 kvm_err("kvm: module already exists\n");
580 return -EEXIST;
581 }
582
583 ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
584
585 return ret;
586}
587
588void kvm_arch_exit(void)
589{
590 kvm_mips_callbacks = NULL;
591}
592
593int
594kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
595{
596 return -ENOTSUPP;
597}
598
599int
600kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
601{
602 return -ENOTSUPP;
603}
604
605int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
606{
607 return 0;
608}
609
610int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
611{
612 return -ENOTSUPP;
613}
614
615int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
616{
617 return -ENOTSUPP;
618}
619
620int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
621{
622 return VM_FAULT_SIGBUS;
623}
624
625int kvm_dev_ioctl_check_extension(long ext)
626{
627 int r;
628
629 switch (ext) {
630 case KVM_CAP_COALESCED_MMIO:
631 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
632 break;
633 default:
634 r = 0;
635 break;
636 }
637 return r;
638
639}
640
641int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
642{
643 return kvm_mips_pending_timer(vcpu);
644}
645
646int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
647{
648 int i;
649 struct mips_coproc *cop0;
650
651 if (!vcpu)
652 return -1;
653
654 printk("VCPU Register Dump:\n");
655 printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
656 printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
657
658 for (i = 0; i < 32; i += 4) {
659 printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
660 vcpu->arch.gprs[i],
661 vcpu->arch.gprs[i + 1],
662 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
663 }
664 printk("\thi: 0x%08lx\n", vcpu->arch.hi);
665 printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
666
667 cop0 = vcpu->arch.cop0;
668 printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
669 kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
670
671 printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
672
673 return 0;
674}
675
676int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
677{
678 int i;
679
680 for (i = 0; i < 32; i++)
681 vcpu->arch.gprs[i] = regs->gprs[i];
682
683 vcpu->arch.hi = regs->hi;
684 vcpu->arch.lo = regs->lo;
685 vcpu->arch.pc = regs->pc;
686
687 return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
688}
689
690int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
691{
692 int i;
693
694 for (i = 0; i < 32; i++)
695 regs->gprs[i] = vcpu->arch.gprs[i];
696
697 regs->hi = vcpu->arch.hi;
698 regs->lo = vcpu->arch.lo;
699 regs->pc = vcpu->arch.pc;
700
701 return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
702}
703
704void kvm_mips_comparecount_func(unsigned long data)
705{
706 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
707
708 kvm_mips_callbacks->queue_timer_int(vcpu);
709
710 vcpu->arch.wait = 0;
711 if (waitqueue_active(&vcpu->wq)) {
712 wake_up_interruptible(&vcpu->wq);
713 }
714}
715
716/*
717 * low level hrtimer wake routine.
718 */
719enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
720{
721 struct kvm_vcpu *vcpu;
722
723 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
724 kvm_mips_comparecount_func((unsigned long) vcpu);
725 hrtimer_forward_now(&vcpu->arch.comparecount_timer,
726 ktime_set(0, MS_TO_NS(10)));
727 return HRTIMER_RESTART;
728}
729
730int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
731{
732 kvm_mips_callbacks->vcpu_init(vcpu);
733 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
734 HRTIMER_MODE_REL);
735 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
736 kvm_mips_init_shadow_tlb(vcpu);
737 return 0;
738}
739
740void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
741{
742 return;
743}
744
745int
746kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
747{
748 return 0;
749}
750
751/* Initial guest state */
752int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
753{
754 return kvm_mips_callbacks->vcpu_setup(vcpu);
755}
756
757static
758void kvm_mips_set_c0_status(void)
759{
760 uint32_t status = read_c0_status();
761
762 if (cpu_has_fpu)
763 status |= (ST0_CU1);
764
765 if (cpu_has_dsp)
766 status |= (ST0_MX);
767
768 write_c0_status(status);
769 ehb();
770}
771
772/*
773 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
774 */
775int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
776{
777 uint32_t cause = vcpu->arch.host_cp0_cause;
778 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
779 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
780 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
781 enum emulation_result er = EMULATE_DONE;
782 int ret = RESUME_GUEST;
783
784 /* Set a default exit reason */
785 run->exit_reason = KVM_EXIT_UNKNOWN;
786 run->ready_for_interrupt_injection = 1;
787
788 /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
789 kvm_mips_set_c0_status();
790
791 local_irq_enable();
792
793 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
794 cause, opc, run, vcpu);
795
796 /* Do a privilege check; if in UM, most of these exit conditions end up
797 * causing an exception to be delivered to the Guest Kernel
798 */
799 er = kvm_mips_check_privilege(cause, opc, run, vcpu);
800 if (er == EMULATE_PRIV_FAIL) {
801 goto skip_emul;
802 } else if (er == EMULATE_FAIL) {
803 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
804 ret = RESUME_HOST;
805 goto skip_emul;
806 }
807
808 switch (exccode) {
809 case T_INT:
810 kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
811
812 ++vcpu->stat.int_exits;
813 trace_kvm_exit(vcpu, INT_EXITS);
814
815 if (need_resched()) {
816 cond_resched();
817 }
818
819 ret = RESUME_GUEST;
820 break;
821
822 case T_COP_UNUSABLE:
823 kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
824
825 ++vcpu->stat.cop_unusable_exits;
826 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
827 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
828 /* XXXKYMA: Might need to return to user space */
829 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
830 ret = RESUME_HOST;
831 }
832 break;
833
834 case T_TLB_MOD:
835 ++vcpu->stat.tlbmod_exits;
836 trace_kvm_exit(vcpu, TLBMOD_EXITS);
837 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
838 break;
839
840 case T_TLB_ST_MISS:
841 kvm_debug
842 ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
843 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
844 badvaddr);
845
846 ++vcpu->stat.tlbmiss_st_exits;
847 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
848 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
849 break;
850
851 case T_TLB_LD_MISS:
852 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
853 cause, opc, badvaddr);
854
855 ++vcpu->stat.tlbmiss_ld_exits;
856 trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
857 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
858 break;
859
860 case T_ADDR_ERR_ST:
861 ++vcpu->stat.addrerr_st_exits;
862 trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
863 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
864 break;
865
866 case T_ADDR_ERR_LD:
867 ++vcpu->stat.addrerr_ld_exits;
868 trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
869 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
870 break;
871
872 case T_SYSCALL:
873 ++vcpu->stat.syscall_exits;
874 trace_kvm_exit(vcpu, SYSCALL_EXITS);
875 ret = kvm_mips_callbacks->handle_syscall(vcpu);
876 break;
877
878 case T_RES_INST:
879 ++vcpu->stat.resvd_inst_exits;
880 trace_kvm_exit(vcpu, RESVD_INST_EXITS);
881 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
882 break;
883
884 case T_BREAK:
885 ++vcpu->stat.break_inst_exits;
886 trace_kvm_exit(vcpu, BREAK_INST_EXITS);
887 ret = kvm_mips_callbacks->handle_break(vcpu);
888 break;
889
890 default:
891 kvm_err
892 ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
893 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
894 kvm_read_c0_guest_status(vcpu->arch.cop0));
895 kvm_arch_vcpu_dump_regs(vcpu);
896 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
897 ret = RESUME_HOST;
898 break;
899
900 }
901
902skip_emul:
903 local_irq_disable();
904
905 if (er == EMULATE_DONE && !(ret & RESUME_HOST))
906 kvm_mips_deliver_interrupts(vcpu, cause);
907
908 if (!(ret & RESUME_HOST)) {
909 /* Only check for signals if not already exiting to userspace */
910 if (signal_pending(current)) {
911 run->exit_reason = KVM_EXIT_INTR;
912 ret = (-EINTR << 2) | RESUME_HOST;
913 ++vcpu->stat.signal_exits;
914 trace_kvm_exit(vcpu, SIGNAL_EXITS);
915 }
916 }
917
918 return ret;
919}
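
The (errcode << 2 | RESUME_*) packing used for the return value above can be
exercised in isolation; the constants mirror the RESUME_FLAG_HOST definition in
kvm_locore.S, and the decode matches the `sra k0, v0, 2` on the assembly return
path (stand-alone sketch, not kernel code):

    #include <stdio.h>

    #define RESUME_FLAG_HOST (1 << 1)          /* return to host (userspace)? */
    #define RESUME_GUEST     0
    #define RESUME_HOST      RESUME_FLAG_HOST

    int main(void)
    {
            int ret = (-4 << 2) | RESUME_HOST; /* e.g. -EINTR packed with "go to host" */

            if (ret & RESUME_FLAG_HOST)
                    printf("exit to host, errcode = %d\n", ret >> 2);
            else
                    printf("resume guest\n");
            return 0;
    }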
920
921int __init kvm_mips_init(void)
922{
923 int ret;
924
925 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
926
927 if (ret)
928 return ret;
929
930 /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
931 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
932 * to avoid the possibility of double faulting. The issue is that the TLB code
933 * references routines that are part of the KVM module,
934 * which are only available once the module is loaded.
935 */
936 kvm_mips_gfn_to_pfn = gfn_to_pfn;
937 kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
938 kvm_mips_is_error_pfn = is_error_pfn;
939
940 pr_info("KVM/MIPS Initialized\n");
941 return 0;
942}
943
944void __exit kvm_mips_exit(void)
945{
946 kvm_exit();
947
948 kvm_mips_gfn_to_pfn = NULL;
949 kvm_mips_release_pfn_clean = NULL;
950 kvm_mips_is_error_pfn = NULL;
951
952 pr_info("KVM/MIPS unloaded\n");
953}
954
955module_init(kvm_mips_init);
956module_exit(kvm_mips_exit);
957
958EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644
index 000000000000..a4a8c85cc8f7
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_comm.h
@@ -0,0 +1,23 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: commpage: mapped into guest kernel space
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#ifndef __KVM_MIPS_COMMPAGE_H__
13#define __KVM_MIPS_COMMPAGE_H__
14
15struct kvm_mips_commpage {
16 struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */
17};
18
19#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
20
21extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
22
23#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644
index 000000000000..3873b1ecc40f
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_commpage.c
@@ -0,0 +1,37 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* commpage, currently used for Virtual COP0 registers.
7* Mapped into the guest kernel @ 0x0.
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <asm/page.h>
20#include <asm/cacheflush.h>
21#include <asm/mmu_context.h>
22
23#include <linux/kvm_host.h>
24
25#include "kvm_mips_comm.h"
26
27void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
28{
29 struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
30 memset(page, 0, sizeof(struct kvm_mips_commpage));
31
32 /* Specific init values for fields */
33 vcpu->arch.cop0 = &page->cop0;
34 memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
35
36 return;
37}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644
index 000000000000..96528e2d1ea6
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_dyntrans.c
@@ -0,0 +1,149 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/kvm_host.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19
20#include "kvm_mips_comm.h"
21
22#define SYNCI_TEMPLATE 0x041f0000
23#define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
24#define SYNCI_OFFSET(x) ((x) & 0xffff)
25
26#define LW_TEMPLATE 0x8c000000
27#define CLEAR_TEMPLATE 0x00000020
28#define SW_TEMPLATE 0xac000000
29
30int
31kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
32 struct kvm_vcpu *vcpu)
33{
34 int result = 0;
35 unsigned long kseg0_opc;
36 uint32_t synci_inst = 0x0;
37
38 /* Replace the CACHE instruction with a NOP */
39 kseg0_opc =
40 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
41 (vcpu, (unsigned long) opc));
42 memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
43 mips32_SyncICache(kseg0_opc, 32);
44
45 return result;
46}
47
48/*
49 * Address based CACHE instructions are transformed into synci(s). A little heavy
50 * for just D-cache invalidates, but avoids an expensive trap
51 */
52int
53kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
54 struct kvm_vcpu *vcpu)
55{
56 int result = 0;
57 unsigned long kseg0_opc;
58 uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
59
60 base = (inst >> 21) & 0x1f;
61 offset = inst & 0xffff;
62 synci_inst |= (base << 21);
63 synci_inst |= offset;
64
65 kseg0_opc =
66 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
67 (vcpu, (unsigned long) opc));
68 memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
69 mips32_SyncICache(kseg0_opc, 32);
70
71 return result;
72}
73
74int
75kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
76{
77 int32_t rt, rd, sel;
78 uint32_t mfc0_inst;
79 unsigned long kseg0_opc, flags;
80
81 rt = (inst >> 16) & 0x1f;
82 rd = (inst >> 11) & 0x1f;
83 sel = inst & 0x7;
84
85 if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
86 mfc0_inst = CLEAR_TEMPLATE;
87 mfc0_inst |= ((rt & 0x1f) << 16);
88 } else {
89 mfc0_inst = LW_TEMPLATE;
90 mfc0_inst |= ((rt & 0x1f) << 16);
91 mfc0_inst |=
92 offsetof(struct mips_coproc,
93 reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
94 cop0);
95 }
96
97 if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
98 kseg0_opc =
99 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
100 (vcpu, (unsigned long) opc));
101 memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
102 mips32_SyncICache(kseg0_opc, 32);
103 } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
104 local_irq_save(flags);
105 memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
106 mips32_SyncICache((unsigned long) opc, 32);
107 local_irq_restore(flags);
108 } else {
109 kvm_err("%s: Invalid address: %p\n", __func__, opc);
110 return -EFAULT;
111 }
112
113 return 0;
114}
115
116int
117kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
118{
119 int32_t rt, rd, sel;
120 uint32_t mtc0_inst = SW_TEMPLATE;
121 unsigned long kseg0_opc, flags;
122
123 rt = (inst >> 16) & 0x1f;
124 rd = (inst >> 11) & 0x1f;
125 sel = inst & 0x7;
126
127 mtc0_inst |= ((rt & 0x1f) << 16);
128 mtc0_inst |=
129 offsetof(struct mips_coproc,
130 reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
131
132 if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
133 kseg0_opc =
134 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
135 (vcpu, (unsigned long) opc));
136 memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
137 mips32_SyncICache(kseg0_opc, 32);
138 } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
139 local_irq_save(flags);
140 memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
141 mips32_SyncICache((unsigned long) opc, 32);
142 local_irq_restore(flags);
143 } else {
144 kvm_err("%s: Invalid address: %p\n", __func__, opc);
145 return -EFAULT;
146 }
147
148 return 0;
149}
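
As a worked example of the MTC0 rewriting above, a guest `mtc0 $t0, $12` (Status,
sel 0) becomes a `sw $t0, offset($zero)` whose offset points at the virtual Status
register inside the commpage. The stand-alone sketch below redoes the bitfield
arithmetic; the 0x180 offset is hypothetical, since the real value comes from
offsetof() on the commpage and mips_coproc structures:

    #include <stdio.h>
    #include <stdint.h>

    #define SW_TEMPLATE 0xac000000u  /* sw rt, offset(base); base field left as $zero */

    /* Build the replacement store for "mtc0 rt, rd, sel", as kvm_mips_trans_mtc0() does. */
    static uint32_t build_mtc0_replacement(uint32_t inst, uint32_t commpage_reg_offset)
    {
            uint32_t rt = (inst >> 16) & 0x1f;

            return SW_TEMPLATE | (rt << 16) | (commpage_reg_offset & 0xffff);
    }

    int main(void)
    {
            uint32_t mtc0 = 0x40886000;  /* mtc0 $t0 (reg 8), $12 (Status), sel 0 */

            printf("patched to: 0x%08x\n", build_mtc0_replacement(mtc0, 0x180));
            return 0;
    }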
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644
index 000000000000..2b2bac9a40aa
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -0,0 +1,1826 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Instruction/Exception emulation
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/kvm_host.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <linux/random.h>
20#include <asm/page.h>
21#include <asm/cacheflush.h>
22#include <asm/cpu-info.h>
23#include <asm/mmu_context.h>
24#include <asm/tlbflush.h>
25#include <asm/inst.h>
26
27#undef CONFIG_MIPS_MT
28#include <asm/r4kcache.h>
29#define CONFIG_MIPS_MT
30
31#include "kvm_mips_opcode.h"
32#include "kvm_mips_int.h"
33#include "kvm_mips_comm.h"
34
35#include "trace.h"
36
37/*
38 * Compute the return address and emulate the branch, if required.
39 * This function should be called only when the instruction being emulated is in a branch delay slot.
40 */
41unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
42 unsigned long instpc)
43{
44 unsigned int dspcontrol;
45 union mips_instruction insn;
46 struct kvm_vcpu_arch *arch = &vcpu->arch;
47 long epc = instpc;
48 long nextpc = KVM_INVALID_INST;
49
50 if (epc & 3)
51 goto unaligned;
52
53 /*
54 * Read the instruction
55 */
56 insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
57
58 if (insn.word == KVM_INVALID_INST)
59 return KVM_INVALID_INST;
60
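	/*
	 * For the conditional branches below, a taken branch resolves to
	 * (branch PC + 4) + (sign-extended offset << 2); a not-taken branch
	 * resumes after the delay slot, i.e. at branch PC + 8.
	 */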
61 switch (insn.i_format.opcode) {
62 /*
63 * jr and jalr are in r_format format.
64 */
65 case spec_op:
66 switch (insn.r_format.func) {
67 case jalr_op:
68 arch->gprs[insn.r_format.rd] = epc + 8;
69 /* Fall through */
70 case jr_op:
71 nextpc = arch->gprs[insn.r_format.rs];
72 break;
73 }
74 break;
75
76 /*
77 * This group contains:
78 * bltz_op, bgez_op, bltzl_op, bgezl_op,
79 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
80 */
81 case bcond_op:
82 switch (insn.i_format.rt) {
83 case bltz_op:
84 case bltzl_op:
85 if ((long)arch->gprs[insn.i_format.rs] < 0)
86 epc = epc + 4 + (insn.i_format.simmediate << 2);
87 else
88 epc += 8;
89 nextpc = epc;
90 break;
91
92 case bgez_op:
93 case bgezl_op:
94 if ((long)arch->gprs[insn.i_format.rs] >= 0)
95 epc = epc + 4 + (insn.i_format.simmediate << 2);
96 else
97 epc += 8;
98 nextpc = epc;
99 break;
100
101 case bltzal_op:
102 case bltzall_op:
103 arch->gprs[31] = epc + 8;
104 if ((long)arch->gprs[insn.i_format.rs] < 0)
105 epc = epc + 4 + (insn.i_format.simmediate << 2);
106 else
107 epc += 8;
108 nextpc = epc;
109 break;
110
111 case bgezal_op:
112 case bgezall_op:
113 arch->gprs[31] = epc + 8;
114 if ((long)arch->gprs[insn.i_format.rs] >= 0)
115 epc = epc + 4 + (insn.i_format.simmediate << 2);
116 else
117 epc += 8;
118 nextpc = epc;
119 break;
120 case bposge32_op:
121 if (!cpu_has_dsp)
122 goto sigill;
123
124 dspcontrol = rddsp(0x01);
125
126 if (dspcontrol >= 32) {
127 epc = epc + 4 + (insn.i_format.simmediate << 2);
128 } else
129 epc += 8;
130 nextpc = epc;
131 break;
132 }
133 break;
134
135 /*
136 * These are unconditional and in j_format.
137 */
138 case jal_op:
139 arch->gprs[31] = instpc + 8;
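		/* Fall through: jal shares the target computation with j */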
140 case j_op:
141 epc += 4;
142 epc >>= 28;
143 epc <<= 28;
144 epc |= (insn.j_format.target << 2);
145 nextpc = epc;
146 break;
147
148 /*
149 * These are conditional and in i_format.
150 */
151 case beq_op:
152 case beql_op:
153 if (arch->gprs[insn.i_format.rs] ==
154 arch->gprs[insn.i_format.rt])
155 epc = epc + 4 + (insn.i_format.simmediate << 2);
156 else
157 epc += 8;
158 nextpc = epc;
159 break;
160
161 case bne_op:
162 case bnel_op:
163 if (arch->gprs[insn.i_format.rs] !=
164 arch->gprs[insn.i_format.rt])
165 epc = epc + 4 + (insn.i_format.simmediate << 2);
166 else
167 epc += 8;
168 nextpc = epc;
169 break;
170
171 case blez_op: /* not really i_format */
172 case blezl_op:
173 /* rt field assumed to be zero */
174 if ((long)arch->gprs[insn.i_format.rs] <= 0)
175 epc = epc + 4 + (insn.i_format.simmediate << 2);
176 else
177 epc += 8;
178 nextpc = epc;
179 break;
180
181 case bgtz_op:
182 case bgtzl_op:
183 /* rt field assumed to be zero */
184 if ((long)arch->gprs[insn.i_format.rs] > 0)
185 epc = epc + 4 + (insn.i_format.simmediate << 2);
186 else
187 epc += 8;
188 nextpc = epc;
189 break;
190
191 /*
192 * And now the FPA/cp1 branch instructions.
193 */
194 case cop1_op:
195 printk("%s: unsupported cop1_op\n", __func__);
196 break;
197 }
198
199 return nextpc;
200
201unaligned:
202 printk("%s: unaligned epc\n", __func__);
203 return nextpc;
204
205sigill:
206 printk("%s: DSP branch but not DSP ASE\n", __func__);
207 return nextpc;
208}
209
210enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
211{
212 unsigned long branch_pc;
213 enum emulation_result er = EMULATE_DONE;
214
215 if (cause & CAUSEF_BD) {
216 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
217 if (branch_pc == KVM_INVALID_INST) {
218 er = EMULATE_FAIL;
219 } else {
220 vcpu->arch.pc = branch_pc;
221 kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
222 }
223 } else
224 vcpu->arch.pc += 4;
225
226 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
227
228 return er;
229}
230
231/* Every time the COMPARE register is written, we need to decide when to fire
232 * the timer that represents timer ticks to the guest.
233 *
234 */
235enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
236{
237 struct mips_coproc *cop0 = vcpu->arch.cop0;
238 enum emulation_result er = EMULATE_DONE;
239
240 /* If COUNT is enabled */
241 if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
242 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
243 hrtimer_start(&vcpu->arch.comparecount_timer,
244 ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
245 } else {
246 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
247 }
248
249 return er;
250}
251
252enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
253{
254 struct mips_coproc *cop0 = vcpu->arch.cop0;
255 enum emulation_result er = EMULATE_DONE;
256
257 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
258 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
259 kvm_read_c0_guest_epc(cop0));
260 kvm_clear_c0_guest_status(cop0, ST0_EXL);
261 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
262
263 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
264 kvm_clear_c0_guest_status(cop0, ST0_ERL);
265 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
266 } else {
267 printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
268 vcpu->arch.pc);
269 er = EMULATE_FAIL;
270 }
271
272 return er;
273}
274
275enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
276{
277 enum emulation_result er = EMULATE_DONE;
278
279 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
280 vcpu->arch.pending_exceptions);
281
282 ++vcpu->stat.wait_exits;
283 trace_kvm_exit(vcpu, WAIT_EXITS);
284 if (!vcpu->arch.pending_exceptions) {
285 vcpu->arch.wait = 1;
286 kvm_vcpu_block(vcpu);
287
288 /* If we are runnable, then definitely go off to user space to check if any
289 * I/O interrupts are pending.
290 */
291 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
292 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
293 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
294 }
295 }
296
297 return er;
298}
299
300/* XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
301 * that we catch it if things ever change
302 */
303enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
304{
305 struct mips_coproc *cop0 = vcpu->arch.cop0;
306 enum emulation_result er = EMULATE_FAIL;
307 uint32_t pc = vcpu->arch.pc;
308
309 printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
310 return er;
311}
312
313/* Write Guest TLB Entry @ Index */
314enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
315{
316 struct mips_coproc *cop0 = vcpu->arch.cop0;
317 int index = kvm_read_c0_guest_index(cop0);
318 enum emulation_result er = EMULATE_DONE;
319 struct kvm_mips_tlb *tlb = NULL;
320 uint32_t pc = vcpu->arch.pc;
321
322 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
323 printk("%s: illegal index: %d\n", __func__, index);
324 printk
325 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
326 pc, index, kvm_read_c0_guest_entryhi(cop0),
327 kvm_read_c0_guest_entrylo0(cop0),
328 kvm_read_c0_guest_entrylo1(cop0),
329 kvm_read_c0_guest_pagemask(cop0));
330 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
331 }
332
333 tlb = &vcpu->arch.guest_tlb[index];
334#if 1
335 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
336 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
337#endif
338
339 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
340 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
341 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
342 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
343
344 kvm_debug
345 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
346 pc, index, kvm_read_c0_guest_entryhi(cop0),
347 kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
348 kvm_read_c0_guest_pagemask(cop0));
349
350 return er;
351}
352
353/* Write Guest TLB Entry @ Random Index */
354enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
355{
356 struct mips_coproc *cop0 = vcpu->arch.cop0;
357 enum emulation_result er = EMULATE_DONE;
358 struct kvm_mips_tlb *tlb = NULL;
359 uint32_t pc = vcpu->arch.pc;
360 int index;
361
362#if 1
363 get_random_bytes(&index, sizeof(index));
364 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
365#else
366 index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
367#endif
368
369 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
370 printk("%s: illegal index: %d\n", __func__, index);
371 return EMULATE_FAIL;
372 }
373
374 tlb = &vcpu->arch.guest_tlb[index];
375
376#if 1
377 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
378 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
379#endif
380
381 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
382 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
383 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
384 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
385
386 kvm_debug
387 ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
388 pc, index, kvm_read_c0_guest_entryhi(cop0),
389 kvm_read_c0_guest_entrylo0(cop0),
390 kvm_read_c0_guest_entrylo1(cop0));
391
392 return er;
393}
394
395enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
396{
397 struct mips_coproc *cop0 = vcpu->arch.cop0;
398 long entryhi = kvm_read_c0_guest_entryhi(cop0);
399 enum emulation_result er = EMULATE_DONE;
400 uint32_t pc = vcpu->arch.pc;
401 int index = -1;
402
403 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
404
405 kvm_write_c0_guest_index(cop0, index);
406
407 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
408 index);
409
410 return er;
411}
412
413enum emulation_result
414kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
415 struct kvm_run *run, struct kvm_vcpu *vcpu)
416{
417 struct mips_coproc *cop0 = vcpu->arch.cop0;
418 enum emulation_result er = EMULATE_DONE;
419 int32_t rt, rd, copz, sel, co_bit, op;
420 uint32_t pc = vcpu->arch.pc;
421 unsigned long curr_pc;
422
423 /*
424 * Update PC and hold onto current PC in case there is
425 * an error and we want to rollback the PC
426 */
427 curr_pc = vcpu->arch.pc;
428 er = update_pc(vcpu, cause);
429 if (er == EMULATE_FAIL) {
430 return er;
431 }
432
433 copz = (inst >> 21) & 0x1f;
434 rt = (inst >> 16) & 0x1f;
435 rd = (inst >> 11) & 0x1f;
436 sel = inst & 0x7;
437 co_bit = (inst >> 25) & 1;
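	/*
	 * MFC0/MTC0 encoding note: bits 25..21 are the COP0 sub-op (copz),
	 * bit 25 is the CO bit, bits 20..16 are rt, bits 15..11 are rd and
	 * bits 2..0 are the sel field.
	 */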
438
439 /* Verify that the register is valid */
440 if (rd > MIPS_CP0_DESAVE) {
441 printk("Invalid rd: %d\n", rd);
442 er = EMULATE_FAIL;
443 goto done;
444 }
445
446 if (co_bit) {
447 op = (inst) & 0xff;
448
449 switch (op) {
450 case tlbr_op: /* Read indexed TLB entry */
451 er = kvm_mips_emul_tlbr(vcpu);
452 break;
453 case tlbwi_op: /* Write indexed */
454 er = kvm_mips_emul_tlbwi(vcpu);
455 break;
456 case tlbwr_op: /* Write random */
457 er = kvm_mips_emul_tlbwr(vcpu);
458 break;
459 case tlbp_op: /* TLB Probe */
460 er = kvm_mips_emul_tlbp(vcpu);
461 break;
462 case rfe_op:
463 printk("!!!COP0_RFE!!!\n");
464 break;
465 case eret_op:
466 er = kvm_mips_emul_eret(vcpu);
467 goto dont_update_pc;
468 break;
469 case wait_op:
470 er = kvm_mips_emul_wait(vcpu);
471 break;
472 }
473 } else {
474 switch (copz) {
475 case mfc_op:
476#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
477 cop0->stat[rd][sel]++;
478#endif
479 /* Get reg */
480 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
481 /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
482 vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
483 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
484 vcpu->arch.gprs[rt] = 0x0;
485#ifdef CONFIG_KVM_MIPS_DYN_TRANS
486 kvm_mips_trans_mfc0(inst, opc, vcpu);
487#endif
488 }
489 else {
490 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
491
492#ifdef CONFIG_KVM_MIPS_DYN_TRANS
493 kvm_mips_trans_mfc0(inst, opc, vcpu);
494#endif
495 }
496
497 kvm_debug
498 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
499 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
500
501 break;
502
503 case dmfc_op:
504 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
505 break;
506
507 case mtc_op:
508#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
509 cop0->stat[rd][sel]++;
510#endif
511 if ((rd == MIPS_CP0_TLB_INDEX)
512 && (vcpu->arch.gprs[rt] >=
513 KVM_MIPS_GUEST_TLB_SIZE)) {
514 printk("Invalid TLB Index: %ld",
515 vcpu->arch.gprs[rt]);
516 er = EMULATE_FAIL;
517 break;
518 }
519#define C0_EBASE_CORE_MASK 0xff
520 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
521 /* Preserve CORE number */
522 kvm_change_c0_guest_ebase(cop0,
523 ~(C0_EBASE_CORE_MASK),
524 vcpu->arch.gprs[rt]);
525 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
526 kvm_read_c0_guest_ebase(cop0));
527 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
528 uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
529 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
530 &&
531 (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
532 != nasid)) {
533
534 kvm_debug
535 ("MTCz, change ASID from %#lx to %#lx\n",
536 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
537 ASID_MASK(vcpu->arch.gprs[rt]));
538
539 /* Blow away the shadow host TLBs */
540 kvm_mips_flush_host_tlb(1);
541 }
542 kvm_write_c0_guest_entryhi(cop0,
543 vcpu->arch.gprs[rt]);
544 }
545 /* Are we writing to COUNT */
546 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
547 /* Linux doesn't seem to write into COUNT; for now, such
548 * writes are silently ignored
549 */
550 /*er = EMULATE_FAIL; */
551 goto done;
552 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
553 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
554 pc, kvm_read_c0_guest_compare(cop0),
555 vcpu->arch.gprs[rt]);
556
557 /* If we are writing to COMPARE */
558 /* Clear pending timer interrupt, if any */
559 kvm_mips_callbacks->dequeue_timer_int(vcpu);
560 kvm_write_c0_guest_compare(cop0,
561 vcpu->arch.gprs[rt]);
562 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
563 kvm_write_c0_guest_status(cop0,
564 vcpu->arch.gprs[rt]);
565 /* Make sure that CU1 and NMI bits are never set */
566 kvm_clear_c0_guest_status(cop0,
567 (ST0_CU1 | ST0_NMI));
568
569#ifdef CONFIG_KVM_MIPS_DYN_TRANS
570 kvm_mips_trans_mtc0(inst, opc, vcpu);
571#endif
572 } else {
573 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
574#ifdef CONFIG_KVM_MIPS_DYN_TRANS
575 kvm_mips_trans_mtc0(inst, opc, vcpu);
576#endif
577 }
578
579 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
580 rd, sel, cop0->reg[rd][sel]);
581 break;
582
583 case dmtc_op:
584 printk
585 ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
586 vcpu->arch.pc, rt, rd, sel);
587 er = EMULATE_FAIL;
588 break;
589
590 case mfmcz_op:
591#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
592 cop0->stat[MIPS_CP0_STATUS][0]++;
593#endif
594 if (rt != 0) {
595 vcpu->arch.gprs[rt] =
596 kvm_read_c0_guest_status(cop0);
597 }
598 /* EI */
599 if (inst & 0x20) {
600 kvm_debug("[%#lx] mfmcz_op: EI\n",
601 vcpu->arch.pc);
602 kvm_set_c0_guest_status(cop0, ST0_IE);
603 } else {
604 kvm_debug("[%#lx] mfmcz_op: DI\n",
605 vcpu->arch.pc);
606 kvm_clear_c0_guest_status(cop0, ST0_IE);
607 }
608
609 break;
610
611 case wrpgpr_op:
612 {
613 uint32_t css =
614 cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
615 uint32_t pss =
616 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
617 /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
618 if (css || pss) {
619 er = EMULATE_FAIL;
620 break;
621 }
622 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
623 vcpu->arch.gprs[rt]);
624 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
625 }
626 break;
627 default:
628 printk
629 ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
630 vcpu->arch.pc, copz);
631 er = EMULATE_FAIL;
632 break;
633 }
634 }
635
636done:
637 /*
638 * Rollback PC only if emulation was unsuccessful
639 */
640 if (er == EMULATE_FAIL) {
641 vcpu->arch.pc = curr_pc;
642 }
643
644dont_update_pc:
645 /*
646 * This is for special instructions whose emulation
647 * updates the PC, so do not overwrite the PC under
648 * any circumstances
649 */
650
651 return er;
652}
653
654enum emulation_result
655kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
656 struct kvm_run *run, struct kvm_vcpu *vcpu)
657{
658 enum emulation_result er = EMULATE_DO_MMIO;
659 int32_t op, base, rt, offset;
660 uint32_t bytes;
661 void *data = run->mmio.data;
662 unsigned long curr_pc;
663
664 /*
665 * Update PC and hold onto current PC in case there is
666 * an error and we want to rollback the PC
667 */
668 curr_pc = vcpu->arch.pc;
669 er = update_pc(vcpu, cause);
670 if (er == EMULATE_FAIL)
671 return er;
672
673 rt = (inst >> 16) & 0x1f;
674 base = (inst >> 21) & 0x1f;
675 offset = inst & 0xffff;
676 op = (inst >> 26) & 0x3f;
677
678 switch (op) {
679 case sb_op:
680 bytes = 1;
681 if (bytes > sizeof(run->mmio.data)) {
682 kvm_err("%s: bad MMIO length: %d\n", __func__,
683 run->mmio.len);
684 }
685 run->mmio.phys_addr =
686 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
687 host_cp0_badvaddr);
688 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
689 er = EMULATE_FAIL;
690 break;
691 }
692 run->mmio.len = bytes;
693 run->mmio.is_write = 1;
694 vcpu->mmio_needed = 1;
695 vcpu->mmio_is_write = 1;
696 *(u8 *) data = vcpu->arch.gprs[rt];
697 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
698 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
699 *(uint8_t *) data);
700
701 break;
702
703 case sw_op:
704 bytes = 4;
705 if (bytes > sizeof(run->mmio.data)) {
706 kvm_err("%s: bad MMIO length: %d\n", __func__,
707 run->mmio.len);
708 }
709 run->mmio.phys_addr =
710 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
711 host_cp0_badvaddr);
712 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
713 er = EMULATE_FAIL;
714 break;
715 }
716
717 run->mmio.len = bytes;
718 run->mmio.is_write = 1;
719 vcpu->mmio_needed = 1;
720 vcpu->mmio_is_write = 1;
721 *(uint32_t *) data = vcpu->arch.gprs[rt];
722
723 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
724 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
725 vcpu->arch.gprs[rt], *(uint32_t *) data);
726 break;
727
728 case sh_op:
729 bytes = 2;
730 if (bytes > sizeof(run->mmio.data)) {
731 kvm_err("%s: bad MMIO length: %d\n", __func__,
732 run->mmio.len);
733 }
734 run->mmio.phys_addr =
735 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
736 host_cp0_badvaddr);
737 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
738 er = EMULATE_FAIL;
739 break;
740 }
741
742 run->mmio.len = bytes;
743 run->mmio.is_write = 1;
744 vcpu->mmio_needed = 1;
745 vcpu->mmio_is_write = 1;
746 *(uint16_t *) data = vcpu->arch.gprs[rt];
747
748 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
749 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
750 vcpu->arch.gprs[rt], *(uint32_t *) data);
751 break;
752
753 default:
754 printk("Store not yet supported");
755 er = EMULATE_FAIL;
756 break;
757 }
758
759 /*
760 * Rollback PC if emulation was unsuccessful
761 */
762 if (er == EMULATE_FAIL) {
763 vcpu->arch.pc = curr_pc;
764 }
765
766 return er;
767}
768
769enum emulation_result
770kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
771 struct kvm_run *run, struct kvm_vcpu *vcpu)
772{
773 enum emulation_result er = EMULATE_DO_MMIO;
774 int32_t op, base, rt, offset;
775 uint32_t bytes;
776
777 rt = (inst >> 16) & 0x1f;
778 base = (inst >> 21) & 0x1f;
779 offset = inst & 0xffff;
780 op = (inst >> 26) & 0x3f;
781
782 vcpu->arch.pending_load_cause = cause;
783 vcpu->arch.io_gpr = rt;
784
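	/* The load is completed later by kvm_mips_complete_mmio_load(),
	 * once userspace has filled in run->mmio.data. */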
785 switch (op) {
786 case lw_op:
787 bytes = 4;
788 if (bytes > sizeof(run->mmio.data)) {
789 kvm_err("%s: bad MMIO length: %d\n", __func__,
790 run->mmio.len);
791 er = EMULATE_FAIL;
792 break;
793 }
794 run->mmio.phys_addr =
795 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
796 host_cp0_badvaddr);
797 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
798 er = EMULATE_FAIL;
799 break;
800 }
801
802 run->mmio.len = bytes;
803 run->mmio.is_write = 0;
804 vcpu->mmio_needed = 1;
805 vcpu->mmio_is_write = 0;
806 break;
807
808 case lh_op:
809 case lhu_op:
810 bytes = 2;
811 if (bytes > sizeof(run->mmio.data)) {
812 kvm_err("%s: bad MMIO length: %d\n", __func__,
813 run->mmio.len);
814 er = EMULATE_FAIL;
815 break;
816 }
817 run->mmio.phys_addr =
818 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
819 host_cp0_badvaddr);
820 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
821 er = EMULATE_FAIL;
822 break;
823 }
824
825 run->mmio.len = bytes;
826 run->mmio.is_write = 0;
827 vcpu->mmio_needed = 1;
828 vcpu->mmio_is_write = 0;
829
830 if (op == lh_op)
831 vcpu->mmio_needed = 2;
832 else
833 vcpu->mmio_needed = 1;
834
835 break;
836
837 case lbu_op:
838 case lb_op:
839 bytes = 1;
840 if (bytes > sizeof(run->mmio.data)) {
841 kvm_err("%s: bad MMIO length: %d\n", __func__,
842 run->mmio.len);
843 er = EMULATE_FAIL;
844 break;
845 }
846 run->mmio.phys_addr =
847 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
848 host_cp0_badvaddr);
849 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
850 er = EMULATE_FAIL;
851 break;
852 }
853
854 run->mmio.len = bytes;
855 run->mmio.is_write = 0;
856 vcpu->mmio_is_write = 0;
857
858 if (op == lb_op)
859 vcpu->mmio_needed = 2;
860 else
861 vcpu->mmio_needed = 1;
862
863 break;
864
865 default:
866 printk("Load not yet supported");
867 er = EMULATE_FAIL;
868 break;
869 }
870
871 return er;
872}
873
874int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
875{
876 unsigned long offset = (va & ~PAGE_MASK);
877 struct kvm *kvm = vcpu->kvm;
878 unsigned long pa;
879 gfn_t gfn;
880 pfn_t pfn;
881
882 gfn = va >> PAGE_SHIFT;
883
884 if (gfn >= kvm->arch.guest_pmap_npages) {
885 printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
886 kvm_mips_dump_host_tlbs();
887 kvm_arch_vcpu_dump_regs(vcpu);
888 return -1;
889 }
890 pfn = kvm->arch.guest_pmap[gfn];
891 pa = (pfn << PAGE_SHIFT) | offset;
892
893 printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
894
895 mips32_SyncICache(CKSEG0ADDR(pa), 32);
896 return 0;
897}
898
899#define MIPS_CACHE_OP_INDEX_INV 0x0
900#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
901#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
902#define MIPS_CACHE_OP_IMP 0x3
903#define MIPS_CACHE_OP_HIT_INV 0x4
904#define MIPS_CACHE_OP_FILL_WB_INV 0x5
905#define MIPS_CACHE_OP_HIT_HB 0x6
906#define MIPS_CACHE_OP_FETCH_LOCK 0x7
907
908#define MIPS_CACHE_ICACHE 0x0
909#define MIPS_CACHE_DCACHE 0x1
910#define MIPS_CACHE_SEC 0x3
911
912enum emulation_result
913kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
914 struct kvm_run *run, struct kvm_vcpu *vcpu)
915{
916 struct mips_coproc *cop0 = vcpu->arch.cop0;
917 extern void (*r4k_blast_dcache) (void);
918 extern void (*r4k_blast_icache) (void);
919 enum emulation_result er = EMULATE_DONE;
920 int32_t offset, cache, op_inst, op, base;
921 struct kvm_vcpu_arch *arch = &vcpu->arch;
922 unsigned long va;
923 unsigned long curr_pc;
924
925 /*
926 * Update PC and hold onto current PC in case there is
927 * an error and we want to rollback the PC
928 */
929 curr_pc = vcpu->arch.pc;
930 er = update_pc(vcpu, cause);
931 if (er == EMULATE_FAIL)
932 return er;
933
934 base = (inst >> 21) & 0x1f;
935 op_inst = (inst >> 16) & 0x1f;
936 offset = inst & 0xffff;
937 cache = (inst >> 16) & 0x3;
938 op = (inst >> 18) & 0x7;
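	/* The CACHE op field splits into a cache selector (bits 17..16)
	 * and an operation (bits 20..18), decoded separately above. */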
939
940 va = arch->gprs[base] + offset;
941
942 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
943 cache, op, base, arch->gprs[base], offset);
944
945 /* Treat INDEX_INV as a no-op; Linux issues it on startup to invalidate
946 * the caches entirely by stepping through all the ways/indexes
947 */
948 if (op == MIPS_CACHE_OP_INDEX_INV) {
949 kvm_debug
950 ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
951 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
952 arch->gprs[base], offset);
953
954 if (cache == MIPS_CACHE_DCACHE)
955 r4k_blast_dcache();
956 else if (cache == MIPS_CACHE_ICACHE)
957 r4k_blast_icache();
958 else {
959 printk("%s: unsupported CACHE INDEX operation\n",
960 __func__);
961 return EMULATE_FAIL;
962 }
963
964#ifdef CONFIG_KVM_MIPS_DYN_TRANS
965 kvm_mips_trans_cache_index(inst, opc, vcpu);
966#endif
967 goto done;
968 }
969
970 preempt_disable();
971 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
972
973 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
974 kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
975 }
976 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
977 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
978 int index;
979
980 /* If an entry already exists then skip */
981 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
982 goto skip_fault;
983 }
984
985 /* If the address is not in the guest TLB, give the guest a fault; the
986 * resulting handler will do the right thing
987 */
988 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
989 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
990
991 if (index < 0) {
992 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
993 vcpu->arch.host_cp0_badvaddr = va;
994 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
995 vcpu);
996 preempt_enable();
997 goto dont_update_pc;
998 } else {
999 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1000 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1001 if (!TLB_IS_VALID(*tlb, va)) {
1002 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1003 run, vcpu);
1004 preempt_enable();
1005 goto dont_update_pc;
1006 } else {
1007 /* We fault an entry from the guest tlb to the shadow host TLB */
1008 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1009 NULL,
1010 NULL);
1011 }
1012 }
1013 } else {
1014 printk
1015 ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1016 cache, op, base, arch->gprs[base], offset);
1017 er = EMULATE_FAIL;
1018 preempt_enable();
1019 goto dont_update_pc;
1020
1021 }
1022
1023skip_fault:
1024 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1025 if (cache == MIPS_CACHE_DCACHE
1026 && (op == MIPS_CACHE_OP_FILL_WB_INV
1027 || op == MIPS_CACHE_OP_HIT_INV)) {
1028 flush_dcache_line(va);
1029
1030#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1031 /* Replace the CACHE instruction with a SYNCI; not identical, but it avoids a trap */
1032 kvm_mips_trans_cache_va(inst, opc, vcpu);
1033#endif
1034 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
1035 flush_dcache_line(va);
1036 flush_icache_line(va);
1037
1038#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1039 /* Replace the CACHE instruction, with a SYNCI */
1040 kvm_mips_trans_cache_va(inst, opc, vcpu);
1041#endif
1042 } else {
1043 printk
1044 ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1045 cache, op, base, arch->gprs[base], offset);
1046 er = EMULATE_FAIL;
1047 preempt_enable();
1048 goto dont_update_pc;
1049 }
1050
1051 preempt_enable();
1052
1053 dont_update_pc:
1054 /*
1055 * Rollback PC
1056 */
1057 vcpu->arch.pc = curr_pc;
1058 done:
1059 return er;
1060}
1061
1062enum emulation_result
1063kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1064 struct kvm_run *run, struct kvm_vcpu *vcpu)
1065{
1066 enum emulation_result er = EMULATE_DONE;
1067 uint32_t inst;
1068
1069 /*
1070 * Fetch the instruction.
1071 */
1072 if (cause & CAUSEF_BD) {
1073 opc += 1;
1074 }
1075
1076 inst = kvm_get_inst(opc, vcpu);
1077
1078 switch (((union mips_instruction)inst).r_format.opcode) {
1079 case cop0_op:
1080 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1081 break;
1082 case sb_op:
1083 case sh_op:
1084 case sw_op:
1085 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1086 break;
1087 case lb_op:
1088 case lbu_op:
1089 case lhu_op:
1090 case lh_op:
1091 case lw_op:
1092 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1093 break;
1094
1095 case cache_op:
1096 ++vcpu->stat.cache_exits;
1097 trace_kvm_exit(vcpu, CACHE_EXITS);
1098 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1099 break;
1100
1101 default:
1102 printk("Instruction emulation not supported (%p/%#x)\n", opc,
1103 inst);
1104 kvm_arch_vcpu_dump_regs(vcpu);
1105 er = EMULATE_FAIL;
1106 break;
1107 }
1108
1109 return er;
1110}
1111
1112enum emulation_result
1113kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
1114 struct kvm_run *run, struct kvm_vcpu *vcpu)
1115{
1116 struct mips_coproc *cop0 = vcpu->arch.cop0;
1117 struct kvm_vcpu_arch *arch = &vcpu->arch;
1118 enum emulation_result er = EMULATE_DONE;
1119
1120 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1121 /* save old pc */
1122 kvm_write_c0_guest_epc(cop0, arch->pc);
1123 kvm_set_c0_guest_status(cop0, ST0_EXL);
1124
1125 if (cause & CAUSEF_BD)
1126 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1127 else
1128 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1129
1130 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1131
1132 kvm_change_c0_guest_cause(cop0, (0xff),
1133 (T_SYSCALL << CAUSEB_EXCCODE));
1134
1135 /* Set PC to the exception entry point */
1136 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1137
1138 } else {
1139 printk("Trying to deliver SYSCALL when EXL is already set\n");
1140 er = EMULATE_FAIL;
1141 }
1142
1143 return er;
1144}
1145
1146enum emulation_result
1147kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1148 struct kvm_run *run, struct kvm_vcpu *vcpu)
1149{
1150 struct mips_coproc *cop0 = vcpu->arch.cop0;
1151 struct kvm_vcpu_arch *arch = &vcpu->arch;
1152 enum emulation_result er = EMULATE_DONE;
1153 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1154 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1155
1156 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1157 /* save old pc */
1158 kvm_write_c0_guest_epc(cop0, arch->pc);
1159 kvm_set_c0_guest_status(cop0, ST0_EXL);
1160
1161 if (cause & CAUSEF_BD)
1162 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1163 else
1164 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1165
1166 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1167 arch->pc);
1168
1169 /* set pc to the exception entry point */
1170 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1171
1172 } else {
1173 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1174 arch->pc);
1175
1176 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1177 }
1178
1179 kvm_change_c0_guest_cause(cop0, (0xff),
1180 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1181
1182 /* setup badvaddr, context and entryhi registers for the guest */
1183 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1184 /* XXXKYMA: is the context register used by linux??? */
1185 kvm_write_c0_guest_entryhi(cop0, entryhi);
1186 /* Blow away the shadow host TLBs */
1187 kvm_mips_flush_host_tlb(1);
1188
1189 return er;
1190}
1191
1192enum emulation_result
1193kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1194 struct kvm_run *run, struct kvm_vcpu *vcpu)
1195{
1196 struct mips_coproc *cop0 = vcpu->arch.cop0;
1197 struct kvm_vcpu_arch *arch = &vcpu->arch;
1198 enum emulation_result er = EMULATE_DONE;
1199 unsigned long entryhi =
1200 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1201 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1202
1203 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1204 /* save old pc */
1205 kvm_write_c0_guest_epc(cop0, arch->pc);
1206 kvm_set_c0_guest_status(cop0, ST0_EXL);
1207
1208 if (cause & CAUSEF_BD)
1209 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1210 else
1211 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1212
1213 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1214 arch->pc);
1215
1216 /* set pc to the exception entry point */
1217 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1218
1219 } else {
1220 kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
1221 arch->pc);
1222 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1223 }
1224
1225 kvm_change_c0_guest_cause(cop0, (0xff),
1226 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1227
1228 /* setup badvaddr, context and entryhi registers for the guest */
1229 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1230 /* XXXKYMA: is the context register used by linux??? */
1231 kvm_write_c0_guest_entryhi(cop0, entryhi);
1232 /* Blow away the shadow host TLBs */
1233 kvm_mips_flush_host_tlb(1);
1234
1235 return er;
1236}
1237
1238enum emulation_result
1239kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1240 struct kvm_run *run, struct kvm_vcpu *vcpu)
1241{
1242 struct mips_coproc *cop0 = vcpu->arch.cop0;
1243 struct kvm_vcpu_arch *arch = &vcpu->arch;
1244 enum emulation_result er = EMULATE_DONE;
1245 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1246 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1247
1248 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1249 /* save old pc */
1250 kvm_write_c0_guest_epc(cop0, arch->pc);
1251 kvm_set_c0_guest_status(cop0, ST0_EXL);
1252
1253 if (cause & CAUSEF_BD)
1254 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1255 else
1256 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1257
1258 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1259 arch->pc);
1260
1261 /* Set PC to the exception entry point */
1262 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1263 } else {
1264 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1265 arch->pc);
1266 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1267 }
1268
1269 kvm_change_c0_guest_cause(cop0, (0xff),
1270 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1271
1272 /* setup badvaddr, context and entryhi registers for the guest */
1273 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1274 /* XXXKYMA: is the context register used by linux??? */
1275 kvm_write_c0_guest_entryhi(cop0, entryhi);
1276 /* Blow away the shadow host TLBs */
1277 kvm_mips_flush_host_tlb(1);
1278
1279 return er;
1280}
1281
1282enum emulation_result
1283kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1284 struct kvm_run *run, struct kvm_vcpu *vcpu)
1285{
1286 struct mips_coproc *cop0 = vcpu->arch.cop0;
1287 struct kvm_vcpu_arch *arch = &vcpu->arch;
1288 enum emulation_result er = EMULATE_DONE;
1289 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1290 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1291
1292 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1293 /* save old pc */
1294 kvm_write_c0_guest_epc(cop0, arch->pc);
1295 kvm_set_c0_guest_status(cop0, ST0_EXL);
1296
1297 if (cause & CAUSEF_BD)
1298 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1299 else
1300 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1301
1302 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1303 arch->pc);
1304
1305 /* Set PC to the exception entry point */
1306 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1307 } else {
1308 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1309 arch->pc);
1310 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1311 }
1312
1313 kvm_change_c0_guest_cause(cop0, (0xff),
1314 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1315
1316 /* setup badvaddr, context and entryhi registers for the guest */
1317 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1318 /* XXXKYMA: is the context register used by linux??? */
1319 kvm_write_c0_guest_entryhi(cop0, entryhi);
1320 /* Blow away the shadow host TLBs */
1321 kvm_mips_flush_host_tlb(1);
1322
1323 return er;
1324}
1325
1326/* TLBMOD: store into address matching TLB with Dirty bit off */
1327enum emulation_result
1328kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1329 struct kvm_run *run, struct kvm_vcpu *vcpu)
1330{
1331 enum emulation_result er = EMULATE_DONE;
1332
1333#ifdef DEBUG
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
	int index;

1334 /*
1335 * If the address is not in the guest TLB, then we are in trouble
1336 */
1337 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1338 if (index < 0) {
1339 /* XXXKYMA Invalidate and retry */
1340 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1341 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1342 __func__, entryhi);
1343 kvm_mips_dump_guest_tlbs(vcpu);
1344 kvm_mips_dump_host_tlbs();
1345 return EMULATE_FAIL;
1346 }
1347#endif
1348
1349 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1350 return er;
1351}
1352
1353enum emulation_result
1354kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1355 struct kvm_run *run, struct kvm_vcpu *vcpu)
1356{
1357 struct mips_coproc *cop0 = vcpu->arch.cop0;
1358 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1359 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1360 struct kvm_vcpu_arch *arch = &vcpu->arch;
1361 enum emulation_result er = EMULATE_DONE;
1362
1363 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1364 /* save old pc */
1365 kvm_write_c0_guest_epc(cop0, arch->pc);
1366 kvm_set_c0_guest_status(cop0, ST0_EXL);
1367
1368 if (cause & CAUSEF_BD)
1369 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1370 else
1371 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1372
1373 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1374 arch->pc);
1375
1376 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1377 } else {
1378 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1379 arch->pc);
1380 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1381 }
1382
1383 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1384
1385 /* setup badvaddr, context and entryhi registers for the guest */
1386 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1387 /* XXXKYMA: is the context register used by linux??? */
1388 kvm_write_c0_guest_entryhi(cop0, entryhi);
1389 /* Blow away the shadow host TLBs */
1390 kvm_mips_flush_host_tlb(1);
1391
1392 return er;
1393}
1394
1395enum emulation_result
1396kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1397 struct kvm_run *run, struct kvm_vcpu *vcpu)
1398{
1399 struct mips_coproc *cop0 = vcpu->arch.cop0;
1400 struct kvm_vcpu_arch *arch = &vcpu->arch;
1401 enum emulation_result er = EMULATE_DONE;
1402
1403 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1404 /* save old pc */
1405 kvm_write_c0_guest_epc(cop0, arch->pc);
1406 kvm_set_c0_guest_status(cop0, ST0_EXL);
1407
1408 if (cause & CAUSEF_BD)
1409 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1410 else
1411 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1412
1413 }
1414
1415 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1416
1417 kvm_change_c0_guest_cause(cop0, (0xff),
1418 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1419 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1420
1421 return er;
1422}
1423
1424enum emulation_result
1425kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1426 struct kvm_run *run, struct kvm_vcpu *vcpu)
1427{
1428 struct mips_coproc *cop0 = vcpu->arch.cop0;
1429 struct kvm_vcpu_arch *arch = &vcpu->arch;
1430 enum emulation_result er = EMULATE_DONE;
1431
1432 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1433 /* save old pc */
1434 kvm_write_c0_guest_epc(cop0, arch->pc);
1435 kvm_set_c0_guest_status(cop0, ST0_EXL);
1436
1437 if (cause & CAUSEF_BD)
1438 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1439 else
1440 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1441
1442 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1443
1444 kvm_change_c0_guest_cause(cop0, (0xff),
1445 (T_RES_INST << CAUSEB_EXCCODE));
1446
1447 /* Set PC to the exception entry point */
1448 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1449
1450 } else {
1451 kvm_err("Trying to deliver RI when EXL is already set\n");
1452 er = EMULATE_FAIL;
1453 }
1454
1455 return er;
1456}
1457
1458enum emulation_result
1459kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1460 struct kvm_run *run, struct kvm_vcpu *vcpu)
1461{
1462 struct mips_coproc *cop0 = vcpu->arch.cop0;
1463 struct kvm_vcpu_arch *arch = &vcpu->arch;
1464 enum emulation_result er = EMULATE_DONE;
1465
1466 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1467 /* save old pc */
1468 kvm_write_c0_guest_epc(cop0, arch->pc);
1469 kvm_set_c0_guest_status(cop0, ST0_EXL);
1470
1471 if (cause & CAUSEF_BD)
1472 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1473 else
1474 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1475
1476 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1477
1478 kvm_change_c0_guest_cause(cop0, (0xff),
1479 (T_BREAK << CAUSEB_EXCCODE));
1480
1481 /* Set PC to the exception entry point */
1482 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1483
1484 } else {
1485 printk("Trying to deliver BP when EXL is already set\n");
1486 er = EMULATE_FAIL;
1487 }
1488
1489 return er;
1490}
1491
1492/*
1493 * ll/sc, rdhwr, sync emulation
1494 */
1495
1496#define OPCODE 0xfc000000
1497#define BASE 0x03e00000
1498#define RT 0x001f0000
1499#define OFFSET 0x0000ffff
1500#define LL 0xc0000000
1501#define SC 0xe0000000
1502#define SPEC0 0x00000000
1503#define SPEC3 0x7c000000
1504#define RD 0x0000f800
1505#define FUNC 0x0000003f
1506#define SYNC 0x0000000f
1507#define RDHWR 0x0000003b
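/* Example: "rdhwr $3, $29" (read UserLocal into v1) encodes as
 * SPEC3 | (3 << 16) | (29 << 11) | RDHWR == 0x7c03e83b. */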
1508
1509enum emulation_result
1510kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1511 struct kvm_run *run, struct kvm_vcpu *vcpu)
1512{
1513 struct mips_coproc *cop0 = vcpu->arch.cop0;
1514 struct kvm_vcpu_arch *arch = &vcpu->arch;
1515 enum emulation_result er = EMULATE_DONE;
1516 unsigned long curr_pc;
1517 uint32_t inst;
1518
1519 /*
1520 * Update PC and hold onto current PC in case there is
1521 * an error and we want to rollback the PC
1522 */
1523 curr_pc = vcpu->arch.pc;
1524 er = update_pc(vcpu, cause);
1525 if (er == EMULATE_FAIL)
1526 return er;
1527
1528 /*
1529 * Fetch the instruction.
1530 */
1531 if (cause & CAUSEF_BD)
1532 opc += 1;
1533
1534 inst = kvm_get_inst(opc, vcpu);
1535
1536 if (inst == KVM_INVALID_INST) {
1537 printk("%s: Cannot get inst @ %p\n", __func__, opc);
1538 return EMULATE_FAIL;
1539 }
1540
1541 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1542 int rd = (inst & RD) >> 11;
1543 int rt = (inst & RT) >> 16;
1544 switch (rd) {
1545 case 0: /* CPU number */
1546 arch->gprs[rt] = 0;
1547 break;
1548 case 1: /* SYNCI length */
1549 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
1550 current_cpu_data.icache.linesz);
1551 break;
1552 case 2: /* Read count register */
1553 printk("RDHWR: Count register\n");
1554 arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
1555 break;
1556 case 3: /* Count register resolution */
1557 switch (current_cpu_data.cputype) {
1558 case CPU_20KC:
1559 case CPU_25KF:
1560 arch->gprs[rt] = 1;
1561 break;
1562 default:
1563 arch->gprs[rt] = 2;
1564 }
1565 break;
1566 case 29:
1567#if 1
1568 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
1569#else
1570 /* UserLocal not implemented */
1571 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1572#endif
1573 break;
1574
1575 default:
1576 printk("RDHWR not supported\n");
1577 er = EMULATE_FAIL;
1578 break;
1579 }
1580 } else {
1581 printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
1582 er = EMULATE_FAIL;
1583 }
1584
1585 /*
1586 * Rollback PC only if emulation was unsuccessful
1587 */
1588 if (er == EMULATE_FAIL) {
1589 vcpu->arch.pc = curr_pc;
1590 }
1591 return er;
1592}
1593
1594enum emulation_result
1595kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
1596{
1597 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1598 enum emulation_result er = EMULATE_DONE;
1599 unsigned long curr_pc;
1600
1601 if (run->mmio.len > sizeof(*gpr)) {
1602 printk("Bad MMIO length: %d", run->mmio.len);
1603 er = EMULATE_FAIL;
1604 goto done;
1605 }
1606
1607 /*
1608 * Update PC and hold onto current PC in case there is
1609 * an error and we want to rollback the PC
1610 */
1611 curr_pc = vcpu->arch.pc;
1612 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
1613 if (er == EMULATE_FAIL)
1614 return er;
1615
1616 switch (run->mmio.len) {
1617 case 4:
1618 *gpr = *(int32_t *) run->mmio.data;
1619 break;
1620
1621 case 2:
1622 if (vcpu->mmio_needed == 2)
1623 *gpr = *(int16_t *) run->mmio.data;
1624 else
1625 *gpr = *(uint16_t *) run->mmio.data;
1626
1627 break;
1628 case 1:
1629 if (vcpu->mmio_needed == 2)
1630 *gpr = *(int8_t *) run->mmio.data;
1631 else
1632 *gpr = *(u8 *) run->mmio.data;
1633 break;
1634 }
1635
1636 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
1637 kvm_debug
1638 ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
1639 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
1640 vcpu->mmio_needed);
1641
1642done:
1643 return er;
1644}
1645
1646static enum emulation_result
1647kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
1648 struct kvm_run *run, struct kvm_vcpu *vcpu)
1649{
1650 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1651 struct mips_coproc *cop0 = vcpu->arch.cop0;
1652 struct kvm_vcpu_arch *arch = &vcpu->arch;
1653 enum emulation_result er = EMULATE_DONE;
1654
1655 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1656 /* save old pc */
1657 kvm_write_c0_guest_epc(cop0, arch->pc);
1658 kvm_set_c0_guest_status(cop0, ST0_EXL);
1659
1660 if (cause & CAUSEF_BD)
1661 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1662 else
1663 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1664
1665 kvm_change_c0_guest_cause(cop0, (0xff),
1666 (exccode << CAUSEB_EXCCODE));
1667
1668 /* Set PC to the exception entry point */
1669 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1670 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1671
1672 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
1673 exccode, kvm_read_c0_guest_epc(cop0),
1674 kvm_read_c0_guest_badvaddr(cop0));
1675 } else {
1676 printk("Trying to deliver EXC when EXL is already set\n");
1677 er = EMULATE_FAIL;
1678 }
1679
1680 return er;
1681}
1682
1683enum emulation_result
1684kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
1685 struct kvm_run *run, struct kvm_vcpu *vcpu)
1686{
1687 enum emulation_result er = EMULATE_DONE;
1688 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1689 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1690
1691 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1692
1693 if (usermode) {
1694 switch (exccode) {
1695 case T_INT:
1696 case T_SYSCALL:
1697 case T_BREAK:
1698 case T_RES_INST:
1699 break;
1700
1701 case T_COP_UNUSABLE:
1702 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
1703 er = EMULATE_PRIV_FAIL;
1704 break;
1705
1706 case T_TLB_MOD:
1707 break;
1708
1709 case T_TLB_LD_MISS:
1710 /* If we are accessing guest kernel space, send an address error exception to the guest */
1711 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1712 printk("%s: LD MISS @ %#lx\n", __func__,
1713 badvaddr);
1714 cause &= ~0xff;
1715 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
1716 er = EMULATE_PRIV_FAIL;
1717 }
1718 break;
1719
1720 case T_TLB_ST_MISS:
1721 /* If we are accessing guest kernel space, send an address error exception to the guest */
1722 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1723 printk("%s: ST MISS @ %#lx\n", __func__,
1724 badvaddr);
1725 cause &= ~0xff;
1726 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
1727 er = EMULATE_PRIV_FAIL;
1728 }
1729 break;
1730
1731 case T_ADDR_ERR_ST:
1732 printk("%s: address error ST @ %#lx\n", __func__,
1733 badvaddr);
1734 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1735 cause &= ~0xff;
1736 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
1737 }
1738 er = EMULATE_PRIV_FAIL;
1739 break;
1740 case T_ADDR_ERR_LD:
1741 printk("%s: address error LD @ %#lx\n", __func__,
1742 badvaddr);
1743 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1744 cause &= ~0xff;
1745 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
1746 }
1747 er = EMULATE_PRIV_FAIL;
1748 break;
1749 default:
1750 er = EMULATE_PRIV_FAIL;
1751 break;
1752 }
1753 }
1754
1755 if (er == EMULATE_PRIV_FAIL) {
1756 kvm_mips_emulate_exc(cause, opc, run, vcpu);
1757 }
1758 return er;
1759}
1760
1761/* User Address (UA) fault. This can happen if:
1762 * (1) the TLB entry is not present/valid in either the guest or the shadow host
1763 * TLB; in this case we pass the fault on to the guest kernel and let it handle it.
1764 * (2) the TLB entry is present in the guest TLB but not in the shadow host TLB;
1765 * in this case we inject the entry from the guest TLB into the shadow host TLB.
1766 */
1767enum emulation_result
1768kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1769 struct kvm_run *run, struct kvm_vcpu *vcpu)
1770{
1771 enum emulation_result er = EMULATE_DONE;
1772 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1773 unsigned long va = vcpu->arch.host_cp0_badvaddr;
1774 int index;
1775
1776 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
1777 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
1778
1779 /* KVM would not have got the exception if this entry was valid in the shadow host TLB.
1780 * Check the guest TLB: if the entry is not there, send the guest an
1781 * exception. The guest exception handler should then inject an entry into the
1782 * guest TLB.
1783 */
1784 index = kvm_mips_guest_tlb_lookup(vcpu,
1785 (va & VPN2_MASK) |
1786 ASID_MASK(kvm_read_c0_guest_entryhi
1787 (vcpu->arch.cop0)));
1788 if (index < 0) {
1789 if (exccode == T_TLB_LD_MISS) {
1790 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
1791 } else if (exccode == T_TLB_ST_MISS) {
1792 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
1793 } else {
1794 printk("%s: invalid exc code: %d\n", __func__, exccode);
1795 er = EMULATE_FAIL;
1796 }
1797 } else {
1798 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1799
1800 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1801 if (!TLB_IS_VALID(*tlb, va)) {
1802 if (exccode == T_TLB_LD_MISS) {
1803 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
1804 vcpu);
1805 } else if (exccode == T_TLB_ST_MISS) {
1806 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
1807 vcpu);
1808 } else {
1809 printk("%s: invalid exc code: %d\n", __func__,
1810 exccode);
1811 er = EMULATE_FAIL;
1812 }
1813 } else {
1814#ifdef DEBUG
1815 kvm_debug
1816 ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
1817 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
1818#endif
1819 /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
1820 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
1821 NULL);
1822 }
1823 }
1824
1825 return er;
1826}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644
index 000000000000..1e5de16afe29
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.c
@@ -0,0 +1,243 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Interrupt delivery
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16#include <linux/fs.h>
17#include <linux/bootmem.h>
18#include <asm/page.h>
19#include <asm/cacheflush.h>
20
21#include <linux/kvm_host.h>
22
23#include "kvm_mips_int.h"
24
25void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
26{
27 set_bit(priority, &vcpu->arch.pending_exceptions);
28}
29
30void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
31{
32 clear_bit(priority, &vcpu->arch.pending_exceptions);
33}
34
35void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
36{
37 /* Set the Cause bits to reflect the pending timer interrupt;
38 * the EXC code will be set when we are actually
39 * delivering the interrupt:
40 */
41 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
42
43 /* Queue up an INT exception for the core */
44 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
45
46}
47
48void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
49{
50 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
51 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
52}
53
54void
55kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
56{
57 int intr = (int)irq->irq;
58
59 /* Set the Cause bits to reflect the pending I/O interrupt;
60 * the EXC code will be set when we are actually
61 * delivering the interrupt:
62 */
63 switch (intr) {
64 case 2:
65 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
66 /* Queue up an INT exception for the core */
67 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
68 break;
69
70 case 3:
71 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
72 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
73 break;
74
75 case 4:
76 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
77 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
78 break;
79
80 default:
81 break;
82 }
83
84}
85
86void
87kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
88 struct kvm_mips_interrupt *irq)
89{
90 int intr = (int)irq->irq;
91 switch (intr) {
92 case -2:
93 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
94 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
95 break;
96
97 case -3:
98 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
99 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
100 break;
101
102 case -4:
103 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
104 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
105 break;
106
107 default:
108 break;
109 }
110
111}
112
113/* Deliver the interrupt of the corresponding priority, if possible. */
114int
115kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
116 uint32_t cause)
117{
118 int allowed = 0;
119 uint32_t exccode;
120
121 struct kvm_vcpu_arch *arch = &vcpu->arch;
122 struct mips_coproc *cop0 = vcpu->arch.cop0;
123
124 switch (priority) {
125 case MIPS_EXC_INT_TIMER:
126 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
127 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
128 && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
129 allowed = 1;
130 exccode = T_INT;
131 }
132 break;
133
134 case MIPS_EXC_INT_IO:
135 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
136 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
137 && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
138 allowed = 1;
139 exccode = T_INT;
140 }
141 break;
142
143 case MIPS_EXC_INT_IPI_1:
144 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
145 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
146 && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
147 allowed = 1;
148 exccode = T_INT;
149 }
150 break;
151
152 case MIPS_EXC_INT_IPI_2:
153 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
154 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
155 && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
156 allowed = 1;
157 exccode = T_INT;
158 }
159 break;
160
161 default:
162 break;
163 }
164
165 /* Are we allowed to deliver the interrupt? */
166 if (allowed) {
167
168 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
169 /* save old pc */
170 kvm_write_c0_guest_epc(cop0, arch->pc);
171 kvm_set_c0_guest_status(cop0, ST0_EXL);
172
173 if (cause & CAUSEF_BD)
174 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
175 else
176 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
177
178 kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
179
180 } else
181 kvm_err("Trying to deliver interrupt when EXL is already set\n");
182
183 kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
184 (exccode << CAUSEB_EXCCODE));
185
186 /* XXXSL Set PC to the interrupt exception entry point */
187 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
188 arch->pc = KVM_GUEST_KSEG0 + 0x200;
189 else
190 arch->pc = KVM_GUEST_KSEG0 + 0x180;
191
192 clear_bit(priority, &vcpu->arch.pending_exceptions);
193 }
194
195 return allowed;
196}
197
198int
199kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
200 uint32_t cause)
201{
202 return 1;
203}
204
205void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
206{
207 unsigned long *pending = &vcpu->arch.pending_exceptions;
208 unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
209 unsigned int priority;
210
211 if (!(*pending) && !(*pending_clr))
212 return;
213
214 priority = __ffs(*pending_clr);
215 while (priority <= MIPS_EXC_MAX) {
216 if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
217 if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
218 break;
219 }
220
221 priority = find_next_bit(pending_clr,
222 BITS_PER_BYTE * sizeof(*pending_clr),
223 priority + 1);
224 }
225
226 priority = __ffs(*pending);
227 while (priority <= MIPS_EXC_MAX) {
228 if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
229 if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
230 break;
231 }
232
233 priority = find_next_bit(pending,
234 BITS_PER_BYTE * sizeof(*pending),
235 priority + 1);
236 }
237
238}
239
240int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
241{
242 return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
243}
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644
index 000000000000..20da7d29eede
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.h
@@ -0,0 +1,49 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Interrupts
7* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8* Authors: Sanjay Lal <sanjayl@kymasys.com>
9*/
10
11/* MIPS exception priorities: exceptions (including interrupts) are queued up
12 * for the guest in the order specified by their priorities
13 */
14
15#define MIPS_EXC_RESET 0
16#define MIPS_EXC_SRESET 1
17#define MIPS_EXC_DEBUG_ST 2
18#define MIPS_EXC_DEBUG 3
19#define MIPS_EXC_DDB 4
20#define MIPS_EXC_NMI 5
21#define MIPS_EXC_MCHK 6
22#define MIPS_EXC_INT_TIMER 7
23#define MIPS_EXC_INT_IO 8
24#define MIPS_EXC_EXECUTE 9
25#define MIPS_EXC_INT_IPI_1 10
26#define MIPS_EXC_INT_IPI_2 11
27#define MIPS_EXC_MAX 12
28/* XXXSL More to follow */
29
30#define C_TI (_ULCAST_(1) << 30)
31
32#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
33#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
34
35void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
36void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
37int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
38
39void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
40void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
41void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
42 struct kvm_mips_interrupt *irq);
43void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
44 struct kvm_mips_interrupt *irq);
45int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
46 uint32_t cause);
47int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
48 uint32_t cause);
49void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644
index 000000000000..86d3b4cc348b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_opcode.h
@@ -0,0 +1,24 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10/*
 11 * Define opcode values not defined in <asm/inst.h>
12 */
13
14#ifndef __KVM_MIPS_OPCODE_H__
15#define __KVM_MIPS_OPCODE_H__
16
17/* COP0 Ops */
18#define mfmcz_op 0x0b /* 01011 */
19#define wrpgpr_op 0x0e /* 01110 */
20
21/* COP0 opcodes (only if COP0 and CO=1): */
22#define wait_op 0x20 /* 100000 */
23
24#endif /* __KVM_MIPS_OPCODE_H__ */
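For reference, these values slot into the standard MIPS32 encoding: a COP0 instruction carries major opcode 0x10 in bits 31:26, an rs sub-opcode in bits 25:21 (where mfmcz_op and wrpgpr_op apply), and, when the CO bit (bit 25) is set, a function code in bits 5:0 (where wait_op applies). A small stand-alone decoding sketch under those assumptions; only the two opcode values come from this header, the field helpers are written for the example.

#include <stdint.h>
#include <stdio.h>

#define mfmcz_op 0x0b   /* from kvm_mips_opcode.h above */
#define wait_op  0x20   /* from kvm_mips_opcode.h above */

#define COP0_MAJOR 0x10 /* 010000b, the COP0 major opcode */

/* Field helpers written for this example, following the MIPS32 layout. */
static uint32_t major_op(uint32_t inst) { return inst >> 26; }          /* bits 31:26 */
static uint32_t rs_field(uint32_t inst) { return (inst >> 21) & 0x1f; } /* bits 25:21 */
static uint32_t co_bit(uint32_t inst)   { return (inst >> 25) & 0x1; }  /* bit 25   */
static uint32_t funct(uint32_t inst)    { return inst & 0x3f; }         /* bits 5:0 */

static const char *classify_cop0(uint32_t inst)
{
        if (major_op(inst) != COP0_MAJOR)
                return "not a COP0 instruction";
        if (co_bit(inst) && funct(inst) == wait_op)
                return "wait";
        if (rs_field(inst) == mfmcz_op)
                return "mfmc0 (di/ei)";
        return "other COP0 operation";
}

int main(void)
{
        printf("%s\n", classify_cop0(0x42000020)); /* 0x42000020 is WAIT */
        return 0;
}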
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644
index 000000000000..075904bcac1b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_stats.c
@@ -0,0 +1,82 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: COP0 access histogram
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/kvm_host.h>
13
14char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
15 "WAIT",
16 "CACHE",
17 "Signal",
18 "Interrupt",
19 "COP0/1 Unusable",
20 "TLB Mod",
21 "TLB Miss (LD)",
22 "TLB Miss (ST)",
23 "Address Err (ST)",
24 "Address Error (LD)",
25 "System Call",
26 "Reserved Inst",
27 "Break Inst",
28 "D-Cache Flushes",
29};
30
31char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
32 "Index",
33 "Random",
34 "EntryLo0",
35 "EntryLo1",
36 "Context",
37 "PG Mask",
38 "Wired",
39 "HWREna",
40 "BadVAddr",
41 "Count",
42 "EntryHI",
43 "Compare",
44 "Status",
45 "Cause",
46 "EXC PC",
47 "PRID",
48 "Config",
49 "LLAddr",
50 "Watch Lo",
51 "Watch Hi",
52 "X Context",
53 "Reserved",
54 "Impl Dep",
55 "Debug",
56 "DEPC",
57 "PerfCnt",
58 "ErrCtl",
59 "CacheErr",
60 "TagLo",
61 "TagHi",
62 "ErrorEPC",
63 "DESAVE"
64};
65
66int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
67{
68#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
69 int i, j;
70
71 printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
72 for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
73 for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
74 if (vcpu->arch.cop0->stat[i][j])
75 printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
76 vcpu->arch.cop0->stat[i][j]);
77 }
78 }
79#endif
80
81 return 0;
82}
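The dump routine above only prints the [register][select] histogram; the counters are expected to be incremented wherever COP0 accesses are emulated. A hedged sketch of what such an increment site could look like, assuming the cop0->stat[][] array and the CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS option from this patch; the helper itself is hypothetical, not the actual emulator code.

/* Hypothetical helper: record one emulated access to COP0 register (rd, sel).
 * Relies on the stat[][] array dumped by kvm_mips_dump_stats() above and is
 * a no-op unless the debug counters are configured in. */
static inline void kvm_mips_count_cop0_access(struct kvm_vcpu *vcpu,
                                              unsigned int rd, unsigned int sel)
{
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
        if (rd < N_MIPS_COPROC_REGS && sel < N_MIPS_COPROC_SEL)
                vcpu->arch.cop0->stat[rd][sel]++;
#endif
}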
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644
index 000000000000..89511a9258d3
--- /dev/null
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -0,0 +1,928 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS TLB handling: this file is part of the Linux host kernel so that
7* the TLB handlers run from KSEG0.
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <linux/smp.h>
16#include <linux/mm.h>
17#include <linux/delay.h>
18#include <linux/module.h>
19#include <linux/kvm_host.h>
20
21#include <asm/cpu.h>
22#include <asm/bootinfo.h>
23#include <asm/mmu_context.h>
24#include <asm/pgtable.h>
25#include <asm/cacheflush.h>
26
27#undef CONFIG_MIPS_MT
28#include <asm/r4kcache.h>
29#define CONFIG_MIPS_MT
30
31#define KVM_GUEST_PC_TLB 0
32#define KVM_GUEST_SP_TLB 1
33
34#define PRIx64 "llx"
35
36/* Invalidate a TLB entry by loading EntryHi with a unique KSEG0 VPN2 per index */
37#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
38
39atomic_t kvm_mips_instance;
40EXPORT_SYMBOL(kvm_mips_instance);
41
42/* These function pointers are initialized once the KVM module is loaded */
43pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
44EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
45
46void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
47EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
48
49bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
50EXPORT_SYMBOL(kvm_mips_is_error_pfn);
51
52uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
53{
54 return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
55}
56
57
58uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
59{
60 return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
61}
62
63inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
64{
65 return vcpu->kvm->arch.commpage_tlb;
66}
67
68
69/*
 70 * Structure defining a TLB entry data set.
71 */
72
73void kvm_mips_dump_host_tlbs(void)
74{
75 unsigned long old_entryhi;
76 unsigned long old_pagemask;
77 struct kvm_mips_tlb tlb;
78 unsigned long flags;
79 int i;
80
81 local_irq_save(flags);
82
83 old_entryhi = read_c0_entryhi();
84 old_pagemask = read_c0_pagemask();
85
86 printk("HOST TLBs:\n");
87 printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
88
89 for (i = 0; i < current_cpu_data.tlbsize; i++) {
90 write_c0_index(i);
91 mtc0_tlbw_hazard();
92
93 tlb_read();
94 tlbw_use_hazard();
95
96 tlb.tlb_hi = read_c0_entryhi();
97 tlb.tlb_lo0 = read_c0_entrylo0();
98 tlb.tlb_lo1 = read_c0_entrylo1();
99 tlb.tlb_mask = read_c0_pagemask();
100
101 printk("TLB%c%3d Hi 0x%08lx ",
102 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
103 i, tlb.tlb_hi);
104 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
105 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
106 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
107 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
108 (tlb.tlb_lo0 >> 3) & 7);
109 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
110 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
111 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
112 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
113 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
114 }
115 write_c0_entryhi(old_entryhi);
116 write_c0_pagemask(old_pagemask);
117 mtc0_tlbw_hazard();
118 local_irq_restore(flags);
119}
120
121void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
122{
123 struct mips_coproc *cop0 = vcpu->arch.cop0;
124 struct kvm_mips_tlb tlb;
125 int i;
126
127 printk("Guest TLBs:\n");
128 printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
129
130 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
131 tlb = vcpu->arch.guest_tlb[i];
132 printk("TLB%c%3d Hi 0x%08lx ",
133 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
134 i, tlb.tlb_hi);
135 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
136 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
137 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
138 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
139 (tlb.tlb_lo0 >> 3) & 7);
140 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
141 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
142 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
143 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
144 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
145 }
146}
147
148void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
149{
150 int i;
151 volatile struct kvm_mips_tlb tlb;
152
153 printk("Shadow TLBs:\n");
154 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
155 tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
156 printk("TLB%c%3d Hi 0x%08lx ",
157 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
158 i, tlb.tlb_hi);
159 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
160 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
161 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
162 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
163 (tlb.tlb_lo0 >> 3) & 7);
164 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
165 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
166 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
167 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
168 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
169 }
170}
171
172static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
173{
174 pfn_t pfn;
175
176 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
177 return;
178
179 pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
180
181 if (kvm_mips_is_error_pfn(pfn)) {
182 panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
183 }
184
185 kvm->arch.guest_pmap[gfn] = pfn;
186 return;
187}
188
189/* Translate guest KSEG0 addresses to Host PA */
190unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
191 unsigned long gva)
192{
193 gfn_t gfn;
194 uint32_t offset = gva & ~PAGE_MASK;
195 struct kvm *kvm = vcpu->kvm;
196
197 if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
198 kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
199 __builtin_return_address(0), gva);
200 return KVM_INVALID_PAGE;
201 }
202
203 gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
204
205 if (gfn >= kvm->arch.guest_pmap_npages) {
206 kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
207 gva);
208 return KVM_INVALID_PAGE;
209 }
210 kvm_mips_map_page(vcpu->kvm, gfn);
211 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
212}
213
214/* XXXKYMA: Must be called with interrupts disabled */
215/* set flush_dcache_mask == 0 if no dcache flush required */
216int
217kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
218 unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
219{
220 unsigned long flags;
221 unsigned long old_entryhi;
222 volatile int idx;
223
224 local_irq_save(flags);
225
226
227 old_entryhi = read_c0_entryhi();
228 write_c0_entryhi(entryhi);
229 mtc0_tlbw_hazard();
230
231 tlb_probe();
232 tlb_probe_hazard();
233 idx = read_c0_index();
234
 235	 if (idx >= current_cpu_data.tlbsize) {
236 kvm_err("%s: Invalid Index: %d\n", __func__, idx);
237 kvm_mips_dump_host_tlbs();
238 return -1;
239 }
240
241 if (idx < 0) {
242 idx = read_c0_random() % current_cpu_data.tlbsize;
243 write_c0_index(idx);
244 mtc0_tlbw_hazard();
245 }
246 write_c0_entrylo0(entrylo0);
247 write_c0_entrylo1(entrylo1);
248 mtc0_tlbw_hazard();
249
250 tlb_write_indexed();
251 tlbw_use_hazard();
252
253#ifdef DEBUG
254 if (debug) {
255 kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
256 "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
257 vcpu->arch.pc, idx, read_c0_entryhi(),
258 read_c0_entrylo0(), read_c0_entrylo1());
259 }
260#endif
261
262 /* Flush D-cache */
263 if (flush_dcache_mask) {
264 if (entrylo0 & MIPS3_PG_V) {
265 ++vcpu->stat.flush_dcache_exits;
266 flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
267 }
268 if (entrylo1 & MIPS3_PG_V) {
269 ++vcpu->stat.flush_dcache_exits;
270 flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
271 (0x1 << PAGE_SHIFT));
272 }
273 }
274
275 /* Restore old ASID */
276 write_c0_entryhi(old_entryhi);
277 mtc0_tlbw_hazard();
278 tlbw_use_hazard();
279 local_irq_restore(flags);
280 return 0;
281}
282
283
284/* XXXKYMA: Must be called with interrupts disabled */
285int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
286 struct kvm_vcpu *vcpu)
287{
288 gfn_t gfn;
289 pfn_t pfn0, pfn1;
290 unsigned long vaddr = 0;
291 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
292 int even;
293 struct kvm *kvm = vcpu->kvm;
294 const int flush_dcache_mask = 0;
295
296
297 if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
298 kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
299 kvm_mips_dump_host_tlbs();
300 return -1;
301 }
302
303 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
304 if (gfn >= kvm->arch.guest_pmap_npages) {
305 kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
306 gfn, badvaddr);
307 kvm_mips_dump_host_tlbs();
308 return -1;
309 }
310 even = !(gfn & 0x1);
311 vaddr = badvaddr & (PAGE_MASK << 1);
312
313 kvm_mips_map_page(vcpu->kvm, gfn);
314 kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
315
316 if (even) {
317 pfn0 = kvm->arch.guest_pmap[gfn];
318 pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
319 } else {
320 pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
321 pfn1 = kvm->arch.guest_pmap[gfn];
322 }
323
324 entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
325 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
326 (0x1 << 1);
327 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
328 (0x1 << 1);
329
330 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
331 flush_dcache_mask);
332}
333
334int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
335 struct kvm_vcpu *vcpu)
336{
337 pfn_t pfn0, pfn1;
338 unsigned long flags, old_entryhi = 0, vaddr = 0;
339 unsigned long entrylo0 = 0, entrylo1 = 0;
340
341
342 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
343 pfn1 = 0;
344 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
345 (0x1 << 1);
346 entrylo1 = 0;
347
348 local_irq_save(flags);
349
350 old_entryhi = read_c0_entryhi();
351 vaddr = badvaddr & (PAGE_MASK << 1);
352 write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
353 mtc0_tlbw_hazard();
354 write_c0_entrylo0(entrylo0);
355 mtc0_tlbw_hazard();
356 write_c0_entrylo1(entrylo1);
357 mtc0_tlbw_hazard();
358 write_c0_index(kvm_mips_get_commpage_asid(vcpu));
359 mtc0_tlbw_hazard();
360 tlb_write_indexed();
361 mtc0_tlbw_hazard();
362 tlbw_use_hazard();
363
364#ifdef DEBUG
365 kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
366 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
367 read_c0_entrylo0(), read_c0_entrylo1());
368#endif
369
370 /* Restore old ASID */
371 write_c0_entryhi(old_entryhi);
372 mtc0_tlbw_hazard();
373 tlbw_use_hazard();
374 local_irq_restore(flags);
375
376 return 0;
377}
378
379int
380kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
381 struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
382{
383 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
384 struct kvm *kvm = vcpu->kvm;
385 pfn_t pfn0, pfn1;
386
387
388 if ((tlb->tlb_hi & VPN2_MASK) == 0) {
389 pfn0 = 0;
390 pfn1 = 0;
391 } else {
392 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
393 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
394
395 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
396 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
397 }
398
399 if (hpa0)
400 *hpa0 = pfn0 << PAGE_SHIFT;
401
402 if (hpa1)
403 *hpa1 = pfn1 << PAGE_SHIFT;
404
405 /* Get attributes from the Guest TLB */
406 entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
407 kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
408 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
409 (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
410 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
411 (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
412
413#ifdef DEBUG
414 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
415 tlb->tlb_lo0, tlb->tlb_lo1);
416#endif
417
418 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
419 tlb->tlb_mask);
420}
421
422int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
423{
424 int i;
425 int index = -1;
426 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
427
428
429 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
430 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
431 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
432 index = i;
433 break;
434 }
435 }
436
437#ifdef DEBUG
438 kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
439 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
440#endif
441
442 return index;
443}
444
445int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
446{
447 unsigned long old_entryhi, flags;
448 volatile int idx;
449
450
451 local_irq_save(flags);
452
453 old_entryhi = read_c0_entryhi();
454
455 if (KVM_GUEST_KERNEL_MODE(vcpu))
456 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
457 else {
458 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
459 }
460
461 mtc0_tlbw_hazard();
462
463 tlb_probe();
464 tlb_probe_hazard();
465 idx = read_c0_index();
466
467 /* Restore old ASID */
468 write_c0_entryhi(old_entryhi);
469 mtc0_tlbw_hazard();
470 tlbw_use_hazard();
471
472 local_irq_restore(flags);
473
474#ifdef DEBUG
475 kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
476#endif
477
478 return idx;
479}
480
481int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
482{
483 int idx;
484 unsigned long flags, old_entryhi;
485
486 local_irq_save(flags);
487
488
489 old_entryhi = read_c0_entryhi();
490
491 write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
492 mtc0_tlbw_hazard();
493
494 tlb_probe();
495 tlb_probe_hazard();
496 idx = read_c0_index();
497
498 if (idx >= current_cpu_data.tlbsize)
499 BUG();
500
 501	 if (idx >= 0) {
502 write_c0_entryhi(UNIQUE_ENTRYHI(idx));
503 mtc0_tlbw_hazard();
504
505 write_c0_entrylo0(0);
506 mtc0_tlbw_hazard();
507
508 write_c0_entrylo1(0);
509 mtc0_tlbw_hazard();
510
511 tlb_write_indexed();
512 mtc0_tlbw_hazard();
513 }
514
515 write_c0_entryhi(old_entryhi);
516 mtc0_tlbw_hazard();
517 tlbw_use_hazard();
518
519 local_irq_restore(flags);
520
521#ifdef DEBUG
522 if (idx > 0) {
523 kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
524 (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
525 }
526#endif
527
528 return 0;
529}
530
531/* XXXKYMA: Fix this: guest USER and KERNEL no longer share the same ASID */
532int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
533{
534 unsigned long flags, old_entryhi;
535
536 if (index >= current_cpu_data.tlbsize)
537 BUG();
538
539 local_irq_save(flags);
540
541
542 old_entryhi = read_c0_entryhi();
543
544 write_c0_entryhi(UNIQUE_ENTRYHI(index));
545 mtc0_tlbw_hazard();
546
547 write_c0_index(index);
548 mtc0_tlbw_hazard();
549
550 write_c0_entrylo0(0);
551 mtc0_tlbw_hazard();
552
553 write_c0_entrylo1(0);
554 mtc0_tlbw_hazard();
555
556 tlb_write_indexed();
557 mtc0_tlbw_hazard();
558 tlbw_use_hazard();
559
560 write_c0_entryhi(old_entryhi);
561 mtc0_tlbw_hazard();
562 tlbw_use_hazard();
563
564 local_irq_restore(flags);
565
566 return 0;
567}
568
569void kvm_mips_flush_host_tlb(int skip_kseg0)
570{
571 unsigned long flags;
572 unsigned long old_entryhi, entryhi;
573 unsigned long old_pagemask;
574 int entry = 0;
575 int maxentry = current_cpu_data.tlbsize;
576
577
578 local_irq_save(flags);
579
580 old_entryhi = read_c0_entryhi();
581 old_pagemask = read_c0_pagemask();
582
583 /* Blast 'em all away. */
584 for (entry = 0; entry < maxentry; entry++) {
585
586 write_c0_index(entry);
587 mtc0_tlbw_hazard();
588
589 if (skip_kseg0) {
590 tlb_read();
591 tlbw_use_hazard();
592
593 entryhi = read_c0_entryhi();
594
595 /* Don't blow away guest kernel entries */
596 if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
597 continue;
598 }
599 }
600
601 /* Make sure all entries differ. */
602 write_c0_entryhi(UNIQUE_ENTRYHI(entry));
603 mtc0_tlbw_hazard();
604 write_c0_entrylo0(0);
605 mtc0_tlbw_hazard();
606 write_c0_entrylo1(0);
607 mtc0_tlbw_hazard();
608
609 tlb_write_indexed();
610 mtc0_tlbw_hazard();
611 }
612
613 tlbw_use_hazard();
614
615 write_c0_entryhi(old_entryhi);
616 write_c0_pagemask(old_pagemask);
617 mtc0_tlbw_hazard();
618 tlbw_use_hazard();
619
620 local_irq_restore(flags);
621}
622
623void
624kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
625 struct kvm_vcpu *vcpu)
626{
627 unsigned long asid = asid_cache(cpu);
628
629 if (!(ASID_MASK(ASID_INC(asid)))) {
630 if (cpu_has_vtag_icache) {
631 flush_icache_all();
632 }
633
634 kvm_local_flush_tlb_all(); /* start new asid cycle */
635
636 if (!asid) /* fix version if needed */
637 asid = ASID_FIRST_VERSION;
638 }
639
640 cpu_context(cpu, mm) = asid_cache(cpu) = asid;
641}
642
643void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
644{
645 unsigned long flags;
646 unsigned long old_entryhi;
647 unsigned long old_pagemask;
648 int entry = 0;
649 int cpu = smp_processor_id();
650
651 local_irq_save(flags);
652
653 old_entryhi = read_c0_entryhi();
654 old_pagemask = read_c0_pagemask();
655
656 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
657 write_c0_index(entry);
658 mtc0_tlbw_hazard();
659 tlb_read();
660 tlbw_use_hazard();
661
662 vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
663 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
664 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
665 vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
666 }
667
668 write_c0_entryhi(old_entryhi);
669 write_c0_pagemask(old_pagemask);
670 mtc0_tlbw_hazard();
671
672 local_irq_restore(flags);
673
674}
675
676void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
677{
678 unsigned long flags;
679 unsigned long old_ctx;
680 int entry;
681 int cpu = smp_processor_id();
682
683 local_irq_save(flags);
684
685 old_ctx = read_c0_entryhi();
686
687 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
688 write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
689 mtc0_tlbw_hazard();
690 write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
691 write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
692
693 write_c0_index(entry);
694 mtc0_tlbw_hazard();
695
696 tlb_write_indexed();
697 tlbw_use_hazard();
698 }
699
700 tlbw_use_hazard();
701 write_c0_entryhi(old_ctx);
702 mtc0_tlbw_hazard();
703 local_irq_restore(flags);
704}
705
706
707void kvm_local_flush_tlb_all(void)
708{
709 unsigned long flags;
710 unsigned long old_ctx;
711 int entry = 0;
712
713 local_irq_save(flags);
714 /* Save old context and create impossible VPN2 value */
715 old_ctx = read_c0_entryhi();
716 write_c0_entrylo0(0);
717 write_c0_entrylo1(0);
718
719 /* Blast 'em all away. */
720 while (entry < current_cpu_data.tlbsize) {
721 /* Make sure all entries differ. */
722 write_c0_entryhi(UNIQUE_ENTRYHI(entry));
723 write_c0_index(entry);
724 mtc0_tlbw_hazard();
725 tlb_write_indexed();
726 entry++;
727 }
728 tlbw_use_hazard();
729 write_c0_entryhi(old_ctx);
730 mtc0_tlbw_hazard();
731
732 local_irq_restore(flags);
733}
734
735void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
736{
737 int cpu, entry;
738
739 for_each_possible_cpu(cpu) {
740 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
741 vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
742 UNIQUE_ENTRYHI(entry);
743 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
744 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
745 vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
746 read_c0_pagemask();
747#ifdef DEBUG
748 kvm_debug
749 ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
750 cpu, entry,
751 vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
752 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
753 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
754#endif
755 }
756 }
757}
758
759/* Restore ASID once we are scheduled back after preemption */
760void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
761{
762 unsigned long flags;
763 int newasid = 0;
764
765#ifdef DEBUG
766 kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
767#endif
768
 769	 /* Allocate new kernel and user ASIDs if needed */
770
771 local_irq_save(flags);
772
773 if (((vcpu->arch.
774 guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
775 kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
776 vcpu->arch.guest_kernel_asid[cpu] =
777 vcpu->arch.guest_kernel_mm.context.asid[cpu];
778 kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
779 vcpu->arch.guest_user_asid[cpu] =
780 vcpu->arch.guest_user_mm.context.asid[cpu];
781 newasid++;
782
783 kvm_info("[%d]: cpu_context: %#lx\n", cpu,
784 cpu_context(cpu, current->mm));
785 kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
786 cpu, vcpu->arch.guest_kernel_asid[cpu]);
787 kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
788 vcpu->arch.guest_user_asid[cpu]);
789 }
790
791 if (vcpu->arch.last_sched_cpu != cpu) {
792 kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
793 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
794 }
795
796 /* Only reload shadow host TLB if new ASIDs haven't been allocated */
797#if 0
798 if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
799 kvm_mips_flush_host_tlb(0);
800 kvm_shadow_tlb_load(vcpu);
801 }
802#endif
803
804 if (!newasid) {
805 /* If we preempted while the guest was executing, then reload the pre-empted ASID */
806 if (current->flags & PF_VCPU) {
807 write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
808 ehb();
809 }
810 } else {
811 /* New ASIDs were allocated for the VM */
812
 813	 /* Were we in guest context? If so, the preempted ASID is no longer
 814	 * valid; set it according to the guest's current mode
 815	 * (kernel or user).
 816	 */
817 if (current->flags & PF_VCPU) {
818 if (KVM_GUEST_KERNEL_MODE(vcpu))
819 write_c0_entryhi(ASID_MASK(vcpu->arch.
820 guest_kernel_asid[cpu]));
821 else
822 write_c0_entryhi(ASID_MASK(vcpu->arch.
823 guest_user_asid[cpu]));
824 ehb();
825 }
826 }
827
828 local_irq_restore(flags);
829
830}
831
832/* ASID can change if another task is scheduled during preemption */
833void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
834{
835 unsigned long flags;
836 uint32_t cpu;
837
838 local_irq_save(flags);
839
840 cpu = smp_processor_id();
841
842
843 vcpu->arch.preempt_entryhi = read_c0_entryhi();
844 vcpu->arch.last_sched_cpu = cpu;
845
846#if 0
847 if ((atomic_read(&kvm_mips_instance) > 1)) {
848 kvm_shadow_tlb_put(vcpu);
849 }
850#endif
851
852 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
853 ASID_VERSION_MASK)) {
854 kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
855 cpu_context(cpu, current->mm));
856 drop_mmu_context(current->mm, cpu);
857 }
858 write_c0_entryhi(cpu_asid(cpu, current->mm));
859 ehb();
860
861 local_irq_restore(flags);
862}
863
864uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
865{
866 struct mips_coproc *cop0 = vcpu->arch.cop0;
867 unsigned long paddr, flags;
868 uint32_t inst;
869 int index;
870
871 if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
872 KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
873 local_irq_save(flags);
874 index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
875 if (index >= 0) {
876 inst = *(opc);
877 } else {
878 index =
879 kvm_mips_guest_tlb_lookup(vcpu,
880 ((unsigned long) opc & VPN2_MASK)
881 |
882 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
883 if (index < 0) {
884 kvm_err
885 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
886 __func__, opc, vcpu, read_c0_entryhi());
887 kvm_mips_dump_host_tlbs();
888 local_irq_restore(flags);
889 return KVM_INVALID_INST;
890 }
891 kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
892 &vcpu->arch.
893 guest_tlb[index],
894 NULL, NULL);
895 inst = *(opc);
896 }
897 local_irq_restore(flags);
898 } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
899 paddr =
900 kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
901 (unsigned long) opc);
902 inst = *(uint32_t *) CKSEG0ADDR(paddr);
903 } else {
904 kvm_err("%s: illegal address: %p\n", __func__, opc);
905 return KVM_INVALID_INST;
906 }
907
908 return inst;
909}
910
911EXPORT_SYMBOL(kvm_local_flush_tlb_all);
912EXPORT_SYMBOL(kvm_shadow_tlb_put);
913EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
914EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
915EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
916EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
917EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
918EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
919EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
920EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
921EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
922EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
923EXPORT_SYMBOL(kvm_shadow_tlb_load);
924EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
925EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
926EXPORT_SYMBOL(kvm_get_inst);
927EXPORT_SYMBOL(kvm_arch_vcpu_load);
928EXPORT_SYMBOL(kvm_arch_vcpu_put);
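To make the KSEG0 path concrete: kvm_mips_translate_guest_kseg0_to_hpa() splits the guest virtual address into a guest frame number and a page offset, fills kvm->arch.guest_pmap[] on demand, and recombines the host pfn with the offset. The stand-alone sketch below reproduces just that arithmetic, assuming 4 KiB pages and using a local array in place of guest_pmap[]; subtracting a base address stands in for KVM_GUEST_CPHYSADDR().

#include <stdio.h>

#define PAGE_SHIFT 12                       /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Stand-in for kvm->arch.guest_pmap[]: guest frame number -> host pfn. */
static unsigned long guest_pmap[16];

static unsigned long translate_kseg0(unsigned long kseg0_base, unsigned long gva)
{
        unsigned long offset = gva & ~PAGE_MASK;               /* offset within the page */
        unsigned long gfn = (gva - kseg0_base) >> PAGE_SHIFT;  /* guest frame number */

        return (guest_pmap[gfn] << PAGE_SHIFT) + offset;       /* host physical address */
}

int main(void)
{
        guest_pmap[1] = 0x12345;   /* pretend guest frame 1 maps to host pfn 0x12345 */
        printf("hpa = %#lx\n", translate_kseg0(0x0, 0x1a34));  /* prints 0x12345a34 */
        return 0;
}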
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644
index 000000000000..466aeef044bd
--- /dev/null
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -0,0 +1,482 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16
17#include <linux/kvm_host.h>
18
19#include "kvm_mips_opcode.h"
20#include "kvm_mips_int.h"
21
22static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
23{
24 gpa_t gpa;
25 uint32_t kseg = KSEGX(gva);
26
27 if ((kseg == CKSEG0) || (kseg == CKSEG1))
28 gpa = CPHYSADDR(gva);
29 else {
30 printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
31 kvm_mips_dump_host_tlbs();
32 gpa = KVM_INVALID_ADDR;
33 }
34
35#ifdef DEBUG
36 kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
37#endif
38
39 return gpa;
40}
41
42
43static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
44{
45 struct kvm_run *run = vcpu->run;
46 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
47 unsigned long cause = vcpu->arch.host_cp0_cause;
48 enum emulation_result er = EMULATE_DONE;
49 int ret = RESUME_GUEST;
50
51 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
52 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
53 } else
54 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
55
56 switch (er) {
57 case EMULATE_DONE:
58 ret = RESUME_GUEST;
59 break;
60
61 case EMULATE_FAIL:
62 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
63 ret = RESUME_HOST;
64 break;
65
66 case EMULATE_WAIT:
67 run->exit_reason = KVM_EXIT_INTR;
68 ret = RESUME_HOST;
69 break;
70
71 default:
72 BUG();
73 }
74 return ret;
75}
76
77static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
78{
79 struct kvm_run *run = vcpu->run;
80 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
81 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
82 unsigned long cause = vcpu->arch.host_cp0_cause;
83 enum emulation_result er = EMULATE_DONE;
84 int ret = RESUME_GUEST;
85
86 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
87 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
88#ifdef DEBUG
89 kvm_debug
90 ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
91 cause, opc, badvaddr);
92#endif
93 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
94
95 if (er == EMULATE_DONE)
96 ret = RESUME_GUEST;
97 else {
98 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
99 ret = RESUME_HOST;
100 }
101 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
102 /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
103 * using HIGHMEM. Need to address this in a HIGHMEM kernel
104 */
105 printk
106 ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
107 cause, opc, badvaddr);
108 kvm_mips_dump_host_tlbs();
109 kvm_arch_vcpu_dump_regs(vcpu);
110 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
111 ret = RESUME_HOST;
112 } else {
113 printk
114 ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
115 cause, opc, badvaddr);
116 kvm_mips_dump_host_tlbs();
117 kvm_arch_vcpu_dump_regs(vcpu);
118 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
119 ret = RESUME_HOST;
120 }
121 return ret;
122}
123
124static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
125{
126 struct kvm_run *run = vcpu->run;
127 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
128 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
129 unsigned long cause = vcpu->arch.host_cp0_cause;
130 enum emulation_result er = EMULATE_DONE;
131 int ret = RESUME_GUEST;
132
133 if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
134 && KVM_GUEST_KERNEL_MODE(vcpu)) {
135 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
136 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
137 ret = RESUME_HOST;
138 }
139 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
140 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
141#ifdef DEBUG
142 kvm_debug
143 ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
144 cause, opc, badvaddr);
145#endif
146 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
147 if (er == EMULATE_DONE)
148 ret = RESUME_GUEST;
149 else {
150 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
151 ret = RESUME_HOST;
152 }
153 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
154 /* All KSEG0 faults are handled by KVM, as the guest kernel does not
155 * expect to ever get them
156 */
157 if (kvm_mips_handle_kseg0_tlb_fault
158 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
159 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
160 ret = RESUME_HOST;
161 }
162 } else {
163 kvm_err
164 ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
165 cause, opc, badvaddr);
166 kvm_mips_dump_host_tlbs();
167 kvm_arch_vcpu_dump_regs(vcpu);
168 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
169 ret = RESUME_HOST;
170 }
171 return ret;
172}
173
174static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
175{
176 struct kvm_run *run = vcpu->run;
177 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
178 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
179 unsigned long cause = vcpu->arch.host_cp0_cause;
180 enum emulation_result er = EMULATE_DONE;
181 int ret = RESUME_GUEST;
182
183 if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
184 && KVM_GUEST_KERNEL_MODE(vcpu)) {
185 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
186 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
187 ret = RESUME_HOST;
188 }
189 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
190 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
191#ifdef DEBUG
192 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
193 vcpu->arch.pc, badvaddr);
194#endif
195
 196	 /* A user address (UA) fault can happen if:
 197	 * (1) the TLB entry is not present/valid in either the guest or the shadow
 198	 * host TLB, in which case the fault is passed on to the guest kernel, or
 199	 * (2) the entry is present in the guest TLB but not in the shadow host TLB,
 200	 * in which case we inject it from the guest TLB into the shadow host TLB.
 201	 */
202
203 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
204 if (er == EMULATE_DONE)
205 ret = RESUME_GUEST;
206 else {
207 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
208 ret = RESUME_HOST;
209 }
210 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
211 if (kvm_mips_handle_kseg0_tlb_fault
212 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
213 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
214 ret = RESUME_HOST;
215 }
216 } else {
217 printk
218 ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
219 cause, opc, badvaddr);
220 kvm_mips_dump_host_tlbs();
221 kvm_arch_vcpu_dump_regs(vcpu);
222 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
223 ret = RESUME_HOST;
224 }
225 return ret;
226}
227
228static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
229{
230 struct kvm_run *run = vcpu->run;
231 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
232 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
233 unsigned long cause = vcpu->arch.host_cp0_cause;
234 enum emulation_result er = EMULATE_DONE;
235 int ret = RESUME_GUEST;
236
237 if (KVM_GUEST_KERNEL_MODE(vcpu)
238 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
239#ifdef DEBUG
240 kvm_debug("Emulate Store to MMIO space\n");
241#endif
242 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
243 if (er == EMULATE_FAIL) {
244 printk("Emulate Store to MMIO space failed\n");
245 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
246 ret = RESUME_HOST;
247 } else {
248 run->exit_reason = KVM_EXIT_MMIO;
249 ret = RESUME_HOST;
250 }
251 } else {
252 printk
253 ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
254 cause, opc, badvaddr);
255 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
256 ret = RESUME_HOST;
257 }
258 return ret;
259}
260
261static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
262{
263 struct kvm_run *run = vcpu->run;
264 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
265 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
266 unsigned long cause = vcpu->arch.host_cp0_cause;
267 enum emulation_result er = EMULATE_DONE;
268 int ret = RESUME_GUEST;
269
270 if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
271#ifdef DEBUG
272 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
273#endif
274 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
275 if (er == EMULATE_FAIL) {
276 printk("Emulate Load from MMIO space failed\n");
277 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
278 ret = RESUME_HOST;
279 } else {
280 run->exit_reason = KVM_EXIT_MMIO;
281 ret = RESUME_HOST;
282 }
283 } else {
284 printk
285 ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
286 cause, opc, badvaddr);
287 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
288 ret = RESUME_HOST;
289 er = EMULATE_FAIL;
290 }
291 return ret;
292}
293
294static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
295{
296 struct kvm_run *run = vcpu->run;
297 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
298 unsigned long cause = vcpu->arch.host_cp0_cause;
299 enum emulation_result er = EMULATE_DONE;
300 int ret = RESUME_GUEST;
301
302 er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
303 if (er == EMULATE_DONE)
304 ret = RESUME_GUEST;
305 else {
306 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
307 ret = RESUME_HOST;
308 }
309 return ret;
310}
311
312static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
313{
314 struct kvm_run *run = vcpu->run;
315 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
316 unsigned long cause = vcpu->arch.host_cp0_cause;
317 enum emulation_result er = EMULATE_DONE;
318 int ret = RESUME_GUEST;
319
320 er = kvm_mips_handle_ri(cause, opc, run, vcpu);
321 if (er == EMULATE_DONE)
322 ret = RESUME_GUEST;
323 else {
324 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
325 ret = RESUME_HOST;
326 }
327 return ret;
328}
329
330static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
331{
332 struct kvm_run *run = vcpu->run;
333 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
334 unsigned long cause = vcpu->arch.host_cp0_cause;
335 enum emulation_result er = EMULATE_DONE;
336 int ret = RESUME_GUEST;
337
338 er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
339 if (er == EMULATE_DONE)
340 ret = RESUME_GUEST;
341 else {
342 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
343 ret = RESUME_HOST;
344 }
345 return ret;
346}
347
348static int
349kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
350{
351 struct mips_coproc *cop0 = vcpu->arch.cop0;
352
353 kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
354 kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
355 kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
356 kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
357 kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
358
359 kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
360 kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
361 kvm_write_c0_guest_pagemask(cop0,
362 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
363 kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
364 kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
365
366 return 0;
367}
368
369static int
370kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
371{
372 struct mips_coproc *cop0 = vcpu->arch.cop0;
373
374 regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
375 regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
376 regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
377 regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
378 regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
379
380 regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
381 regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
382 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
383 kvm_read_c0_guest_pagemask(cop0);
384 regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
385 regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
386
387 regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
388 regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
389 regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
390 regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
391 regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
392
393 return 0;
394}
395
396static int kvm_trap_emul_vm_init(struct kvm *kvm)
397{
398 return 0;
399}
400
401static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
402{
403 return 0;
404}
405
406static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
407{
408 struct mips_coproc *cop0 = vcpu->arch.cop0;
409 uint32_t config1;
410 int vcpu_id = vcpu->vcpu_id;
411
 412	 /* Arch specific setup: program the config registers so that the
 413	 * guest comes up as expected. For now we simulate a
 414	 * MIPS 24Kc.
 415	 */
416 kvm_write_c0_guest_prid(cop0, 0x00019300);
417 kvm_write_c0_guest_config(cop0,
418 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
419 (MMU_TYPE_R4000 << CP0C0_MT));
420
421 /* Read the cache characteristics from the host Config1 Register */
422 config1 = (read_c0_config1() & ~0x7f);
423
424 /* Set up MMU size */
425 config1 &= ~(0x3f << 25);
426 config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
427
428 /* We unset some bits that we aren't emulating */
429 config1 &=
430 ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
431 (1 << CP0C1_WR) | (1 << CP0C1_CA));
432 kvm_write_c0_guest_config1(cop0, config1);
433
434 kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
435 /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
436 kvm_write_c0_guest_config3(cop0,
437 MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
438 CP0C3_ULRI));
439
440 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
441 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
442
 443	 /* Set up IntCtl defaults: compatibility mode for timer interrupts (HW5) */
444 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
445
 446	 /* Use the vcpu id as the CPUNum field of the EBase register to handle SMP guests */
447 kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
448
449 return 0;
450}
451
452static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
453 /* exit handlers */
454 .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
455 .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
456 .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
457 .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
458 .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
459 .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
460 .handle_syscall = kvm_trap_emul_handle_syscall,
461 .handle_res_inst = kvm_trap_emul_handle_res_inst,
462 .handle_break = kvm_trap_emul_handle_break,
463
464 .vm_init = kvm_trap_emul_vm_init,
465 .vcpu_init = kvm_trap_emul_vcpu_init,
466 .vcpu_setup = kvm_trap_emul_vcpu_setup,
467 .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
468 .queue_timer_int = kvm_mips_queue_timer_int_cb,
469 .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
470 .queue_io_int = kvm_mips_queue_io_int_cb,
471 .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
472 .irq_deliver = kvm_mips_irq_deliver_cb,
473 .irq_clear = kvm_mips_irq_clear_cb,
474 .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
475 .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
476};
477
478int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
479{
480 *install_callbacks = &kvm_trap_emul_callbacks;
481 return 0;
482}
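The backend above is consumed through the kvm_mips_callbacks table: kvm_mips_emulation_init() hands the table to the generic code, which then dispatches exits and interrupt work through the function pointers (the interrupt code earlier in this patch already goes through kvm_mips_callbacks->irq_deliver and ->irq_clear). The sketch below shows the shape of such a dispatcher; the exception-code values and handler selection are illustrative, not the actual kvm_mips.c logic.

/* Illustrative only: how the generic layer might install and use the
 * trap-and-emulate callbacks. The exccode numbers follow the MIPS Cause
 * register encoding, but the dispatch itself is a simplified example. */
static struct kvm_mips_callbacks *kvm_mips_callbacks;

static int kvm_mips_backend_init(void)
{
        return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

static int handle_exit_sketch(struct kvm_vcpu *vcpu, unsigned int exccode)
{
        switch (exccode) {
        case 8:         /* Sys: guest executed a syscall */
                return kvm_mips_callbacks->handle_syscall(vcpu);
        case 9:         /* Bp: guest hit a breakpoint */
                return kvm_mips_callbacks->handle_break(vcpu);
        default:        /* treat anything else as a reserved instruction here */
                return kvm_mips_callbacks->handle_res_inst(vcpu);
        }
}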
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644
index 000000000000..bc9e0f406c08
--- /dev/null
+++ b/arch/mips/kvm/trace.h
@@ -0,0 +1,46 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_KVM_H
12
13#include <linux/tracepoint.h>
14
15#undef TRACE_SYSTEM
16#define TRACE_SYSTEM kvm
17#define TRACE_INCLUDE_PATH .
18#define TRACE_INCLUDE_FILE trace
19
20/*
 21 * Tracepoints for VM exits
22 */
23extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
24
25TRACE_EVENT(kvm_exit,
26 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
27 TP_ARGS(vcpu, reason),
28 TP_STRUCT__entry(
29 __field(struct kvm_vcpu *, vcpu)
30 __field(unsigned int, reason)
31 ),
32
33 TP_fast_assign(
34 __entry->vcpu = vcpu;
35 __entry->reason = reason;
36 ),
37
38 TP_printk("[%s]PC: 0x%08lx",
39 kvm_mips_exit_types_str[__entry->reason],
40 __entry->vcpu->arch.pc)
41);
42
43#endif /* _TRACE_KVM_H */
44
45/* This part must be outside protection */
46#include <trace/define_trace.h>
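As with any TRACE_EVENT() definition, the kvm_exit event above is fired from C code as trace_kvm_exit(), and the reason argument doubles as an index into kvm_mips_exit_types_str[] from kvm_mips_stats.c. A hedged example of a call site follows; the surrounding function and the WAIT_EXITS constant are placeholders for whatever exit-type index the real exit handler uses.

/* Placeholder call site for the tracepoint defined above. WAIT_EXITS stands
 * in for a valid index into kvm_mips_exit_types_str[]; it is not defined in
 * this header. One .c file in the module must also define CREATE_TRACE_POINTS
 * before including trace.h so the tracepoint bodies get emitted. */
#include "trace.h"

static void note_wait_exit(struct kvm_vcpu *vcpu)
{
        trace_kvm_exit(vcpu, WAIT_EXITS);
}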