author     Deng-Cheng Zhu <dengcheng.zhu@imgtec.com>	2014-06-26 15:11:34 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>	2014-06-30 10:52:01 -0400
commit     d116e812f9026e3cca46ce1009e577afec62916d (patch)
tree       2a8f60c6beb6c5489d777cb0e2b8a92cf42127b0 /arch
parent     85949977a5b499efca661fb80993693acbfac64d (diff)

MIPS: KVM: Reformat code and comments

No logic changes inside.

Signed-off-by: Deng-Cheng Zhu <dengcheng.zhu@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
 arch/mips/include/asm/kvm_host.h  |  10
 arch/mips/include/asm/r4kcache.h  |   3
 arch/mips/kvm/kvm_locore.S        |  55
 arch/mips/kvm/kvm_mips.c          | 179
 arch/mips/kvm/kvm_mips_comm.h     |  21
 arch/mips/kvm/kvm_mips_commpage.c |  21
 arch/mips/kvm/kvm_mips_dyntrans.c |  38
 arch/mips/kvm/kvm_mips_emul.c     | 389
 arch/mips/kvm/kvm_mips_int.c      |  45
 arch/mips/kvm/kvm_mips_int.h      |  22
 arch/mips/kvm/kvm_mips_opcode.h   |  26
 arch/mips/kvm/kvm_mips_stats.c    |  18
 arch/mips/kvm/kvm_tlb.c           | 194
 arch/mips/kvm/kvm_trap_emul.c     |  77
 arch/mips/kvm/trace.h             |  18
 15 files changed, 564 insertions, 552 deletions
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index b0aa95565752..3f813f295134 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -359,13 +359,17 @@ enum emulation_result {
 #define MIPS3_PG_FRAME		0x3fffffc0
 
 #define VPN2_MASK		0xffffe000
 #define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) &&	\
 				 ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
 #define TLB_ASID(x)		((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))	\
 				 ? ((x).tlb_lo1 & MIPS3_PG_V)	\
 				 : ((x).tlb_lo0 & MIPS3_PG_V))
+#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
+				 ((y) & VPN2_MASK & ~(x).tlb_mask))
+#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
+				 TLB_ASID(x) == ((y) & ASID_MASK))
 
 struct kvm_mips_tlb {
 	long tlb_mask;
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 0b8bd28a0df1..4520adc8699b 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -19,6 +19,9 @@
 #include <asm/mipsmtregs.h>
 #include <asm/uaccess.h> /* for segment_eq() */
 
+extern void (*r4k_blast_dcache)(void);
+extern void (*r4k_blast_icache)(void);
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations. Two issues here:
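
Moving these declarations into r4kcache.h lets any includer call through the function pointers directly instead of re-declaring them locally; the patch drops exactly such local externs from kvm_mips_emulate_cache() further down. A hedged sketch of the pattern (the helper name is hypothetical):

	#include <asm/r4kcache.h>

	/*
	 * Illustrative only: the pointers are assigned by the cache setup
	 * code to the blast routine matching the detected cache geometry.
	 */
	static void example_blast_caches(void)
	{
		r4k_blast_dcache();	/* writeback + invalidate D-cache */
		r4k_blast_icache();	/* invalidate I-cache */
	}
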
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
index 033ac343e72c..d7279c03c517 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/kvm_locore.S
@@ -16,7 +16,6 @@
 #include <asm/stackframe.h>
 #include <asm/asm-offsets.h>
 
-
 #define _C_LABEL(x)	x
 #define MIPSX(name)	mips32_ ## name
 #define CALLFRAME_SIZ	32
@@ -91,7 +90,10 @@ FEXPORT(__kvm_mips_vcpu_run)
 	LONG_S	$24, PT_R24(k1)
 	LONG_S	$25, PT_R25(k1)
 
-	/* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+	/*
+	 * XXXKYMA k0/k1 not saved, not being used if we got here through
+	 * an ioctl()
+	 */
 
 	LONG_S	$28, PT_R28(k1)
 	LONG_S	$29, PT_R29(k1)
@@ -132,7 +134,10 @@ FEXPORT(__kvm_mips_vcpu_run)
 	/* Save the kernel gp as well */
 	LONG_S	gp, VCPU_HOST_GP(k1)
 
-	/* Setup status register for running the guest in UM, interrupts are disabled */
+	/*
+	 * Setup status register for running the guest in UM, interrupts
+	 * are disabled
+	 */
 	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
 	mtc0	k0, CP0_STATUS
 	ehb
@@ -152,7 +157,6 @@ FEXPORT(__kvm_mips_vcpu_run)
 	mtc0	k0, CP0_STATUS
 	ehb
 
-
 	/* Set Guest EPC */
 	LONG_L	t0, VCPU_PC(k1)
 	mtc0	t0, CP0_EPC
@@ -165,7 +169,7 @@ FEXPORT(__kvm_mips_load_asid)
 	INT_ADDIU	t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
 	INT_ADDIU	t1, k1, VCPU_GUEST_USER_ASID	/* else user */
 1:
-	/* t1: contains the base of the ASID array, need to get the cpu id  */
+	/* t1: contains the base of the ASID array, need to get the cpu id */
 	LONG_L	t2, TI_CPU($28)		/* smp_processor_id */
 	INT_SLL	t2, t2, 2		/* x4 */
 	REG_ADDU	t3, t1, t2
@@ -229,9 +233,7 @@ FEXPORT(__kvm_mips_load_k0k1)
 	eret
 
 VECTOR(MIPSX(exception), unknown)
-/*
- * Find out what mode we came from and jump to the proper handler.
- */
+/* Find out what mode we came from and jump to the proper handler. */
 	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
 	ehb				#02:
 
@@ -239,7 +241,8 @@ VECTOR(MIPSX(exception), unknown)
 	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
 	INT_SLL	k0, k0, 10		#04
 	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
-	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is installed @ offset 0x2000
+	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is
+					#    installed @ offset 0x2000
 	j	k0			#07: jump to the function
 	nop				#08: branch delay slot
 VECTOR_END(MIPSX(exceptionEnd))
@@ -248,7 +251,6 @@ VECTOR_END(MIPSX(exceptionEnd))
 /*
  * Generic Guest exception handler. We end up here when the guest
  * does something that causes a trap to kernel mode.
- *
  */
 NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Get the VCPU pointer from DDTATA_LO */
@@ -290,9 +292,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	LONG_S	$30, VCPU_R30(k1)
 	LONG_S	$31, VCPU_R31(k1)
 
-	/* We need to save hi/lo and restore them on
-	 * the way out
-	 */
+	/* We need to save hi/lo and restore them on the way out */
 	mfhi	t0
 	LONG_S	t0, VCPU_HI(k1)
 
@@ -321,8 +321,10 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Save pointer to run in s0, will be saved by the compiler */
 	move	s0, a0
 
-	/* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
-	 * process the exception */
+	/*
+	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+	 * process the exception
+	 */
 	mfc0	k0,CP0_EPC
 	LONG_S	k0, VCPU_PC(k1)
 
@@ -351,7 +353,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	LONG_L	k0, VCPU_HOST_EBASE(k1)
 	mtc0	k0,CP0_EBASE
 
-
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
 	.set	at
 	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
@@ -369,7 +370,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 	/* Saved host state */
 	INT_ADDIU	sp, sp, -PT_SIZE
 
-	/* XXXKYMA do we need to load the host ASID, maybe not because the
+	/*
+	 * XXXKYMA do we need to load the host ASID, maybe not because the
 	 * kernel entries are marked GLOBAL, need to verify
 	 */
 
@@ -383,9 +385,11 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 
 /* Jump to handler */
 FEXPORT(__kvm_mips_jump_to_handler)
-	/* XXXKYMA: not sure if this is safe, how large is the stack??
+	/*
+	 * XXXKYMA: not sure if this is safe, how large is the stack??
 	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
-	 * with this in the kernel */
+	 * with this in the kernel
+	 */
 	PTR_LA	t9, kvm_mips_handle_exit
 	jalr.hb	t9
 	INT_ADDIU	sp, sp, -CALLFRAME_SIZ	/* BD Slot */
@@ -394,7 +398,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
 	di
 	ehb
 
-	/* XXXKYMA: k0/k1 could have been blown away if we processed
+	/*
+	 * XXXKYMA: k0/k1 could have been blown away if we processed
 	 * an exception while we were handling the exception from the
 	 * guest, reload k1
 	 */
@@ -402,7 +407,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
 	move	k1, s1
 	INT_ADDIU	k1, k1, VCPU_HOST_ARCH
 
-	/* Check return value, should tell us if we are returning to the
+	/*
+	 * Check return value, should tell us if we are returning to the
 	 * host (handle I/O etc)or resuming the guest
 	 */
 	andi	t0, v0, RESUME_HOST
@@ -521,8 +527,10 @@ __kvm_mips_return_to_host:
 	LONG_L	$0, PT_R0(k1)
 	LONG_L	$1, PT_R1(k1)
 
-	/* r2/v0 is the return code, shift it down by 2 (arithmetic)
-	 * to recover the err code */
+	/*
+	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
+	 * to recover the err code
+	 */
 	INT_SRA	k0, v0, 2
 	move	$2, k0
 
@@ -566,7 +574,6 @@ __kvm_mips_return_to_host:
 	PTR_LI	k0, 0x2000000F
 	mtc0	k0, CP0_HWRENA
 
-
 	/* Restore RA, which is the address we will return to */
 	LONG_L	ra, PT_R31(k1)
 	j	ra
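
The kvm_locore.S hunks above are typical of the whole patch: overlong single-line comments become multi-line blocks, stray blank comment lines and double blank lines go away. For reference, the block-comment convention the patch moves toward (per the kernel's Documentation/CodingStyle) looks roughly like this:

	/* A short comment fits on a single line. */

	/*
	 * A longer comment wraps well before 80 columns, with the opening
	 * and closing markers on their own lines and an asterisk leading
	 * each line in between.
	 */
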
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index cd5e4f568439..52be52adf030 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
  * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -31,38 +31,41 @@
 #define VECTORSPACING 0x100	/* for EI/VI mode */
 #endif
 
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
 struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "wait", VCPU_STAT(wait_exits) },
-	{ "cache", VCPU_STAT(cache_exits) },
-	{ "signal", VCPU_STAT(signal_exits) },
-	{ "interrupt", VCPU_STAT(int_exits) },
-	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
-	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
-	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
-	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
-	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
-	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
-	{ "syscall", VCPU_STAT(syscall_exits) },
-	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
-	{ "break_inst", VCPU_STAT(break_inst_exits) },
-	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
+	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
+	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
+	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
+	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
+	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
+	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
+	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
+	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
+	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
+	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
+	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
+	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
 	{NULL}
 };
 
 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
 {
 	int i;
+
 	for_each_possible_cpu(i) {
 		vcpu->arch.guest_kernel_asid[i] = 0;
 		vcpu->arch.guest_user_asid[i] = 0;
 	}
+
 	return 0;
 }
 
-/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
- * are "runnable" if interrupts are pending
+/*
+ * XXXKYMA: We are simulatoring a processor that has the WII bit set in
+ * Config7, so we are "runnable" if interrupts are pending
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
@@ -103,7 +106,10 @@ static void kvm_mips_init_tlbs(struct kvm *kvm)
 {
 	unsigned long wired;
 
-	/* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
+	/*
+	 * Add a wired entry to the TLB, it is used to map the commpage to
+	 * the Guest kernel
+	 */
 	wired = read_c0_wired();
 	write_c0_wired(wired + 1);
 	mtc0_tlbw_hazard();
@@ -130,7 +136,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
 	}
 
-
 	return 0;
 }
 
@@ -185,8 +190,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	}
 }
 
-long
-kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+			unsigned long arg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -207,17 +212,17 @@ void kvm_arch_memslots_updated(struct kvm *kvm)
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
-                                   struct kvm_memory_slot *memslot,
-                                   struct kvm_userspace_memory_region *mem,
-                                   enum kvm_mr_change change)
+				   struct kvm_memory_slot *memslot,
+				   struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
 {
 	return 0;
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
-                                   struct kvm_userspace_memory_region *mem,
-                                   const struct kvm_memory_slot *old,
-                                   enum kvm_mr_change change)
+				   struct kvm_userspace_memory_region *mem,
+				   const struct kvm_memory_slot *old,
+				   enum kvm_mr_change change)
 {
 	unsigned long npages = 0;
 	int i, err = 0;
@@ -246,9 +251,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				  npages, kvm->arch.guest_pmap);
 
 			/* Now setup the page table */
-			for (i = 0; i < npages; i++) {
+			for (i = 0; i < npages; i++)
 				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
-			}
 		}
 	}
 out:
@@ -270,8 +274,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	extern char mips32_exception[], mips32_exceptionEnd[];
-	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 	int err, size, offset;
 	void *gebase;
 	int i;
@@ -290,14 +292,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 
 	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
 
-	/* Allocate space for host mode exception handlers that handle
+	/*
+	 * Allocate space for host mode exception handlers that handle
 	 * guest mode exits
 	 */
-	if (cpu_has_veic || cpu_has_vint) {
+	if (cpu_has_veic || cpu_has_vint)
 		size = 0x200 + VECTORSPACING * 64;
-	} else {
+	else
 		size = 0x4000;
-	}
 
 	/* Save Linux EBASE */
 	vcpu->arch.host_ebase = (void *)read_c0_ebase();
@@ -345,7 +347,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	local_flush_icache_range((unsigned long)gebase,
 				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 
-	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+	/*
+	 * Allocate comm page for guest kernel, a TLB will be reserved for
+	 * mapping GVA @ 0xFFFF8000 to this page
+	 */
 	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
 
 	if (!vcpu->arch.kseg0_commpage) {
@@ -391,9 +396,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_free(vcpu);
 }
 
-int
-kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-				    struct kvm_guest_debug *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -430,8 +434,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return r;
 }
 
-int
-kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+			     struct kvm_mips_interrupt *irq)
 {
 	int intr = (int)irq->irq;
 	struct kvm_vcpu *dvcpu = NULL;
@@ -458,23 +462,20 @@ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
 
 	dvcpu->arch.wait = 0;
 
-	if (waitqueue_active(&dvcpu->wq)) {
+	if (waitqueue_active(&dvcpu->wq))
 		wake_up_interruptible(&dvcpu->wq);
-	}
 
 	return 0;
 }
 
-int
-kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }
 
-int
-kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }
@@ -631,10 +632,12 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 	}
 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
 		return put_user(v, uaddr64);
 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 		u32 v32 = (u32)v;
+
 		return put_user(v32, uaddr32);
 	} else {
 		return -EINVAL;
@@ -727,8 +730,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-long
-kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+			 unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
@@ -738,6 +741,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	case KVM_SET_ONE_REG:
 	case KVM_GET_ONE_REG: {
 		struct kvm_one_reg reg;
+
 		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
 		if (ioctl == KVM_SET_ONE_REG)
@@ -772,6 +776,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	case KVM_INTERRUPT:
 		{
 			struct kvm_mips_interrupt irq;
+
 			r = -EFAULT;
 			if (copy_from_user(&irq, argp, sizeof(irq)))
 				goto out;
@@ -790,9 +795,7 @@ out:
 	return r;
 }
 
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
+/* Get (and clear) the dirty memory log for a memory slot. */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
@@ -859,14 +862,14 @@ void kvm_arch_exit(void)
 	kvm_mips_callbacks = NULL;
 }
 
-int
-kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }
 
-int
-kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }
@@ -979,14 +982,11 @@ static void kvm_mips_comparecount_func(unsigned long data)
 	kvm_mips_callbacks->queue_timer_int(vcpu);
 
 	vcpu->arch.wait = 0;
-	if (waitqueue_active(&vcpu->wq)) {
+	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
-	}
 }
 
-/*
- * low level hrtimer wake routine.
- */
+/* low level hrtimer wake routine */
 static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
@@ -1010,8 +1010,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	return;
 }
 
-int
-kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				  struct kvm_translation *tr)
 {
 	return 0;
 }
@@ -1022,8 +1022,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return kvm_mips_callbacks->vcpu_setup(vcpu);
 }
 
-static
-void kvm_mips_set_c0_status(void)
+static void kvm_mips_set_c0_status(void)
 {
 	uint32_t status = read_c0_status();
 
@@ -1053,7 +1052,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
 
-	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+	/*
+	 * Set the appropriate status bits based on host CPU features,
+	 * before we hit the scheduler
+	 */
 	kvm_mips_set_c0_status();
 
 	local_irq_enable();
@@ -1061,7 +1063,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
 		  cause, opc, run, vcpu);
 
-	/* Do a privilege check, if in UM most of these exit conditions end up
+	/*
+	 * Do a privilege check, if in UM most of these exit conditions end up
 	 * causing an exception to be delivered to the Guest Kernel
 	 */
 	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
@@ -1080,9 +1083,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		++vcpu->stat.int_exits;
 		trace_kvm_exit(vcpu, INT_EXITS);
 
-		if (need_resched()) {
+		if (need_resched())
 			cond_resched();
-		}
 
 		ret = RESUME_GUEST;
 		break;
@@ -1094,9 +1096,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
 		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
 		/* XXXKYMA: Might need to return to user space */
-		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
 			ret = RESUME_HOST;
-		}
 		break;
 
 	case T_TLB_MOD:
@@ -1106,10 +1107,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case T_TLB_ST_MISS:
-		kvm_debug
-		    ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
-		     badvaddr);
+		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+			  badvaddr);
 
 		++vcpu->stat.tlbmiss_st_exits;
 		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
@@ -1156,10 +1156,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	default:
-		kvm_err
-		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
-		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
-		     kvm_read_c0_guest_status(vcpu->arch.cop0));
+		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+			kvm_read_c0_guest_status(vcpu->arch.cop0));
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
@@ -1174,7 +1173,7 @@ skip_emul:
 	kvm_mips_deliver_interrupts(vcpu, cause);
 
 	if (!(ret & RESUME_HOST)) {
-		/* Only check for signals if not already exiting to userspace  */
+		/* Only check for signals if not already exiting to userspace */
 		if (signal_pending(current)) {
 			run->exit_reason = KVM_EXIT_INTR;
 			ret = (-EINTR << 2) | RESUME_HOST;
@@ -1195,11 +1194,13 @@ int __init kvm_mips_init(void)
 	if (ret)
 		return ret;
 
-	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
-	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
-	 * to avoid the possibility of double faulting. The issue is that the TLB code
-	 * references routines that are part of the the KVM module,
-	 * which are only available once the module is loaded.
+	/*
+	 * On MIPS, kernel modules are executed from "mapped space", which
+	 * requires TLBs. The TLB handling code is statically linked with
+	 * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
+	 * double faulting. The issue is that the TLB code references
+	 * routines that are part of the the KVM module, which are only
+	 * available once the module is loaded.
 	 */
 	kvm_mips_gfn_to_pfn = gfn_to_pfn;
 	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
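
The debugfs_entries hunk earlier in this file moves KVM_STAT_VCPU out of the VCPU_STAT() macro and into each initializer; both forms expand to identical struct contents. A reduced, self-contained sketch of the two spellings (type and field names here are simplified stand-ins, not the kernel's exact definitions):

	#include <stddef.h>

	struct kvm_vcpu_stat { unsigned int wait_exits; };
	struct kvm_vcpu { struct kvm_vcpu_stat stat; };

	enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU };

	struct kvm_stats_debugfs_item {
		const char *name;
		unsigned long offset;
		enum kvm_stat_kind kind;
	};

	/* Old form: the macro expands to *two* initializer elements. */
	#define VCPU_STAT_OLD(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
	/* New form: one element; the kind is spelled out at each use site. */
	#define VCPU_STAT_NEW(x) offsetof(struct kvm_vcpu, stat.x)

	static struct kvm_stats_debugfs_item example_entries[] = {
		{ "wait", VCPU_STAT_OLD(wait_exits) },			/* before */
		{ "wait", VCPU_STAT_NEW(wait_exits), KVM_STAT_VCPU },	/* after */
		{ NULL }
	};

The new form keeps the initializer visibly one element per struct member, which is what lets each table row stay under 80 columns.
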
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
index a4a8c85cc8f7..08c5fa2bbc0f 100644
--- a/arch/mips/kvm/kvm_mips_comm.h
+++ b/arch/mips/kvm/kvm_mips_comm.h
@@ -1,19 +1,20 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: commpage: mapped into get kernel space
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: commpage: mapped into get kernel space
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #ifndef __KVM_MIPS_COMMPAGE_H__
 #define __KVM_MIPS_COMMPAGE_H__
 
 struct kvm_mips_commpage {
-	struct mips_coproc cop0;	/* COP0 state is mapped into Guest kernel via commpage */
+	/* COP0 state is mapped into Guest kernel via commpage */
+	struct mips_coproc cop0;
 };
 
 #define KVM_MIPS_COMM_EIDI_OFFSET 0x0
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
index 3873b1ecc40f..ab7096ec0666 100644
--- a/arch/mips/kvm/kvm_mips_commpage.c
+++ b/arch/mips/kvm/kvm_mips_commpage.c
@@ -1,14 +1,14 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* commpage, currently used for Virtual COP0 registers.
-* Mapped into the guest kernel @ 0x0.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * commpage, currently used for Virtual COP0 registers.
+ * Mapped into the guest kernel @ 0x0.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -27,6 +27,7 @@
 void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
 	memset(page, 0, sizeof(struct kvm_mips_commpage));
 
 	/* Specific init values for fields */
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
index b80e41d858fd..fa7184df5450 100644
--- a/arch/mips/kvm/kvm_mips_dyntrans.c
+++ b/arch/mips/kvm/kvm_mips_dyntrans.c
@@ -1,13 +1,13 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -28,9 +28,8 @@
 #define CLEAR_TEMPLATE	0x00000020
 #define SW_TEMPLATE	0xac000000
 
-int
-kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
-			   struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+			       struct kvm_vcpu *vcpu)
 {
 	int result = 0;
 	unsigned long kseg0_opc;
@@ -47,12 +46,11 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
 }
 
 /*
- * Address based CACHE instructions are transformed into synci(s). A little heavy
- * for just D-cache invalidates, but avoids an expensive trap
+ * Address based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but avoids an expensive trap
  */
-int
-kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
-			struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+			    struct kvm_vcpu *vcpu)
 {
 	int result = 0;
 	unsigned long kseg0_opc;
@@ -72,8 +70,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
 	return result;
 }
 
-int
-kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 	int32_t rt, rd, sel;
 	uint32_t mfc0_inst;
@@ -115,8 +112,7 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-int
-kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 	int32_t rt, rd, sel;
 	uint32_t mtc0_inst = SW_TEMPLATE;
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 8d4840090082..9ec9f1d54b9b 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -1,13 +1,13 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Instruction/Exception emulation
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Instruction/Exception emulation
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -51,18 +51,14 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 	if (epc & 3)
 		goto unaligned;
 
-	/*
-	 * Read the instruction
-	 */
+	/* Read the instruction */
 	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
 
 	if (insn.word == KVM_INVALID_INST)
 		return KVM_INVALID_INST;
 
 	switch (insn.i_format.opcode) {
-	/*
-	 * jr and jalr are in r_format format.
-	 */
+	/* jr and jalr are in r_format format. */
 	case spec_op:
 		switch (insn.r_format.func) {
 		case jalr_op:
@@ -124,18 +120,16 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 
 		dspcontrol = rddsp(0x01);
 
-		if (dspcontrol >= 32) {
+		if (dspcontrol >= 32)
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-		} else
+		else
 			epc += 8;
 		nextpc = epc;
 		break;
 	}
 	break;
 
-	/*
-	 * These are unconditional and in j_format.
-	 */
+	/* These are unconditional and in j_format. */
 	case jal_op:
 		arch->gprs[31] = instpc + 8;
 	case j_op:
@@ -146,9 +140,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 		nextpc = epc;
 		break;
 
-	/*
-	 * These are conditional and in i_format.
-	 */
+	/* These are conditional and in i_format. */
 	case beq_op:
 	case beql_op:
 		if (arch->gprs[insn.i_format.rs] ==
@@ -189,9 +181,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 		nextpc = epc;
 		break;
 
-	/*
-	 * And now the FPA/cp1 branch instructions.
-	 */
+	/* And now the FPA/cp1 branch instructions. */
 	case cop1_op:
 		printk("%s: unsupported cop1_op\n", __func__);
 		break;
@@ -219,7 +209,8 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
 			er = EMULATE_FAIL;
 		} else {
 			vcpu->arch.pc = branch_pc;
-			kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+			kvm_debug("BD update_pc(): New PC: %#lx\n",
+				  vcpu->arch.pc);
 		}
 	} else
 		vcpu->arch.pc += 4;
@@ -240,6 +231,7 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
 	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
 		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
 }
@@ -392,7 +384,6 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
 	return now;
 }
 
-
 /**
  * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
  * @vcpu:	Virtual CPU.
@@ -781,8 +772,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 	vcpu->arch.wait = 1;
 	kvm_vcpu_block(vcpu);
 
-	/* We we are runnable, then definitely go off to user space to check if any
-	 * I/O interrupts are pending.
+	/*
+	 * We we are runnable, then definitely go off to user space to
+	 * check if any I/O interrupts are pending.
 	 */
 	if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
@@ -793,8 +785,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 	return er;
 }
 
-/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
- * this, if things ever change
+/*
+ * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
+ * we can catch this, if things ever change
  */
 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 {
@@ -827,21 +820,22 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 	}
 
 	tlb = &vcpu->arch.guest_tlb[index];
-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 
-	kvm_debug
-	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-	     pc, index, kvm_read_c0_guest_entryhi(cop0),
-	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
-	     kvm_read_c0_guest_pagemask(cop0));
+	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0),
+		  kvm_read_c0_guest_pagemask(cop0));
 
 	return er;
 }
@@ -855,12 +849,8 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 	uint32_t pc = vcpu->arch.pc;
 	int index;
 
-#if 1
 	get_random_bytes(&index, sizeof(index));
 	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
-#else
-	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
-#endif
 
 	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
 		printk("%s: illegal index: %d\n", __func__, index);
@@ -869,21 +859,21 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 
 	tlb = &vcpu->arch.guest_tlb[index];
 
-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 
-	kvm_debug
-	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
-	     pc, index, kvm_read_c0_guest_entryhi(cop0),
-	     kvm_read_c0_guest_entrylo0(cop0),
-	     kvm_read_c0_guest_entrylo1(cop0));
+	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0));
 
 	return er;
 }
@@ -906,9 +896,9 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
-		     struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
+					   uint32_t cause, struct kvm_run *run,
+					   struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
@@ -922,9 +912,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 	 */
 	curr_pc = vcpu->arch.pc;
 	er = update_pc(vcpu, cause);
-	if (er == EMULATE_FAIL) {
+	if (er == EMULATE_FAIL)
 		return er;
-	}
 
 	copz = (inst >> 21) & 0x1f;
 	rt = (inst >> 16) & 0x1f;
@@ -973,8 +962,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
 			kvm_mips_trans_mfc0(inst, opc, vcpu);
 #endif
-		}
-		else {
+		} else {
 			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
 
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -1014,17 +1002,15 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 					  kvm_read_c0_guest_ebase(cop0));
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
 				uint32_t nasid =
 					vcpu->arch.gprs[rt] & ASID_MASK;
-				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
-				    &&
+				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
 				    ((kvm_read_c0_guest_entryhi(cop0) &
 				      ASID_MASK) != nasid)) {
-
-					kvm_debug
-					    ("MTCz, change ASID from %#lx to %#lx\n",
-					     kvm_read_c0_guest_entryhi(cop0) &
-					     ASID_MASK,
-					     vcpu->arch.gprs[rt] & ASID_MASK);
+					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+						kvm_read_c0_guest_entryhi(cop0)
+						& ASID_MASK,
+						vcpu->arch.gprs[rt]
+						& ASID_MASK);
 
 					/* Blow away the shadow host TLBs */
 					kvm_mips_flush_host_tlb(1);
@@ -1049,7 +1035,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
 				kvm_write_c0_guest_status(cop0,
 							  vcpu->arch.gprs[rt]);
-				/* Make sure that CU1 and NMI bits are never set */
+				/*
+				 * Make sure that CU1 and NMI bits are
+				 * never set
+				 */
 				kvm_clear_c0_guest_status(cop0,
 							  (ST0_CU1 | ST0_NMI));
 
@@ -1058,6 +1047,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 #endif
 			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
 				uint32_t old_cause, new_cause;
+
 				old_cause = kvm_read_c0_guest_cause(cop0);
 				new_cause = vcpu->arch.gprs[rt];
 				/* Update R/W bits */
@@ -1115,7 +1105,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 					cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
 				uint32_t pss =
 					(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
-				/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
+				/*
+				 * We don't support any shadow register sets, so
+				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
+				 */
 				if (css || pss) {
 					er = EMULATE_FAIL;
 					break;
@@ -1135,12 +1128,9 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 	}
 
 done:
-	/*
-	 * Rollback PC only if emulation was unsuccessful
-	 */
-	if (er == EMULATE_FAIL) {
+	/* Rollback PC only if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
 		vcpu->arch.pc = curr_pc;
-	}
 
 dont_update_pc:
 	/*
@@ -1152,9 +1142,9 @@ dont_update_pc:
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
-		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
 	int32_t op, base, rt, offset;
@@ -1257,19 +1247,16 @@ kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
 		break;
 	}
 
-	/*
-	 * Rollback PC if emulation was unsuccessful
-	 */
-	if (er == EMULATE_FAIL) {
+	/* Rollback PC if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
 		vcpu->arch.pc = curr_pc;
-	}
 
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
-		      struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+					    struct kvm_run *run,
+					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
 	int32_t op, base, rt, offset;
@@ -1410,13 +1397,12 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
 #define MIPS_CACHE_DCACHE		0x1
 #define MIPS_CACHE_SEC			0x3
 
-enum emulation_result
-kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
-		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+					     uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	extern void (*r4k_blast_dcache) (void);
-	extern void (*r4k_blast_icache) (void);
 	enum emulation_result er = EMULATE_DONE;
 	int32_t offset, cache, op_inst, op, base;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1443,14 +1429,15 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
1443 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1429 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1444 cache, op, base, arch->gprs[base], offset); 1430 cache, op, base, arch->gprs[base], offset);
1445 1431
1446 /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate 1432 /*
1447 * the caches entirely by stepping through all the ways/indexes 1433 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
1434 * invalidate the caches entirely by stepping through all the
1435 * ways/indexes
1448 */ 1436 */
1449 if (op == MIPS_CACHE_OP_INDEX_INV) { 1437 if (op == MIPS_CACHE_OP_INDEX_INV) {
1450 kvm_debug 1438 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1451 ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1439 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1452 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, 1440 arch->gprs[base], offset);
1453 arch->gprs[base], offset);
1454 1441
1455 if (cache == MIPS_CACHE_DCACHE) 1442 if (cache == MIPS_CACHE_DCACHE)
1456 r4k_blast_dcache(); 1443 r4k_blast_dcache();
@@ -1470,21 +1457,19 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
 
 	preempt_disable();
 	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-
-		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
 			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
-		}
 	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
 		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
 		int index;
 
 		/* If an entry already exists then skip */
-		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
 			goto skip_fault;
-		}
 
-		/* If address not in the guest TLB, then give the guest a fault, the
-		 * resulting handler will do the right thing
+		/*
+		 * If address not in the guest TLB, then give the guest a fault,
+		 * the resulting handler will do the right thing
 		 */
 		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
 						  (kvm_read_c0_guest_entryhi
1499 goto dont_update_pc; 1484 goto dont_update_pc;
1500 } else { 1485 } else {
1501 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; 1486 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1502 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ 1487 /*
1488 * Check if the entry is valid, if not then setup a TLB
1489 * invalid exception to the guest
1490 */
1503 if (!TLB_IS_VALID(*tlb, va)) { 1491 if (!TLB_IS_VALID(*tlb, va)) {
1504 er = kvm_mips_emulate_tlbinv_ld(cause, NULL, 1492 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1505 run, vcpu); 1493 run, vcpu);
1506 preempt_enable(); 1494 preempt_enable();
1507 goto dont_update_pc; 1495 goto dont_update_pc;
1508 } else { 1496 } else {
1509 /* We fault an entry from the guest tlb to the shadow host TLB */ 1497 /*
1498 * We fault an entry from the guest tlb to the
1499 * shadow host TLB
1500 */
1510 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, 1501 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1511 NULL, 1502 NULL,
1512 NULL); 1503 NULL);
@@ -1530,7 +1521,10 @@ skip_fault:
1530 flush_dcache_line(va); 1521 flush_dcache_line(va);
1531 1522
1532#ifdef CONFIG_KVM_MIPS_DYN_TRANS 1523#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1533 /* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */ 1524 /*
1525 * Replace the CACHE instruction, with a SYNCI, not the same,
1526 * but avoids a trap
1527 */
1534 kvm_mips_trans_cache_va(inst, opc, vcpu); 1528 kvm_mips_trans_cache_va(inst, opc, vcpu);
1535#endif 1529#endif
1536 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { 1530 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
@@ -1552,28 +1546,23 @@ skip_fault:
1552 1546
1553 preempt_enable(); 1547 preempt_enable();
1554 1548
1555 dont_update_pc: 1549dont_update_pc:
1556 /* 1550 /* Rollback PC */
1557 * Rollback PC
1558 */
1559 vcpu->arch.pc = curr_pc; 1551 vcpu->arch.pc = curr_pc;
1560 done: 1552done:
1561 return er; 1553 return er;
1562} 1554}
1563 1555
1564enum emulation_result 1556enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1565kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, 1557 struct kvm_run *run,
1566 struct kvm_run *run, struct kvm_vcpu *vcpu) 1558 struct kvm_vcpu *vcpu)
1567{ 1559{
1568 enum emulation_result er = EMULATE_DONE; 1560 enum emulation_result er = EMULATE_DONE;
1569 uint32_t inst; 1561 uint32_t inst;
1570 1562
1571 /* 1563 /* Fetch the instruction. */
1572 * Fetch the instruction. 1564 if (cause & CAUSEF_BD)
1573 */
1574 if (cause & CAUSEF_BD) {
1575 opc += 1; 1565 opc += 1;
1576 }
1577 1566
1578 inst = kvm_get_inst(opc, vcpu); 1567 inst = kvm_get_inst(opc, vcpu);
1579 1568
@@ -1611,9 +1600,10 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1611 return er; 1600 return er;
1612} 1601}
1613 1602
1614enum emulation_result 1603enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
1615kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, 1604 uint32_t *opc,
1616 struct kvm_run *run, struct kvm_vcpu *vcpu) 1605 struct kvm_run *run,
1606 struct kvm_vcpu *vcpu)
1617{ 1607{
1618 struct mips_coproc *cop0 = vcpu->arch.cop0; 1608 struct mips_coproc *cop0 = vcpu->arch.cop0;
1619 struct kvm_vcpu_arch *arch = &vcpu->arch; 1609 struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1645,9 +1635,10 @@ kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
1645 return er; 1635 return er;
1646} 1636}
1647 1637
1648enum emulation_result 1638enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
1649kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, 1639 uint32_t *opc,
1650 struct kvm_run *run, struct kvm_vcpu *vcpu) 1640 struct kvm_run *run,
1641 struct kvm_vcpu *vcpu)
1651{ 1642{
1652 struct mips_coproc *cop0 = vcpu->arch.cop0; 1643 struct mips_coproc *cop0 = vcpu->arch.cop0;
1653 struct kvm_vcpu_arch *arch = &vcpu->arch; 1644 struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1691,9 +1682,10 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1691 return er; 1682 return er;
1692} 1683}
1693 1684
1694enum emulation_result 1685enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
1695kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, 1686 uint32_t *opc,
1696 struct kvm_run *run, struct kvm_vcpu *vcpu) 1687 struct kvm_run *run,
1688 struct kvm_vcpu *vcpu)
1697{ 1689{
1698 struct mips_coproc *cop0 = vcpu->arch.cop0; 1690 struct mips_coproc *cop0 = vcpu->arch.cop0;
1699 struct kvm_vcpu_arch *arch = &vcpu->arch; 1691 struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1737,9 +1729,10 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1737 return er; 1729 return er;
1738} 1730}
1739 1731
1740enum emulation_result 1732enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
1741kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, 1733 uint32_t *opc,
1742 struct kvm_run *run, struct kvm_vcpu *vcpu) 1734 struct kvm_run *run,
1735 struct kvm_vcpu *vcpu)
1743{ 1736{
1744 struct mips_coproc *cop0 = vcpu->arch.cop0; 1737 struct mips_coproc *cop0 = vcpu->arch.cop0;
1745 struct kvm_vcpu_arch *arch = &vcpu->arch; 1738 struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1781,9 +1774,10 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1781 return er; 1774 return er;
1782} 1775}
1783 1776
1784enum emulation_result 1777enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
1785kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, 1778 uint32_t *opc,
1786 struct kvm_run *run, struct kvm_vcpu *vcpu) 1779 struct kvm_run *run,
1780 struct kvm_vcpu *vcpu)
1787{ 1781{
1788 struct mips_coproc *cop0 = vcpu->arch.cop0; 1782 struct mips_coproc *cop0 = vcpu->arch.cop0;
1789 struct kvm_vcpu_arch *arch = &vcpu->arch; 1783 struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1826,9 +1820,9 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1826} 1820}
1827 1821
1828/* TLBMOD: store into address matching TLB with Dirty bit off */ 1822/* TLBMOD: store into address matching TLB with Dirty bit off */
1829enum emulation_result 1823enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1830kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, 1824 struct kvm_run *run,
1831 struct kvm_run *run, struct kvm_vcpu *vcpu) 1825 struct kvm_vcpu *vcpu)
1832{ 1826{
1833 enum emulation_result er = EMULATE_DONE; 1827 enum emulation_result er = EMULATE_DONE;
1834#ifdef DEBUG 1828#ifdef DEBUG
@@ -1837,9 +1831,7 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1837 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); 1831 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1838 int index; 1832 int index;
1839 1833
1840 /* 1834 /* If address not in the guest TLB, then we are in trouble */
1841 * If address not in the guest TLB, then we are in trouble
1842 */
1843 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); 1835 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1844 if (index < 0) { 1836 if (index < 0) {
1845 /* XXXKYMA Invalidate and retry */ 1837 /* XXXKYMA Invalidate and retry */
@@ -1856,9 +1848,10 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1856 return er; 1848 return er;
1857} 1849}
1858 1850
1859enum emulation_result 1851enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
1860kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, 1852 uint32_t *opc,
1861 struct kvm_run *run, struct kvm_vcpu *vcpu) 1853 struct kvm_run *run,
1854 struct kvm_vcpu *vcpu)
1862{ 1855{
1863 struct mips_coproc *cop0 = vcpu->arch.cop0; 1856 struct mips_coproc *cop0 = vcpu->arch.cop0;
1864 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1857 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
@@ -1898,9 +1891,10 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1898 return er; 1891 return er;
1899} 1892}
1900 1893
1901enum emulation_result 1894enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
1902kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, 1895 uint32_t *opc,
1903 struct kvm_run *run, struct kvm_vcpu *vcpu) 1896 struct kvm_run *run,
1897 struct kvm_vcpu *vcpu)
1904{ 1898{
1905 struct mips_coproc *cop0 = vcpu->arch.cop0; 1899 struct mips_coproc *cop0 = vcpu->arch.cop0;
1906 struct kvm_vcpu_arch *arch = &vcpu->arch; 1900 struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1927,9 +1921,10 @@ kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1927 return er; 1921 return er;
1928} 1922}
1929 1923
1930enum emulation_result 1924enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
1931kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, 1925 uint32_t *opc,
1932 struct kvm_run *run, struct kvm_vcpu *vcpu) 1926 struct kvm_run *run,
1927 struct kvm_vcpu *vcpu)
1933{ 1928{
1934 struct mips_coproc *cop0 = vcpu->arch.cop0; 1929 struct mips_coproc *cop0 = vcpu->arch.cop0;
1935 struct kvm_vcpu_arch *arch = &vcpu->arch; 1930 struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1961,9 +1956,10 @@ kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1961 return er; 1956 return er;
1962} 1957}
1963 1958
1964enum emulation_result 1959enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
1965kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, 1960 uint32_t *opc,
1966 struct kvm_run *run, struct kvm_vcpu *vcpu) 1961 struct kvm_run *run,
1962 struct kvm_vcpu *vcpu)
1967{ 1963{
1968 struct mips_coproc *cop0 = vcpu->arch.cop0; 1964 struct mips_coproc *cop0 = vcpu->arch.cop0;
1969 struct kvm_vcpu_arch *arch = &vcpu->arch; 1965 struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1995,9 +1991,7 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1995 return er; 1991 return er;
1996} 1992}
1997 1993
1998/* 1994/* ll/sc, rdhwr, sync emulation */
1999 * ll/sc, rdhwr, sync emulation
2000 */
2001 1995
2002#define OPCODE 0xfc000000 1996#define OPCODE 0xfc000000
2003#define BASE 0x03e00000 1997#define BASE 0x03e00000
@@ -2012,9 +2006,9 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
2012#define SYNC 0x0000000f 2006#define SYNC 0x0000000f
2013#define RDHWR 0x0000003b 2007#define RDHWR 0x0000003b
2014 2008
2015enum emulation_result 2009enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
2016kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, 2010 struct kvm_run *run,
2017 struct kvm_run *run, struct kvm_vcpu *vcpu) 2011 struct kvm_vcpu *vcpu)
2018{ 2012{
2019 struct mips_coproc *cop0 = vcpu->arch.cop0; 2013 struct mips_coproc *cop0 = vcpu->arch.cop0;
2020 struct kvm_vcpu_arch *arch = &vcpu->arch; 2014 struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -2031,9 +2025,7 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
2031 if (er == EMULATE_FAIL) 2025 if (er == EMULATE_FAIL)
2032 return er; 2026 return er;
2033 2027
2034 /* 2028 /* Fetch the instruction. */
2035 * Fetch the instruction.
2036 */
2037 if (cause & CAUSEF_BD) 2029 if (cause & CAUSEF_BD)
2038 opc += 1; 2030 opc += 1;
2039 2031
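
For orientation, masks in the OPCODE/BASE/RDHWR family defined above slice fixed fields out of the 32-bit instruction word. A hedged decode sketch for the RDHWR case (field positions per the MIPS32 encoding; the real handler also checks the SPECIAL3 major opcode, which falls in the elided part of this hunk):

	uint32_t inst = kvm_get_inst(opc, vcpu);
	int rt = (inst >> 16) & 0x1f;		/* destination GPR field */
	int rd = (inst >> 11) & 0x1f;		/* hardware register number */

	if ((inst & 0x0000003f) == RDHWR)	/* function field selects RDHWR */
		kvm_debug("RDHWR hwreg %d -> gpr %d\n", rd, rt);
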
@@ -2099,8 +2091,8 @@ emulate_ri:
2099 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); 2091 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2100} 2092}
2101 2093
2102enum emulation_result 2094enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2103kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) 2095 struct kvm_run *run)
2104{ 2096{
2105 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; 2097 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2106 enum emulation_result er = EMULATE_DONE; 2098 enum emulation_result er = EMULATE_DONE;
@@ -2142,18 +2134,18 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
2142 } 2134 }
2143 2135
2144 if (vcpu->arch.pending_load_cause & CAUSEF_BD) 2136 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2145 kvm_debug 2137 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2146 ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", 2138 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2147 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, 2139 vcpu->mmio_needed);
2148 vcpu->mmio_needed);
2149 2140
2150done: 2141done:
2151 return er; 2142 return er;
2152} 2143}
2153 2144
2154static enum emulation_result 2145static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
2155kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, 2146 uint32_t *opc,
2156 struct kvm_run *run, struct kvm_vcpu *vcpu) 2147 struct kvm_run *run,
2148 struct kvm_vcpu *vcpu)
2157{ 2149{
2158 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2150 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2159 struct mips_coproc *cop0 = vcpu->arch.cop0; 2151 struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2188,9 +2180,10 @@ kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
2188 return er; 2180 return er;
2189} 2181}
2190 2182
2191enum emulation_result 2183enum emulation_result kvm_mips_check_privilege(unsigned long cause,
2192kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, 2184 uint32_t *opc,
2193 struct kvm_run *run, struct kvm_vcpu *vcpu) 2185 struct kvm_run *run,
2186 struct kvm_vcpu *vcpu)
2194{ 2187{
2195 enum emulation_result er = EMULATE_DONE; 2188 enum emulation_result er = EMULATE_DONE;
2196 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2189 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -2215,7 +2208,10 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2215 break; 2208 break;
2216 2209
2217 case T_TLB_LD_MISS: 2210 case T_TLB_LD_MISS:
2218 /* We we are accessing Guest kernel space, then send an address error exception to the guest */ 2211 /*
 2212 * If we are accessing Guest kernel space, then send an
2213 * address error exception to the guest
2214 */
2219 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { 2215 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2220 printk("%s: LD MISS @ %#lx\n", __func__, 2216 printk("%s: LD MISS @ %#lx\n", __func__,
2221 badvaddr); 2217 badvaddr);
@@ -2226,7 +2222,10 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2226 break; 2222 break;
2227 2223
2228 case T_TLB_ST_MISS: 2224 case T_TLB_ST_MISS:
2229 /* We we are accessing Guest kernel space, then send an address error exception to the guest */ 2225 /*
 2226 * If we are accessing Guest kernel space, then send an
2227 * address error exception to the guest
2228 */
2230 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { 2229 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2231 printk("%s: ST MISS @ %#lx\n", __func__, 2230 printk("%s: ST MISS @ %#lx\n", __func__,
2232 badvaddr); 2231 badvaddr);
@@ -2260,21 +2259,23 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2260 } 2259 }
2261 } 2260 }
2262 2261
2263 if (er == EMULATE_PRIV_FAIL) { 2262 if (er == EMULATE_PRIV_FAIL)
2264 kvm_mips_emulate_exc(cause, opc, run, vcpu); 2263 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2265 } 2264
2266 return er; 2265 return er;
2267} 2266}
2268 2267
2269/* User Address (UA) fault, this could happen if 2268/*
2269 * User Address (UA) fault, this could happen if
2270 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this 2270 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2271 * case we pass on the fault to the guest kernel and let it handle it. 2271 * case we pass on the fault to the guest kernel and let it handle it.
2272 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this 2272 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2273 * case we inject the TLB from the Guest TLB into the shadow host TLB 2273 * case we inject the TLB from the Guest TLB into the shadow host TLB
2274 */ 2274 */
2275enum emulation_result 2275enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
2276kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, 2276 uint32_t *opc,
2277 struct kvm_run *run, struct kvm_vcpu *vcpu) 2277 struct kvm_run *run,
2278 struct kvm_vcpu *vcpu)
2278{ 2279{
2279 enum emulation_result er = EMULATE_DONE; 2280 enum emulation_result er = EMULATE_DONE;
2280 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2281 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -2284,10 +2285,11 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
2284 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", 2285 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2285 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); 2286 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2286 2287
2287 /* KVM would not have got the exception if this entry was valid in the shadow host TLB 2288 /*
2288 * Check the Guest TLB, if the entry is not there then send the guest an 2289 * KVM would not have got the exception if this entry was valid in the
2289 * exception. The guest exc handler should then inject an entry into the 2290 * shadow host TLB. Check the Guest TLB, if the entry is not there then
2290 * guest TLB 2291 * send the guest an exception. The guest exc handler should then inject
2292 * an entry into the guest TLB.
2291 */ 2293 */
2292 index = kvm_mips_guest_tlb_lookup(vcpu, 2294 index = kvm_mips_guest_tlb_lookup(vcpu,
2293 (va & VPN2_MASK) | 2295 (va & VPN2_MASK) |
@@ -2305,7 +2307,10 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
2305 } else { 2307 } else {
2306 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; 2308 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2307 2309
2308 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ 2310 /*
2311 * Check if the entry is valid, if not then setup a TLB invalid
2312 * exception to the guest
2313 */
2309 if (!TLB_IS_VALID(*tlb, va)) { 2314 if (!TLB_IS_VALID(*tlb, va)) {
2310 if (exccode == T_TLB_LD_MISS) { 2315 if (exccode == T_TLB_LD_MISS) {
2311 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, 2316 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
@@ -2319,10 +2324,12 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
2319 er = EMULATE_FAIL; 2324 er = EMULATE_FAIL;
2320 } 2325 }
2321 } else { 2326 } else {
2322 kvm_debug 2327 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2323 ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", 2328 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
2324 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); 2329 /*
2325 /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ 2330 * OK we have a Guest TLB entry, now inject it into the
2331 * shadow host TLB
2332 */
2326 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, 2333 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
2327 NULL); 2334 NULL);
2328 } 2335 }
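
Pulling the reflowed comments together, kvm_mips_handle_tlbmiss makes a three-way decision. Condensed into a sketch (error paths and the store-side variants elided; names as in the hunks above):

	index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK));
	if (index < 0) {
		/* (1) not in the guest TLB either: fault the guest */
		er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
	} else if (!TLB_IS_VALID(vcpu->arch.guest_tlb[index], va)) {
		/* present but V=0: raise a TLB invalid exception instead */
		er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, vcpu);
	} else {
		/* (2) valid guest entry: inject it into the shadow host TLB */
		kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
				&vcpu->arch.guest_tlb[index], NULL, NULL);
	}
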
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
index 1e5de16afe29..d458c042d558 100644
--- a/arch/mips/kvm/kvm_mips_int.c
+++ b/arch/mips/kvm/kvm_mips_int.c
@@ -1,13 +1,13 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS: Interrupt delivery 6 * KVM/MIPS: Interrupt delivery
7* 7 *
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/ 10 */
11 11
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/err.h> 13#include <linux/err.h>
@@ -34,7 +34,8 @@ void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
34 34
35void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) 35void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
36{ 36{
37 /* Cause bits to reflect the pending timer interrupt, 37 /*
38 * Cause bits to reflect the pending timer interrupt,
38 * the EXC code will be set when we are actually 39 * the EXC code will be set when we are actually
39 * delivering the interrupt: 40 * delivering the interrupt:
40 */ 41 */
@@ -51,12 +52,13 @@ void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
51 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); 52 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
52} 53}
53 54
54void 55void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
55kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) 56 struct kvm_mips_interrupt *irq)
56{ 57{
57 int intr = (int)irq->irq; 58 int intr = (int)irq->irq;
58 59
59 /* Cause bits to reflect the pending IO interrupt, 60 /*
61 * Cause bits to reflect the pending IO interrupt,
60 * the EXC code will be set when we are actually 62 * the EXC code will be set when we are actually
61 * delivering the interrupt: 63 * delivering the interrupt:
62 */ 64 */
@@ -83,11 +85,11 @@ kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
83 85
84} 86}
85 87
86void 88void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
87kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, 89 struct kvm_mips_interrupt *irq)
88 struct kvm_mips_interrupt *irq)
89{ 90{
90 int intr = (int)irq->irq; 91 int intr = (int)irq->irq;
92
91 switch (intr) { 93 switch (intr) {
92 case -2: 94 case -2:
93 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); 95 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
@@ -111,9 +113,8 @@ kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
111} 113}
112 114
113/* Deliver the interrupt of the corresponding priority, if possible. */ 115/* Deliver the interrupt of the corresponding priority, if possible. */
114int 116int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
115kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, 117 uint32_t cause)
116 uint32_t cause)
117{ 118{
118 int allowed = 0; 119 int allowed = 0;
119 uint32_t exccode; 120 uint32_t exccode;
@@ -164,7 +165,6 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
164 165
165 /* Are we allowed to deliver the interrupt ??? */ 166 /* Are we allowed to deliver the interrupt ??? */
166 if (allowed) { 167 if (allowed) {
167
168 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 168 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
169 /* save old pc */ 169 /* save old pc */
170 kvm_write_c0_guest_epc(cop0, arch->pc); 170 kvm_write_c0_guest_epc(cop0, arch->pc);
@@ -195,9 +195,8 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
195 return allowed; 195 return allowed;
196} 196}
197 197
198int 198int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
199kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, 199 uint32_t cause)
200 uint32_t cause)
201{ 200{
202 return 1; 201 return 1;
203} 202}
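
As the reformatted comments spell out, queueing an interrupt only latches Cause bits plus a pending-priority flag; the EXC code and vectoring happen at delivery time. A sketch of the timer case, inside kvm_mips_irq_deliver_cb's locals, using the clear_ helper's set_ counterpart (an assumption, only the clear_ form appears in this hunk):

	/* queue side: record the pending timer interrupt in guest Cause */
	kvm_set_c0_guest_cause(vcpu->arch.cop0, C_TI);

	/* deliver side, only when the guest is not already in an exception */
	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0)
		kvm_write_c0_guest_epc(cop0, arch->pc);	/* save old pc */
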
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
index 20da7d29eede..4ab4bdfad703 100644
--- a/arch/mips/kvm/kvm_mips_int.h
+++ b/arch/mips/kvm/kvm_mips_int.h
@@ -1,14 +1,15 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS: Interrupts 6 * KVM/MIPS: Interrupts
7* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8* Authors: Sanjay Lal <sanjayl@kymasys.com> 8 * Authors: Sanjay Lal <sanjayl@kymasys.com>
9*/ 9 */
10 10
11/* MIPS Exception Priorities, exceptions (including interrupts) are queued up 11/*
12 * MIPS Exception Priorities, exceptions (including interrupts) are queued up
12 * for the guest in the order specified by their priorities 13 * for the guest in the order specified by their priorities
13 */ 14 */
14 15
@@ -27,6 +28,9 @@
27#define MIPS_EXC_MAX 12 28#define MIPS_EXC_MAX 12
28/* XXXSL More to follow */ 29/* XXXSL More to follow */
29 30
31extern char mips32_exception[], mips32_exceptionEnd[];
32extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
33
30#define C_TI (_ULCAST_(1) << 30) 34#define C_TI (_ULCAST_(1) << 30)
31 35
32#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) 36#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
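
The start/end marker pairs declared above bracket blocks of assembled handler code. A hedged sketch of how such markers are typically consumed when installing guest exception vectors (the copy site lives outside this diff; gebase and offset are hypothetical locals here):

	/* copy the assembled handler into the guest exception base */
	memcpy(gebase + offset, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);
	/* and likewise for the mips32_GuestException[] pair */
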
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
index 86d3b4cc348b..03a6ae84c7df 100644
--- a/arch/mips/kvm/kvm_mips_opcode.h
+++ b/arch/mips/kvm/kvm_mips_opcode.h
@@ -1,24 +1,22 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com> 7 * Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10/*
11 * Define opcode values not defined in <asm/isnt.h>

12 */ 8 */
13 9
 10/* Define opcode values not defined in <asm/inst.h> */
11
14#ifndef __KVM_MIPS_OPCODE_H__ 12#ifndef __KVM_MIPS_OPCODE_H__
15#define __KVM_MIPS_OPCODE_H__ 13#define __KVM_MIPS_OPCODE_H__
16 14
17/* COP0 Ops */ 15/* COP0 Ops */
18#define mfmcz_op 0x0b /* 01011 */ 16#define mfmcz_op 0x0b /* 01011 */
19#define wrpgpr_op 0x0e /* 01110 */ 17#define wrpgpr_op 0x0e /* 01110 */
20 18
21/* COP0 opcodes (only if COP0 and CO=1): */ 19/* COP0 opcodes (only if COP0 and CO=1): */
22#define wait_op 0x20 /* 100000 */ 20#define wait_op 0x20 /* 100000 */
23 21
24#endif /* __KVM_MIPS_OPCODE_H__ */ 22#endif /* __KVM_MIPS_OPCODE_H__ */
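
These values are matched against sub-fields of a COP0 instruction word. A hedged example for the WAIT case (encoding positions per the MIPS32 spec, not taken from the emulator itself):

	static int is_wait_insn(uint32_t inst)
	{
		return ((inst >> 26) & 0x3f) == 0x10 &&	/* major opcode: COP0 */
		       (inst & (1 << 25)) &&		/* CO=1: co-processor op */
		       (inst & 0x3f) == wait_op;	/* function field: WAIT */
	}
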
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
index 075904bcac1b..6efef38a324d 100644
--- a/arch/mips/kvm/kvm_mips_stats.c
+++ b/arch/mips/kvm/kvm_mips_stats.c
@@ -1,13 +1,13 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS: COP0 access histogram 6 * KVM/MIPS: COP0 access histogram
7* 7 *
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/ 10 */
11 11
12#include <linux/kvm_host.h> 12#include <linux/kvm_host.h>
13 13
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 8a5a700ad8de..bb7418bd95b9 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -1,14 +1,14 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that 6 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
7* TLB handlers run from KSEG0 7 * TLB handlers run from KSEG0
8* 8 *
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 9 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com> 10 * Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/ 11 */
12 12
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
@@ -18,7 +18,6 @@
18#include <linux/kvm_host.h> 18#include <linux/kvm_host.h>
19#include <linux/srcu.h> 19#include <linux/srcu.h>
20 20
21
22#include <asm/cpu.h> 21#include <asm/cpu.h>
23#include <asm/bootinfo.h> 22#include <asm/bootinfo.h>
24#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
@@ -39,13 +38,13 @@ atomic_t kvm_mips_instance;
39EXPORT_SYMBOL(kvm_mips_instance); 38EXPORT_SYMBOL(kvm_mips_instance);
40 39
41/* These function pointers are initialized once the KVM module is loaded */ 40/* These function pointers are initialized once the KVM module is loaded */
42pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); 41pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
43EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); 42EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
44 43
45void (*kvm_mips_release_pfn_clean) (pfn_t pfn); 44void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
46EXPORT_SYMBOL(kvm_mips_release_pfn_clean); 45EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
47 46
48bool(*kvm_mips_is_error_pfn) (pfn_t pfn); 47bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
49EXPORT_SYMBOL(kvm_mips_is_error_pfn); 48EXPORT_SYMBOL(kvm_mips_is_error_pfn);
50 49
51uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) 50uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
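
The tidied declarations above are hooks which, as the comment says, are wired up once when the KVM module loads. A hedged sketch of that wiring (the init site is in kvm_mips.c, outside this diff, and the generic-KVM helper names on the right are assumptions):

	/* illustrative module-init wiring, not part of this patch */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;
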
@@ -53,21 +52,17 @@ uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
53 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; 52 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
54} 53}
55 54
56
57uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) 55uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
58{ 56{
59 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; 57 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
60} 58}
61 59
62inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) 60inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
63{ 61{
64 return vcpu->kvm->arch.commpage_tlb; 62 return vcpu->kvm->arch.commpage_tlb;
65} 63}
66 64
67 65/* Structure defining a TLB entry data set. */
68/*
69 * Structure defining an tlb entry data set.
70 */
71 66
72void kvm_mips_dump_host_tlbs(void) 67void kvm_mips_dump_host_tlbs(void)
73{ 68{
@@ -116,6 +111,7 @@ void kvm_mips_dump_host_tlbs(void)
116 mtc0_tlbw_hazard(); 111 mtc0_tlbw_hazard();
117 local_irq_restore(flags); 112 local_irq_restore(flags);
118} 113}
114EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
119 115
120void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) 116void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
121{ 117{
@@ -143,6 +139,7 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
143 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); 139 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
144 } 140 }
145} 141}
142EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
146 143
147static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) 144static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
148{ 145{
@@ -152,7 +149,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
152 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) 149 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
153 return 0; 150 return 0;
154 151
155 srcu_idx = srcu_read_lock(&kvm->srcu); 152 srcu_idx = srcu_read_lock(&kvm->srcu);
156 pfn = kvm_mips_gfn_to_pfn(kvm, gfn); 153 pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
157 154
158 if (kvm_mips_is_error_pfn(pfn)) { 155 if (kvm_mips_is_error_pfn(pfn)) {
@@ -169,7 +166,7 @@ out:
169 166
170/* Translate guest KSEG0 addresses to Host PA */ 167/* Translate guest KSEG0 addresses to Host PA */
171unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, 168unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
172 unsigned long gva) 169 unsigned long gva)
173{ 170{
174 gfn_t gfn; 171 gfn_t gfn;
175 uint32_t offset = gva & ~PAGE_MASK; 172 uint32_t offset = gva & ~PAGE_MASK;
@@ -194,12 +191,13 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
194 191
195 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; 192 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
196} 193}
194EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
197 195
198/* XXXKYMA: Must be called with interrupts disabled */ 196/* XXXKYMA: Must be called with interrupts disabled */
199/* set flush_dcache_mask == 0 if no dcache flush required */ 197/* set flush_dcache_mask == 0 if no dcache flush required */
200int 198int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
201kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, 199 unsigned long entrylo0, unsigned long entrylo1,
202 unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask) 200 int flush_dcache_mask)
203{ 201{
204 unsigned long flags; 202 unsigned long flags;
205 unsigned long old_entryhi; 203 unsigned long old_entryhi;
@@ -207,7 +205,6 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
207 205
208 local_irq_save(flags); 206 local_irq_save(flags);
209 207
210
211 old_entryhi = read_c0_entryhi(); 208 old_entryhi = read_c0_entryhi();
212 write_c0_entryhi(entryhi); 209 write_c0_entryhi(entryhi);
213 mtc0_tlbw_hazard(); 210 mtc0_tlbw_hazard();
@@ -240,12 +237,14 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
240 if (flush_dcache_mask) { 237 if (flush_dcache_mask) {
241 if (entrylo0 & MIPS3_PG_V) { 238 if (entrylo0 & MIPS3_PG_V) {
242 ++vcpu->stat.flush_dcache_exits; 239 ++vcpu->stat.flush_dcache_exits;
243 flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask); 240 flush_data_cache_page((entryhi & VPN2_MASK) &
241 ~flush_dcache_mask);
244 } 242 }
245 if (entrylo1 & MIPS3_PG_V) { 243 if (entrylo1 & MIPS3_PG_V) {
246 ++vcpu->stat.flush_dcache_exits; 244 ++vcpu->stat.flush_dcache_exits;
247 flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) | 245 flush_data_cache_page(((entryhi & VPN2_MASK) &
248 (0x1 << PAGE_SHIFT)); 246 ~flush_dcache_mask) |
247 (0x1 << PAGE_SHIFT));
249 } 248 }
250 } 249 }
251 250
@@ -257,10 +256,9 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
257 return 0; 256 return 0;
258} 257}
259 258
260
261/* XXXKYMA: Must be called with interrupts disabled */ 259/* XXXKYMA: Must be called with interrupts disabled */
262int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, 260int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
263 struct kvm_vcpu *vcpu) 261 struct kvm_vcpu *vcpu)
264{ 262{
265 gfn_t gfn; 263 gfn_t gfn;
266 pfn_t pfn0, pfn1; 264 pfn_t pfn0, pfn1;
@@ -270,7 +268,6 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
270 struct kvm *kvm = vcpu->kvm; 268 struct kvm *kvm = vcpu->kvm;
271 const int flush_dcache_mask = 0; 269 const int flush_dcache_mask = 0;
272 270
273
274 if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { 271 if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
275 kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); 272 kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
276 kvm_mips_dump_host_tlbs(); 273 kvm_mips_dump_host_tlbs();
@@ -302,14 +299,15 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
302 } 299 }
303 300
304 entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); 301 entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
305 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | 302 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
306 (0x1 << 1); 303 (1 << 2) | (0x1 << 1);
307 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | 304 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
308 (0x1 << 1); 305 (1 << 2) | (0x1 << 1);
309 306
310 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 307 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
311 flush_dcache_mask); 308 flush_dcache_mask);
312} 309}
310EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
313 311
314int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, 312int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
315 struct kvm_vcpu *vcpu) 313 struct kvm_vcpu *vcpu)
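
The recurring (0x3 << 3) | (1 << 2) | (0x1 << 1) idiom in the entrylo words above encodes the standard MIPS EntryLo fields. Named purely as a reading aid (the macro names are hypothetical, not from the patch):

	#define KVM_ENTRYLO_C_WB	(0x3 << 3)  /* C: cacheable, noncoherent */
	#define KVM_ENTRYLO_D		(1 << 2)    /* D: dirty, i.e. writeable */
	#define KVM_ENTRYLO_V		(0x1 << 1)  /* V: valid */

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		   KVM_ENTRYLO_C_WB | KVM_ENTRYLO_D | KVM_ENTRYLO_V;
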
@@ -318,11 +316,10 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
318 unsigned long flags, old_entryhi = 0, vaddr = 0; 316 unsigned long flags, old_entryhi = 0, vaddr = 0;
319 unsigned long entrylo0 = 0, entrylo1 = 0; 317 unsigned long entrylo0 = 0, entrylo1 = 0;
320 318
321
322 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; 319 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
323 pfn1 = 0; 320 pfn1 = 0;
324 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | 321 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
325 (0x1 << 1); 322 (1 << 2) | (0x1 << 1);
326 entrylo1 = 0; 323 entrylo1 = 0;
327 324
328 local_irq_save(flags); 325 local_irq_save(flags);
@@ -341,9 +338,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
341 mtc0_tlbw_hazard(); 338 mtc0_tlbw_hazard();
342 tlbw_use_hazard(); 339 tlbw_use_hazard();
343 340
344 kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", 341 kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
345 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), 342 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
346 read_c0_entrylo0(), read_c0_entrylo1()); 343 read_c0_entrylo0(), read_c0_entrylo1());
347 344
348 /* Restore old ASID */ 345 /* Restore old ASID */
349 write_c0_entryhi(old_entryhi); 346 write_c0_entryhi(old_entryhi);
@@ -353,28 +350,33 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
353 350
354 return 0; 351 return 0;
355} 352}
353EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
356 354
357int 355int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
358kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, 356 struct kvm_mips_tlb *tlb,
359 struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1) 357 unsigned long *hpa0,
358 unsigned long *hpa1)
360{ 359{
361 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; 360 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
362 struct kvm *kvm = vcpu->kvm; 361 struct kvm *kvm = vcpu->kvm;
363 pfn_t pfn0, pfn1; 362 pfn_t pfn0, pfn1;
364 363
365
366 if ((tlb->tlb_hi & VPN2_MASK) == 0) { 364 if ((tlb->tlb_hi & VPN2_MASK) == 0) {
367 pfn0 = 0; 365 pfn0 = 0;
368 pfn1 = 0; 366 pfn1 = 0;
369 } else { 367 } else {
370 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) 368 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
369 >> PAGE_SHIFT) < 0)
371 return -1; 370 return -1;
372 371
373 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) 372 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
373 >> PAGE_SHIFT) < 0)
374 return -1; 374 return -1;
375 375
376 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; 376 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
377 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; 377 >> PAGE_SHIFT];
378 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
379 >> PAGE_SHIFT];
378 } 380 }
379 381
380 if (hpa0) 382 if (hpa0)
@@ -385,11 +387,12 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
385 387
386 /* Get attributes from the Guest TLB */ 388 /* Get attributes from the Guest TLB */
387 entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? 389 entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
388 kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu)); 390 kvm_mips_get_kernel_asid(vcpu) :
391 kvm_mips_get_user_asid(vcpu));
389 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | 392 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
390 (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); 393 (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
391 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | 394 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
392 (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); 395 (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
393 396
394 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, 397 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
395 tlb->tlb_lo0, tlb->tlb_lo1); 398 tlb->tlb_lo0, tlb->tlb_lo1);
@@ -397,6 +400,7 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
397 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 400 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
398 tlb->tlb_mask); 401 tlb->tlb_mask);
399} 402}
403EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
400 404
401int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) 405int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
402{ 406{
@@ -404,10 +408,9 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
404 int index = -1; 408 int index = -1;
405 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; 409 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
406 410
407
408 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 411 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
409 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && 412 if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
410 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { 413 TLB_HI_ASID_HIT(tlb[i], entryhi)) {
411 index = i; 414 index = i;
412 break; 415 break;
413 } 416 }
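
The TLB_HI_VPN2_HIT/TLB_HI_ASID_HIT pair used here condenses the removed open-coded condition; note that ~tlb_mask strips the page-size bits, so an entry covering a larger page matches a correspondingly wider address range. Expanded, the loop body checks:

	/* hit iff the masked VPN2s agree and (G is set or the ASIDs match) */
	hit = ((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
	       ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
	      (TLB_IS_GLOBAL(tlb[i]) ||
	       TLB_ASID(tlb[i]) == (entryhi & ASID_MASK));
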
@@ -418,21 +421,23 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
418 421
419 return index; 422 return index;
420} 423}
424EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
421 425
422int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) 426int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
423{ 427{
424 unsigned long old_entryhi, flags; 428 unsigned long old_entryhi, flags;
425 volatile int idx; 429 volatile int idx;
426 430
427
428 local_irq_save(flags); 431 local_irq_save(flags);
429 432
430 old_entryhi = read_c0_entryhi(); 433 old_entryhi = read_c0_entryhi();
431 434
432 if (KVM_GUEST_KERNEL_MODE(vcpu)) 435 if (KVM_GUEST_KERNEL_MODE(vcpu))
433 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu)); 436 write_c0_entryhi((vaddr & VPN2_MASK) |
437 kvm_mips_get_kernel_asid(vcpu));
434 else { 438 else {
435 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); 439 write_c0_entryhi((vaddr & VPN2_MASK) |
440 kvm_mips_get_user_asid(vcpu));
436 } 441 }
437 442
438 mtc0_tlbw_hazard(); 443 mtc0_tlbw_hazard();
@@ -452,6 +457,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
452 457
453 return idx; 458 return idx;
454} 459}
460EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
455 461
456int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) 462int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
457{ 463{
@@ -460,7 +466,6 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
460 466
461 local_irq_save(flags); 467 local_irq_save(flags);
462 468
463
464 old_entryhi = read_c0_entryhi(); 469 old_entryhi = read_c0_entryhi();
465 470
466 write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); 471 write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
@@ -499,8 +504,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
499 504
500 return 0; 505 return 0;
501} 506}
507EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
502 508
503/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/ 509/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
504int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) 510int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
505{ 511{
506 unsigned long flags, old_entryhi; 512 unsigned long flags, old_entryhi;
@@ -510,7 +516,6 @@ int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
510 516
511 local_irq_save(flags); 517 local_irq_save(flags);
512 518
513
514 old_entryhi = read_c0_entryhi(); 519 old_entryhi = read_c0_entryhi();
515 520
516 write_c0_entryhi(UNIQUE_ENTRYHI(index)); 521 write_c0_entryhi(UNIQUE_ENTRYHI(index));
@@ -546,7 +551,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
546 int entry = 0; 551 int entry = 0;
547 int maxentry = current_cpu_data.tlbsize; 552 int maxentry = current_cpu_data.tlbsize;
548 553
549
550 local_irq_save(flags); 554 local_irq_save(flags);
551 555
552 old_entryhi = read_c0_entryhi(); 556 old_entryhi = read_c0_entryhi();
@@ -554,7 +558,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
554 558
555 /* Blast 'em all away. */ 559 /* Blast 'em all away. */
556 for (entry = 0; entry < maxentry; entry++) { 560 for (entry = 0; entry < maxentry; entry++) {
557
558 write_c0_index(entry); 561 write_c0_index(entry);
559 mtc0_tlbw_hazard(); 562 mtc0_tlbw_hazard();
560 563
@@ -565,9 +568,8 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
565 entryhi = read_c0_entryhi(); 568 entryhi = read_c0_entryhi();
566 569
567 /* Don't blow away guest kernel entries */ 570 /* Don't blow away guest kernel entries */
568 if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) { 571 if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
569 continue; 572 continue;
570 }
571 } 573 }
572 574
573 /* Make sure all entries differ. */ 575 /* Make sure all entries differ. */
@@ -591,17 +593,17 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
591 593
592 local_irq_restore(flags); 594 local_irq_restore(flags);
593} 595}
596EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
594 597
595void 598void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
596kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, 599 struct kvm_vcpu *vcpu)
597 struct kvm_vcpu *vcpu)
598{ 600{
599 unsigned long asid = asid_cache(cpu); 601 unsigned long asid = asid_cache(cpu);
600 602
601 if (!((asid += ASID_INC) & ASID_MASK)) { 603 asid += ASID_INC;
602 if (cpu_has_vtag_icache) { 604 if (!(asid & ASID_MASK)) {
605 if (cpu_has_vtag_icache)
603 flush_icache_all(); 606 flush_icache_all();
604 }
605 607
606 kvm_local_flush_tlb_all(); /* start new asid cycle */ 608 kvm_local_flush_tlb_all(); /* start new asid cycle */
607 609
@@ -639,6 +641,7 @@ void kvm_local_flush_tlb_all(void)
639 641
640 local_irq_restore(flags); 642 local_irq_restore(flags);
641} 643}
644EXPORT_SYMBOL(kvm_local_flush_tlb_all);
642 645
643/** 646/**
644 * kvm_mips_migrate_count() - Migrate timer. 647 * kvm_mips_migrate_count() - Migrate timer.
@@ -699,7 +702,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
699 } 702 }
700 703
701 if (!newasid) { 704 if (!newasid) {
702 /* If we preempted while the guest was executing, then reload the pre-empted ASID */ 705 /*
706 * If we preempted while the guest was executing, then reload
707 * the pre-empted ASID
708 */
703 if (current->flags & PF_VCPU) { 709 if (current->flags & PF_VCPU) {
704 write_c0_entryhi(vcpu->arch. 710 write_c0_entryhi(vcpu->arch.
705 preempt_entryhi & ASID_MASK); 711 preempt_entryhi & ASID_MASK);
@@ -708,9 +714,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
708 } else { 714 } else {
709 /* New ASIDs were allocated for the VM */ 715 /* New ASIDs were allocated for the VM */
710 716
711 /* Were we in guest context? If so then the pre-empted ASID is no longer 717 /*
712 * valid, we need to set it to what it should be based on the mode of 718 * Were we in guest context? If so then the pre-empted ASID is
713 * the Guest (Kernel/User) 719 * no longer valid, we need to set it to what it should be based
720 * on the mode of the Guest (Kernel/User)
714 */ 721 */
715 if (current->flags & PF_VCPU) { 722 if (current->flags & PF_VCPU) {
716 if (KVM_GUEST_KERNEL_MODE(vcpu)) 723 if (KVM_GUEST_KERNEL_MODE(vcpu))
@@ -728,6 +735,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
728 local_irq_restore(flags); 735 local_irq_restore(flags);
729 736
730} 737}
738EXPORT_SYMBOL(kvm_arch_vcpu_load);
731 739
732/* ASID can change if another task is scheduled during preemption */ 740/* ASID can change if another task is scheduled during preemption */
733void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 741void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -739,7 +747,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
739 747
740 cpu = smp_processor_id(); 748 cpu = smp_processor_id();
741 749
742
743 vcpu->arch.preempt_entryhi = read_c0_entryhi(); 750 vcpu->arch.preempt_entryhi = read_c0_entryhi();
744 vcpu->arch.last_sched_cpu = cpu; 751 vcpu->arch.last_sched_cpu = cpu;
745 752
@@ -754,11 +761,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
754 761
755 local_irq_restore(flags); 762 local_irq_restore(flags);
756} 763}
764EXPORT_SYMBOL(kvm_arch_vcpu_put);
757 765
758uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) 766uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
759{ 767{
760 struct mips_coproc *cop0 = vcpu->arch.cop0; 768 struct mips_coproc *cop0 = vcpu->arch.cop0;
761 unsigned long paddr, flags; 769 unsigned long paddr, flags, vpn2, asid;
762 uint32_t inst; 770 uint32_t inst;
763 int index; 771 int index;
764 772
@@ -769,16 +777,12 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
769 if (index >= 0) { 777 if (index >= 0) {
770 inst = *(opc); 778 inst = *(opc);
771 } else { 779 } else {
772 index = 780 vpn2 = (unsigned long) opc & VPN2_MASK;
773 kvm_mips_guest_tlb_lookup(vcpu, 781 asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
774 ((unsigned long) opc & VPN2_MASK) 782 index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
775 |
776 (kvm_read_c0_guest_entryhi
777 (cop0) & ASID_MASK));
778 if (index < 0) { 783 if (index < 0) {
779 kvm_err 784 kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
780 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", 785 __func__, opc, vcpu, read_c0_entryhi());
781 __func__, opc, vcpu, read_c0_entryhi());
782 kvm_mips_dump_host_tlbs(); 786 kvm_mips_dump_host_tlbs();
783 local_irq_restore(flags); 787 local_irq_restore(flags);
784 return KVM_INVALID_INST; 788 return KVM_INVALID_INST;
@@ -793,7 +797,7 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
793 } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { 797 } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
794 paddr = 798 paddr =
795 kvm_mips_translate_guest_kseg0_to_hpa(vcpu, 799 kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
796 (unsigned long) opc); 800 (unsigned long) opc);
797 inst = *(uint32_t *) CKSEG0ADDR(paddr); 801 inst = *(uint32_t *) CKSEG0ADDR(paddr);
798 } else { 802 } else {
799 kvm_err("%s: illegal address: %p\n", __func__, opc); 803 kvm_err("%s: illegal address: %p\n", __func__, opc);
@@ -802,18 +806,4 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
802 806
803 return inst; 807 return inst;
804} 808}
805
806EXPORT_SYMBOL(kvm_local_flush_tlb_all);
807EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
808EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
809EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
810EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
811EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
812EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
813EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
814EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
815EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
816EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
817EXPORT_SYMBOL(kvm_get_inst); 809EXPORT_SYMBOL(kvm_get_inst);
818EXPORT_SYMBOL(kvm_arch_vcpu_load);
819EXPORT_SYMBOL(kvm_arch_vcpu_put);
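
The net effect of the EXPORT_SYMBOL churn above is to follow kernel convention: each export sits directly after its definition rather than in a list at the end of the file. Schematically (hypothetical helper):

	int kvm_mips_example_helper(int x)
	{
		return x + 1;
	}
	EXPORT_SYMBOL(kvm_mips_example_helper);
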
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
index 693f952b2fbb..106335b36861 100644
--- a/arch/mips/kvm/kvm_trap_emul.c
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -1,13 +1,13 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel 6 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
7* 7 *
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/ 10 */
11 11
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/err.h> 13#include <linux/err.h>
@@ -37,7 +37,6 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
37 return gpa; 37 return gpa;
38} 38}
39 39
40
41static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) 40static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
42{ 41{
43 struct kvm_run *run = vcpu->run; 42 struct kvm_run *run = vcpu->run;
@@ -46,9 +45,9 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
46 enum emulation_result er = EMULATE_DONE; 45 enum emulation_result er = EMULATE_DONE;
47 int ret = RESUME_GUEST; 46 int ret = RESUME_GUEST;
48 47
49 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { 48 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
50 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); 49 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
51 } else 50 else
52 er = kvm_mips_emulate_inst(cause, opc, run, vcpu); 51 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
53 52
54 switch (er) { 53 switch (er) {
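
The CE test above reads the Cause.CE field (bits 29:28), which names the coprocessor that raised the unusable exception; CP1 is the FPU. Annotated:

	unsigned int ce = (cause & CAUSEF_CE) >> CAUSEB_CE;

	if (ce == 1)	/* CP1 (FPU) unusable: deliver an FPU exception */
		er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
	else		/* CP0 etc.: try to emulate the instruction */
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
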
@@ -83,9 +82,8 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
83 82
84 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 83 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
85 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { 84 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
86 kvm_debug 85 kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
87 ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", 86 cause, opc, badvaddr);
88 cause, opc, badvaddr);
89 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); 87 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
90 88
91 if (er == EMULATE_DONE) 89 if (er == EMULATE_DONE)
@@ -95,8 +93,10 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
95 ret = RESUME_HOST; 93 ret = RESUME_HOST;
96 } 94 }
97 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { 95 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
98 /* XXXKYMA: The guest kernel does not expect to get this fault when we are not 96 /*
99 * using HIGHMEM. Need to address this in a HIGHMEM kernel 97 * XXXKYMA: The guest kernel does not expect to get this fault
98 * when we are not using HIGHMEM. Need to address this in a
99 * HIGHMEM kernel
100 */ 100 */
101 printk 101 printk
102 ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", 102 ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
@@ -134,9 +134,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
134 } 134 }
135 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 135 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
136 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { 136 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
137 kvm_debug 137 kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
138 ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", 138 cause, opc, badvaddr);
139 cause, opc, badvaddr);
140 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); 139 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
141 if (er == EMULATE_DONE) 140 if (er == EMULATE_DONE)
142 ret = RESUME_GUEST; 141 ret = RESUME_GUEST;
@@ -145,8 +144,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
145 ret = RESUME_HOST; 144 ret = RESUME_HOST;
146 } 145 }
147 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { 146 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
148 /* All KSEG0 faults are handled by KVM, as the guest kernel does not 147 /*
149 * expect to ever get them 148 * All KSEG0 faults are handled by KVM, as the guest kernel does
149 * not expect to ever get them
150 */ 150 */
151 if (kvm_mips_handle_kseg0_tlb_fault 151 if (kvm_mips_handle_kseg0_tlb_fault
152 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { 152 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
@@ -154,9 +154,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
154 ret = RESUME_HOST; 154 ret = RESUME_HOST;
155 } 155 }
156 } else { 156 } else {
157 kvm_err 157 kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
158 ("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", 158 cause, opc, badvaddr);
159 cause, opc, badvaddr);
160 kvm_mips_dump_host_tlbs(); 159 kvm_mips_dump_host_tlbs();
161 kvm_arch_vcpu_dump_regs(vcpu); 160 kvm_arch_vcpu_dump_regs(vcpu);
162 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 161 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -185,11 +184,14 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
185 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", 184 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
186 vcpu->arch.pc, badvaddr); 185 vcpu->arch.pc, badvaddr);
187 186
188 /* User Address (UA) fault, this could happen if 187 /*
189 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this 188 * User Address (UA) fault, this could happen if
190 * case we pass on the fault to the guest kernel and let it handle it. 189 * (1) TLB entry not present/valid in both Guest and shadow host
191 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this 190 * TLBs, in this case we pass on the fault to the guest
192 * case we inject the TLB from the Guest TLB into the shadow host TLB 191 * kernel and let it handle it.
192 * (2) TLB entry is present in the Guest TLB but not in the
193 * shadow, in this case we inject the TLB from the Guest TLB
194 * into the shadow host TLB
193 */ 195 */
194 196
195 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); 197 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
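The two UA-fault cases in the comment above reduce to a single probe of the guest TLB: a miss is forwarded to the guest kernel as its own TLB exception, while a hit means the mapping exists only on the guest side and must be copied into the shadow host TLB. A self-contained sketch, with hypothetical stand-ins (guest_tlb_lookup, deliver_guest_tlb_miss, inject_shadow_tlb) for the kvm_mips_* helpers:

#include <stdio.h>

/* Stubs that only model the outcome of the real guest TLB probe. */
static int guest_tlb_lookup(unsigned long badvaddr)
{
	return (badvaddr & 0x1000) ? 7 : -1;	/* pretend index 7 hits */
}

static void deliver_guest_tlb_miss(unsigned long badvaddr)
{
	printf("case 1: forward miss at %#lx to the guest kernel\n", badvaddr);
}

static void inject_shadow_tlb(int index)
{
	printf("case 2: copy guest TLB[%d] into the shadow host TLB\n", index);
}

static void handle_ua_fault(unsigned long badvaddr)
{
	int index = guest_tlb_lookup(badvaddr);

	if (index < 0)
		deliver_guest_tlb_miss(badvaddr);	/* no guest mapping */
	else
		inject_shadow_tlb(index);		/* guest-only mapping */
}

int main(void)
{
	handle_ua_fault(0x00400000);	/* case 1 */
	handle_ua_fault(0x00401000);	/* case 2 */
	return 0;
}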
@@ -349,9 +351,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
349 uint32_t config1; 351 uint32_t config1;
350 int vcpu_id = vcpu->vcpu_id; 352 int vcpu_id = vcpu->vcpu_id;
351 353
352 /* Arch specific stuff, set up config registers properly so that the 354 /*
353 * guest will come up as expected, for now we simulate a 355 * Arch specific stuff, set up config registers properly so that the
354 * MIPS 24kc 356 * guest will come up as expected, for now we simulate a MIPS 24kc
355 */ 357 */
356 kvm_write_c0_guest_prid(cop0, 0x00019300); 358 kvm_write_c0_guest_prid(cop0, 0x00019300);
357 kvm_write_c0_guest_config(cop0, 359 kvm_write_c0_guest_config(cop0,
@@ -373,14 +375,15 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
373 375
374 kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); 376 kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
375 /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ 377 /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
376 kvm_write_c0_guest_config3(cop0, 378 kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
377 MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 << 379 (1 << CP0C3_ULRI));
378 CP0C3_ULRI));
379 380
380 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ 381 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
381 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); 382 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
382 383
383 /* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */ 384 /*
385 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
386 */
384 kvm_write_c0_guest_intctl(cop0, 0xFC000000); 387 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
385 388
386 /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ 389 /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
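The PRId value 0x00019300 written here is what makes the guest identify the CPU as a 24K-class core: bits 23:16 carry the company ID (0x01, MIPS Technologies), bits 15:8 the processor ID (0x93, the 24K family), and bits 7:0 the revision. A small decode, runnable in user space:

#include <stdio.h>

int main(void)
{
	unsigned int prid = 0x00019300;	/* value written to the guest PRId */

	printf("company ID:   0x%02x\n", (prid >> 16) & 0xff);	/* 0x01 = MIPS */
	printf("processor ID: 0x%02x\n", (prid >> 8) & 0xff);	/* 0x93 = 24K */
	printf("revision:     0x%02x\n", prid & 0xff);
	return 0;
}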
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index bc9e0f406c08..c1388d40663b 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -1,11 +1,11 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com> 7 * Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/ 8 */
9 9
10#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 10#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_KVM_H 11#define _TRACE_KVM_H
@@ -17,9 +17,7 @@
17#define TRACE_INCLUDE_PATH . 17#define TRACE_INCLUDE_PATH .
18#define TRACE_INCLUDE_FILE trace 18#define TRACE_INCLUDE_FILE trace
19 19
20/* 20/* Tracepoints for VM exits */
21 * Tracepoints for VM exits
22 */
23extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; 21extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
24 22
25TRACE_EVENT(kvm_exit, 23TRACE_EVENT(kvm_exit,
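The hunk is cut off before the body of the kvm_exit event, so its actual fields are not shown. For orientation, a hypothetical event in the same style, with the name and fields invented for illustration (TP_PROTO declares the tracepoint's arguments, TP_STRUCT__entry the ring-buffer record, TP_fast_assign fills it, TP_printk formats it):

/* Sketch only: kvm_demo_exit is not the file's kvm_exit definition. */
TRACE_EVENT(kvm_demo_exit,
	TP_PROTO(unsigned long pc, unsigned int reason),
	TP_ARGS(pc, reason),

	TP_STRUCT__entry(
		__field(unsigned long, pc)
		__field(unsigned int, reason)
	),

	TP_fast_assign(
		__entry->pc = pc;
		__entry->reason = reason;
	),

	TP_printk("reason %u, PC 0x%08lx", __entry->reason, __entry->pc)
);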