author	Deng-Cheng Zhu <dengcheng.zhu@imgtec.com>	2014-06-26 15:11:34 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-06-30 10:52:01 -0400
commit	d116e812f9026e3cca46ce1009e577afec62916d (patch)
tree	2a8f60c6beb6c5489d777cb0e2b8a92cf42127b0 /arch/mips/kvm/kvm_mips.c
parent	85949977a5b499efca661fb80993693acbfac64d (diff)
MIPS: KVM: Reformat code and comments
No logic changes inside.

Signed-off-by: Deng-Cheng Zhu <dengcheng.zhu@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/mips/kvm/kvm_mips.c')
-rw-r--r--	arch/mips/kvm/kvm_mips.c	179
1 file changed, 90 insertions(+), 89 deletions(-)
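The one hunk below that looks like more than whitespace is the VCPU_STAT() change: KVM_STAT_VCPU moves out of the macro and into each debugfs_entries[] initializer. Both spellings expand to the same three initializer values, which is why the patch can claim no logic changes. A minimal sketch of the expansion (the _OLD/_NEW names are hypothetical, for illustration only; they are not in the patch):

/* Before: the macro supplied both the offset and the stat kind. */
#define VCPU_STAT_OLD(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/* { "wait", VCPU_STAT_OLD(wait_exits) } expands to
 * { "wait", offsetof(struct kvm_vcpu, stat.wait_exits), KVM_STAT_VCPU } */

/* After: the macro supplies only the offset; KVM_STAT_VCPU is spelled
 * out in each table entry. */
#define VCPU_STAT_NEW(x) offsetof(struct kvm_vcpu, stat.x)
/* { "wait", VCPU_STAT_NEW(wait_exits), KVM_STAT_VCPU } expands to
 * { "wait", offsetof(struct kvm_vcpu, stat.wait_exits), KVM_STAT_VCPU },
 * i.e. the identical initializer. */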
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index cd5e4f568439..52be52adf030 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -7,7 +7,7 @@
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -31,38 +31,41 @@
 #define VECTORSPACING 0x100	/* for EI/VI mode */
 #endif
 
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
 struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "wait", VCPU_STAT(wait_exits) },
-	{ "cache", VCPU_STAT(cache_exits) },
-	{ "signal", VCPU_STAT(signal_exits) },
-	{ "interrupt", VCPU_STAT(int_exits) },
-	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
-	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
-	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
-	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
-	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
-	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
-	{ "syscall", VCPU_STAT(syscall_exits) },
-	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
-	{ "break_inst", VCPU_STAT(break_inst_exits) },
-	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
+	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
+	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
+	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
+	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
+	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
+	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
+	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
+	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
+	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
+	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
+	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
+	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
 	{NULL}
 };
 
 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
 {
 	int i;
+
 	for_each_possible_cpu(i) {
 		vcpu->arch.guest_kernel_asid[i] = 0;
 		vcpu->arch.guest_user_asid[i] = 0;
 	}
+
 	return 0;
 }
 
-/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
- * are "runnable" if interrupts are pending
+/*
+ * XXXKYMA: We are simulatoring a processor that has the WII bit set in
+ * Config7, so we are "runnable" if interrupts are pending
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
@@ -103,7 +106,10 @@ static void kvm_mips_init_tlbs(struct kvm *kvm)
 {
 	unsigned long wired;
 
-	/* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
+	/*
+	 * Add a wired entry to the TLB, it is used to map the commpage to
+	 * the Guest kernel
+	 */
 	wired = read_c0_wired();
 	write_c0_wired(wired + 1);
 	mtc0_tlbw_hazard();
@@ -130,7 +136,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
 	}
 
-
 	return 0;
 }
 
@@ -185,8 +190,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	}
 }
 
-long
-kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+			unsigned long arg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -207,17 +212,17 @@ void kvm_arch_memslots_updated(struct kvm *kvm)
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
-                                   struct kvm_memory_slot *memslot,
-                                   struct kvm_userspace_memory_region *mem,
-                                   enum kvm_mr_change change)
+				   struct kvm_memory_slot *memslot,
+				   struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
 {
 	return 0;
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
-                                   struct kvm_userspace_memory_region *mem,
-                                   const struct kvm_memory_slot *old,
-                                   enum kvm_mr_change change)
+				   struct kvm_userspace_memory_region *mem,
+				   const struct kvm_memory_slot *old,
+				   enum kvm_mr_change change)
 {
 	unsigned long npages = 0;
 	int i, err = 0;
@@ -246,9 +251,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 					npages, kvm->arch.guest_pmap);
 
 			/* Now setup the page table */
-			for (i = 0; i < npages; i++) {
+			for (i = 0; i < npages; i++)
 				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
-			}
 		}
 	}
 out:
@@ -270,8 +274,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	extern char mips32_exception[], mips32_exceptionEnd[];
-	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 	int err, size, offset;
 	void *gebase;
 	int i;
@@ -290,14 +292,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 
 	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
 
-	/* Allocate space for host mode exception handlers that handle
+	/*
+	 * Allocate space for host mode exception handlers that handle
 	 * guest mode exits
 	 */
-	if (cpu_has_veic || cpu_has_vint) {
+	if (cpu_has_veic || cpu_has_vint)
 		size = 0x200 + VECTORSPACING * 64;
-	} else {
+	else
 		size = 0x4000;
-	}
 
 	/* Save Linux EBASE */
 	vcpu->arch.host_ebase = (void *)read_c0_ebase();
@@ -345,7 +347,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	local_flush_icache_range((unsigned long)gebase,
 				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 
-	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+	/*
+	 * Allocate comm page for guest kernel, a TLB will be reserved for
+	 * mapping GVA @ 0xFFFF8000 to this page
+	 */
 	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
 
 	if (!vcpu->arch.kseg0_commpage) {
@@ -391,9 +396,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_free(vcpu);
 }
 
-int
-kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-				    struct kvm_guest_debug *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
 {
 	return -ENOIOCTLCMD;
 }
@@ -430,8 +434,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return r;
 }
 
-int
-kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+			     struct kvm_mips_interrupt *irq)
 {
 	int intr = (int)irq->irq;
 	struct kvm_vcpu *dvcpu = NULL;
@@ -458,23 +462,20 @@ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
 
 	dvcpu->arch.wait = 0;
 
-	if (waitqueue_active(&dvcpu->wq)) {
+	if (waitqueue_active(&dvcpu->wq))
 		wake_up_interruptible(&dvcpu->wq);
-	}
 
 	return 0;
 }
 
-int
-kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }
 
-int
-kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
-				struct kvm_mp_state *mp_state)
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+				    struct kvm_mp_state *mp_state)
 {
 	return -ENOIOCTLCMD;
 }
@@ -631,10 +632,12 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 	}
 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
 		return put_user(v, uaddr64);
 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 		u32 v32 = (u32)v;
+
 		return put_user(v32, uaddr32);
 	} else {
 		return -EINVAL;
@@ -727,8 +730,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-long
-kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+			 unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
@@ -738,6 +741,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	case KVM_SET_ONE_REG:
 	case KVM_GET_ONE_REG: {
 		struct kvm_one_reg reg;
+
 		if (copy_from_user(&reg, argp, sizeof(reg)))
 			return -EFAULT;
 		if (ioctl == KVM_SET_ONE_REG)
@@ -772,6 +776,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	case KVM_INTERRUPT:
 		{
 			struct kvm_mips_interrupt irq;
+
 			r = -EFAULT;
 			if (copy_from_user(&irq, argp, sizeof(irq)))
 				goto out;
@@ -790,9 +795,7 @@ out:
 	return r;
 }
 
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
+/* Get (and clear) the dirty memory log for a memory slot. */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
@@ -859,14 +862,14 @@ void kvm_arch_exit(void)
 	kvm_mips_callbacks = NULL;
 }
 
-int
-kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }
 
-int
-kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
 {
 	return -ENOIOCTLCMD;
 }
@@ -979,14 +982,11 @@ static void kvm_mips_comparecount_func(unsigned long data)
 	kvm_mips_callbacks->queue_timer_int(vcpu);
 
 	vcpu->arch.wait = 0;
-	if (waitqueue_active(&vcpu->wq)) {
+	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
-	}
 }
 
-/*
- * low level hrtimer wake routine.
- */
+/* low level hrtimer wake routine */
 static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
 {
 	struct kvm_vcpu *vcpu;
@@ -1010,8 +1010,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 	return;
 }
 
-int
-kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+				  struct kvm_translation *tr)
 {
 	return 0;
 }
@@ -1022,8 +1022,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return kvm_mips_callbacks->vcpu_setup(vcpu);
 }
 
-static
-void kvm_mips_set_c0_status(void)
+static void kvm_mips_set_c0_status(void)
 {
 	uint32_t status = read_c0_status();
 
@@ -1053,7 +1052,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
 
-	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+	/*
+	 * Set the appropriate status bits based on host CPU features,
+	 * before we hit the scheduler
+	 */
 	kvm_mips_set_c0_status();
 
 	local_irq_enable();
@@ -1061,7 +1063,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
 			cause, opc, run, vcpu);
 
-	/* Do a privilege check, if in UM most of these exit conditions end up
+	/*
+	 * Do a privilege check, if in UM most of these exit conditions end up
 	 * causing an exception to be delivered to the Guest Kernel
 	 */
 	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
@@ -1080,9 +1083,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		++vcpu->stat.int_exits;
 		trace_kvm_exit(vcpu, INT_EXITS);
 
-		if (need_resched()) {
+		if (need_resched())
 			cond_resched();
-		}
 
 		ret = RESUME_GUEST;
 		break;
@@ -1094,9 +1096,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
 		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
 		/* XXXKYMA: Might need to return to user space */
-		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
 			ret = RESUME_HOST;
-		}
 		break;
 
 	case T_TLB_MOD:
@@ -1106,10 +1107,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case T_TLB_ST_MISS:
-		kvm_debug
-		    ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
-		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
-		     badvaddr);
+		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+			  badvaddr);
 
 		++vcpu->stat.tlbmiss_st_exits;
 		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
@@ -1156,10 +1156,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	default:
-		kvm_err
-		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
-		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
-		     kvm_read_c0_guest_status(vcpu->arch.cop0));
+		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+			kvm_read_c0_guest_status(vcpu->arch.cop0));
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
@@ -1174,7 +1173,7 @@ skip_emul:
 	kvm_mips_deliver_interrupts(vcpu, cause);
 
 	if (!(ret & RESUME_HOST)) {
-		/* Only check for signals if not already exiting to userspace */
+		/* Only check for signals if not already exiting to userspace */
 		if (signal_pending(current)) {
 			run->exit_reason = KVM_EXIT_INTR;
 			ret = (-EINTR << 2) | RESUME_HOST;
@@ -1195,11 +1194,13 @@ int __init kvm_mips_init(void)
 	if (ret)
 		return ret;
 
-	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
-	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
-	 * to avoid the possibility of double faulting. The issue is that the TLB code
-	 * references routines that are part of the the KVM module,
-	 * which are only available once the module is loaded.
+	/*
+	 * On MIPS, kernel modules are executed from "mapped space", which
+	 * requires TLBs. The TLB handling code is statically linked with
+	 * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
+	 * double faulting. The issue is that the TLB code references
+	 * routines that are part of the the KVM module, which are only
+	 * available once the module is loaded.
 	 */
 	kvm_mips_gfn_to_pfn = gfn_to_pfn;
 	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
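Everything else in the patch is a handful of CodingStyle conversions applied over and over: block comments reflowed to the kernel's multi-line style, braces dropped from single-statement branches, and function prototypes pulled up so the return type and name share a line. A short sketch of the comment and brace patterns, reusing identifiers from the hunks above (illustration only, not part of the patch):

/* Old block-comment style, removed throughout this patch: the text
 * began on the opening line. */

/*
 * New style: a bare opening marker, the text on " * " lines below it,
 * and the closing marker on its own line.
 */

/* Braces around single-statement branches are dropped: */
if (waitqueue_active(&vcpu->wq))
	wake_up_interruptible(&vcpu->wq);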