author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 15:16:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 15:16:46 -0400
commit    8533ce72718871fb528d853391746f36243273af
tree      a3ac06520e45cb6a472ed83979b0d48b6c2cec15  /arch/mips/kvm
parent    c9b88e9581828bb8bba06c5e7ee8ed1761172b6e
parent    42cbc04fd3b5e3f9b011bf9fa3ce0b3d1e10b58b
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM changes from Paolo Bonzini:
 "These are the x86, MIPS and s390 changes; PPC and ARM will come in a
  few days.

  MIPS and s390 have little going on this release; just bugfixes, some
  small, some larger.

  The highlights for x86 are nested VMX improvements (Jan Kiszka),
  optimizations for older processors (up to Nehalem, by me and Bandan
  Das), and a lot of x86 emulator bugfixes (Nadav Amit).

  Stephen Rothwell reported a trivial conflict with the tracing branch"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (104 commits)
  x86/kvm: Resolve shadow warnings in macro expansion
  KVM: s390: rework broken SIGP STOP interrupt handling
  KVM: x86: always exit on EOIs for interrupts listed in the IOAPIC redir table
  KVM: vmx: remove duplicate vmx_mpx_supported() prototype
  KVM: s390: Fix memory leak on busy SIGP stop
  x86/kvm: Resolve shadow warning from min macro
  kvm: Resolve missing-field-initializers warnings
  Replace NR_VMX_MSR with its definition
  KVM: x86: Assertions to check no overrun in MSR lists
  KVM: x86: set rflags.rf during fault injection
  KVM: x86: Setting rflags.rf during rep-string emulation
  KVM: x86: DR6/7.RTM cannot be written
  KVM: nVMX: clean up nested_release_vmcs12 and code around it
  KVM: nVMX: fix lifetime issues for vmcs02
  KVM: x86: Defining missing x86 vectors
  KVM: x86: emulator injects #DB when RFLAGS.RF is set
  KVM: x86: Cleanup of rflags.rf cleaning
  KVM: x86: Clear rflags.rf on emulated instructions
  KVM: x86: popf emulation should not change RF
  KVM: x86: Clearing rflags.rf upon skipped emulated instruction
  ...
Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/Makefile                                                    |   8
-rw-r--r--  arch/mips/kvm/callback.c (renamed from arch/mips/kvm/kvm_cb.c)            |   0
-rw-r--r--  arch/mips/kvm/commpage.c                                                  |  33
-rw-r--r--  arch/mips/kvm/commpage.h                                                  |  24
-rw-r--r--  arch/mips/kvm/dyntrans.c (renamed from arch/mips/kvm/kvm_mips_dyntrans.c) |  40
-rw-r--r--  arch/mips/kvm/emulate.c (renamed from arch/mips/kvm/kvm_mips_emul.c)      | 539
-rw-r--r--  arch/mips/kvm/interrupt.c (renamed from arch/mips/kvm/kvm_mips_int.c)     |  47
-rw-r--r--  arch/mips/kvm/interrupt.h (renamed from arch/mips/kvm/kvm_mips_int.h)     |  22
-rw-r--r--  arch/mips/kvm/kvm_mips_comm.h                                             |  23
-rw-r--r--  arch/mips/kvm/kvm_mips_commpage.c                                         |  37
-rw-r--r--  arch/mips/kvm/kvm_mips_opcode.h                                           |  24
-rw-r--r--  arch/mips/kvm/locore.S (renamed from arch/mips/kvm/kvm_locore.S)          |  55
-rw-r--r--  arch/mips/kvm/mips.c (renamed from arch/mips/kvm/kvm_mips.c)              | 224
-rw-r--r--  arch/mips/kvm/opcode.h                                                    |  22
-rw-r--r--  arch/mips/kvm/stats.c (renamed from arch/mips/kvm/kvm_mips_stats.c)       |  28
-rw-r--r--  arch/mips/kvm/tlb.c (renamed from arch/mips/kvm/kvm_tlb.c)                | 258
-rw-r--r--  arch/mips/kvm/trace.h                                                     |  18
-rw-r--r--  arch/mips/kvm/trap_emul.c (renamed from arch/mips/kvm/kvm_trap_emul.c)    | 112
18 files changed, 739 insertions, 775 deletions
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 78d87bbc99db..401fe027c261 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -5,9 +5,9 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
 
-kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
-	    kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
-	    kvm_mips_dyntrans.o kvm_trap_emul.o
+kvm-objs := $(common-objs) mips.o emulate.o locore.o \
+	    interrupt.o stats.o commpage.o \
+	    dyntrans.o trap_emul.o
 
 obj-$(CONFIG_KVM) += kvm.o
-obj-y += kvm_cb.o kvm_tlb.o
+obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/callback.c
index 313c2e37b978..313c2e37b978 100644
--- a/arch/mips/kvm/kvm_cb.c
+++ b/arch/mips/kvm/callback.c
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
new file mode 100644
index 000000000000..2d6e976d1add
--- /dev/null
+++ b/arch/mips/kvm/commpage.c
@@ -0,0 +1,33 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * commpage, currently used for Virtual COP0 registers.
+ * Mapped into the guest kernel @ 0x0.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "commpage.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
+	/* Specific init values for fields */
+	vcpu->arch.cop0 = &page->cop0;
+}
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h
new file mode 100644
index 000000000000..08c5fa2bbc0f
--- /dev/null
+++ b/arch/mips/kvm/commpage.h
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: commpage: mapped into guest kernel space
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+	/* COP0 state is mapped into Guest kernel via commpage */
+	struct mips_coproc cop0;
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET	0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
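
The commpage design above means a guest access to virtualized COP0 state is just a load or store at a small offset from guest virtual address 0, with no trap. A minimal sketch of the offset arithmetic the binary patcher would rely on (the COMMPAGE_COP0_OFFSET macro is illustrative, not part of this patch):

	#include <stddef.h>

	/* Illustrative only: the guest-visible address of a virtual COP0
	 * register is its offset inside struct kvm_mips_commpage, because
	 * the commpage is mapped at guest VA 0x0. */
	#define COMMPAGE_COP0_OFFSET(rd, sel) \
		(offsetof(struct kvm_mips_commpage, cop0) + \
		 offsetof(struct mips_coproc, reg[rd][sel]))

With that layout, kvm_mips_commpage_init() only has to point vcpu->arch.cop0 at the in-page copy, so the trap-and-emulate paths and the patched guest code share the same state.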
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/dyntrans.c
index b80e41d858fd..521121bdebff 100644
--- a/arch/mips/kvm/kvm_mips_dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -1,13 +1,13 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -18,7 +18,7 @@
 #include <linux/bootmem.h>
 #include <asm/cacheflush.h>
 
-#include "kvm_mips_comm.h"
+#include "commpage.h"
 
 #define SYNCI_TEMPLATE  0x041f0000
 #define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
@@ -28,9 +28,8 @@
 #define CLEAR_TEMPLATE  0x00000020
 #define SW_TEMPLATE     0xac000000
 
-int
-kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
-			   struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+			       struct kvm_vcpu *vcpu)
 {
 	int result = 0;
 	unsigned long kseg0_opc;
@@ -47,12 +46,11 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
 }
 
 /*
- * Address based CACHE instructions are transformed into synci(s). A little heavy
- * for just D-cache invalidates, but avoids an expensive trap
+ * Address based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but avoids an expensive trap
  */
-int
-kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
-			struct kvm_vcpu *vcpu)
+int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+			    struct kvm_vcpu *vcpu)
 {
 	int result = 0;
 	unsigned long kseg0_opc;
@@ -72,8 +70,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
 	return result;
 }
 
-int
-kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 	int32_t rt, rd, sel;
 	uint32_t mfc0_inst;
@@ -115,8 +112,7 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-int
-kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 {
 	int32_t rt, rd, sel;
 	uint32_t mtc0_inst = SW_TEMPLATE;
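
The templates at the top of this file drive the rewrite: kvm_mips_trans_mtc0() starts from SW_TEMPLATE (a MIPS sw with base register $zero) and ors in the source register and the commpage offset, so the privileged write becomes an ordinary store into the commpage. A simplified sketch of that encoding step (make_commpage_sw() is a hypothetical helper; the offset would come from the commpage layout):

	/* Sketch: encode "sw rt, offset($zero)" from SW_TEMPLATE.  With the
	 * commpage mapped at guest VA 0x0, base $zero plus a small offset
	 * addresses the virtual COP0 register directly. */
	static uint32_t make_commpage_sw(uint32_t rt, uint16_t offset)
	{
		uint32_t sw_inst = SW_TEMPLATE;	/* 0xac000000: sw, base = $zero */

		sw_inst |= (rt & 0x1f) << 16;	/* rt field, bits 20:16 */
		sw_inst |= offset;		/* 16-bit immediate */
		return sw_inst;
	}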
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/emulate.c
index 8d4840090082..fb3e8dfd1ff6 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/emulate.c
@@ -1,13 +1,13 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Instruction/Exception emulation
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Instruction/Exception emulation
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -29,9 +29,9 @@
 #include <asm/r4kcache.h>
 #define CONFIG_MIPS_MT
 
-#include "kvm_mips_opcode.h"
-#include "kvm_mips_int.h"
-#include "kvm_mips_comm.h"
+#include "opcode.h"
+#include "interrupt.h"
+#include "commpage.h"
 
 #include "trace.h"
 
@@ -51,18 +51,14 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 	if (epc & 3)
 		goto unaligned;
 
-	/*
-	 * Read the instruction
-	 */
+	/* Read the instruction */
 	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
 
 	if (insn.word == KVM_INVALID_INST)
 		return KVM_INVALID_INST;
 
 	switch (insn.i_format.opcode) {
-	/*
-	 * jr and jalr are in r_format format.
-	 */
+	/* jr and jalr are in r_format format. */
 	case spec_op:
 		switch (insn.r_format.func) {
 		case jalr_op:
@@ -124,18 +120,16 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 
 			dspcontrol = rddsp(0x01);
 
-			if (dspcontrol >= 32) {
+			if (dspcontrol >= 32)
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
-			} else
+			else
 				epc += 8;
 			nextpc = epc;
 			break;
 		}
 		break;
 
-	/*
-	 * These are unconditional and in j_format.
-	 */
+	/* These are unconditional and in j_format. */
 	case jal_op:
 		arch->gprs[31] = instpc + 8;
 	case j_op:
@@ -146,9 +140,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 		nextpc = epc;
 		break;
 
-	/*
-	 * These are conditional and in i_format.
-	 */
+	/* These are conditional and in i_format. */
 	case beq_op:
 	case beql_op:
 		if (arch->gprs[insn.i_format.rs] ==
@@ -189,22 +181,20 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 		nextpc = epc;
 		break;
 
-	/*
-	 * And now the FPA/cp1 branch instructions.
-	 */
+	/* And now the FPA/cp1 branch instructions. */
 	case cop1_op:
-		printk("%s: unsupported cop1_op\n", __func__);
+		kvm_err("%s: unsupported cop1_op\n", __func__);
 		break;
 	}
 
 	return nextpc;
 
 unaligned:
-	printk("%s: unaligned epc\n", __func__);
+	kvm_err("%s: unaligned epc\n", __func__);
 	return nextpc;
 
 sigill:
-	printk("%s: DSP branch but not DSP ASE\n", __func__);
+	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
 	return nextpc;
 }
 
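The i_format cases above all share the same arithmetic: a taken branch targets the delay-slot address (epc + 4) plus the sign-extended 16-bit immediate scaled by 4, while a not-taken branch falls through to epc + 8, skipping the delay slot. A worked sketch of that computation:

	/* Sketch of the branch arithmetic used above.  Example: a branch at
	 * epc = 0x80001000 with simmediate = -4 targets
	 * 0x80001000 + 4 + (-4 << 2) = 0x80000ff4. */
	static unsigned long branch_target(unsigned long epc, int16_t simmediate)
	{
		return epc + 4 + ((long)simmediate << 2);
	}
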
@@ -219,7 +209,8 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
 			er = EMULATE_FAIL;
 		} else {
 			vcpu->arch.pc = branch_pc;
-			kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+			kvm_debug("BD update_pc(): New PC: %#lx\n",
+				  vcpu->arch.pc);
 		}
 	} else
 		vcpu->arch.pc += 4;
@@ -240,6 +231,7 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
 	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
 		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
 }
@@ -392,7 +384,6 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
 	return now;
 }
 
-
 /**
  * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
  * @vcpu:	Virtual CPU.
@@ -760,8 +751,8 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 		kvm_clear_c0_guest_status(cop0, ST0_ERL);
 		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 	} else {
-		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
-		       vcpu->arch.pc);
+		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+			vcpu->arch.pc);
 		er = EMULATE_FAIL;
 	}
 
@@ -770,8 +761,6 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 
 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 {
-	enum emulation_result er = EMULATE_DONE;
-
 	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
 		  vcpu->arch.pending_exceptions);
 
@@ -781,8 +770,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 		vcpu->arch.wait = 1;
 		kvm_vcpu_block(vcpu);
 
-		/* We we are runnable, then definitely go off to user space to check if any
-		 * I/O interrupts are pending.
+		/*
+		 * If we are runnable, then definitely go off to user space to
+		 * check if any I/O interrupts are pending.
 		 */
 		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
 			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
@@ -790,20 +780,20 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	return er;
+	return EMULATE_DONE;
 }
 
-/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
- * this, if things ever change
+/*
+ * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
+ * we can catch this, if things ever change
  */
 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	enum emulation_result er = EMULATE_FAIL;
 	uint32_t pc = vcpu->arch.pc;
 
-	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
-	return er;
+	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+	return EMULATE_FAIL;
 }
 
 /* Write Guest TLB Entry @ Index */
@@ -811,88 +801,76 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	int index = kvm_read_c0_guest_index(cop0);
-	enum emulation_result er = EMULATE_DONE;
 	struct kvm_mips_tlb *tlb = NULL;
 	uint32_t pc = vcpu->arch.pc;
 
 	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-		printk("%s: illegal index: %d\n", __func__, index);
-		printk
-		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-		     pc, index, kvm_read_c0_guest_entryhi(cop0),
-		     kvm_read_c0_guest_entrylo0(cop0),
-		     kvm_read_c0_guest_entrylo1(cop0),
-		     kvm_read_c0_guest_pagemask(cop0));
+		kvm_debug("%s: illegal index: %d\n", __func__, index);
+		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+			  pc, index, kvm_read_c0_guest_entryhi(cop0),
+			  kvm_read_c0_guest_entrylo0(cop0),
+			  kvm_read_c0_guest_entrylo1(cop0),
+			  kvm_read_c0_guest_pagemask(cop0));
 		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
 	}
 
 	tlb = &vcpu->arch.guest_tlb[index];
-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 
-	kvm_debug
-	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-	     pc, index, kvm_read_c0_guest_entryhi(cop0),
-	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
-	     kvm_read_c0_guest_pagemask(cop0));
+	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0),
+		  kvm_read_c0_guest_pagemask(cop0));
 
-	return er;
+	return EMULATE_DONE;
 }
 
 /* Write Guest TLB Entry @ Random Index */
 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	enum emulation_result er = EMULATE_DONE;
 	struct kvm_mips_tlb *tlb = NULL;
 	uint32_t pc = vcpu->arch.pc;
 	int index;
 
-#if 1
 	get_random_bytes(&index, sizeof(index));
 	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
-#else
-	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
-#endif
-
-	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-		printk("%s: illegal index: %d\n", __func__, index);
-		return EMULATE_FAIL;
-	}
 
 	tlb = &vcpu->arch.guest_tlb[index];
 
-#if 1
-	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-#endif
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 
-	kvm_debug
-	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
-	     pc, index, kvm_read_c0_guest_entryhi(cop0),
-	     kvm_read_c0_guest_entrylo0(cop0),
-	     kvm_read_c0_guest_entrylo1(cop0));
+	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+		  pc, index, kvm_read_c0_guest_entryhi(cop0),
+		  kvm_read_c0_guest_entrylo0(cop0),
+		  kvm_read_c0_guest_entrylo1(cop0));
 
-	return er;
+	return EMULATE_DONE;
 }
 
 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	long entryhi = kvm_read_c0_guest_entryhi(cop0);
-	enum emulation_result er = EMULATE_DONE;
 	uint32_t pc = vcpu->arch.pc;
 	int index = -1;
 
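Note why the old bounds check could be dropped from kvm_mips_emul_tlbwr(): assuming KVM_MIPS_GUEST_TLB_SIZE is a power of two, masking the random value with size - 1 can never produce an out-of-range index, so the illegal-index branch was dead code. The invariant, as a standalone sketch:

	/* Sketch: with a power-of-two guest TLB size, the mask alone
	 * guarantees 0 <= index < KVM_MIPS_GUEST_TLB_SIZE. */
	static int random_guest_tlb_index(void)
	{
		int index;

		get_random_bytes(&index, sizeof(index));
		return index & (KVM_MIPS_GUEST_TLB_SIZE - 1);
	}
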
@@ -903,12 +881,12 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
 	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
 		  index);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
-		     struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
+					   uint32_t cause, struct kvm_run *run,
+					   struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
@@ -922,9 +900,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 	 */
 	curr_pc = vcpu->arch.pc;
 	er = update_pc(vcpu, cause);
-	if (er == EMULATE_FAIL) {
+	if (er == EMULATE_FAIL)
 		return er;
-	}
 
 	copz = (inst >> 21) & 0x1f;
 	rt = (inst >> 16) & 0x1f;
@@ -949,7 +926,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 			er = kvm_mips_emul_tlbp(vcpu);
 			break;
 		case rfe_op:
-			printk("!!!COP0_RFE!!!\n");
+			kvm_err("!!!COP0_RFE!!!\n");
 			break;
 		case eret_op:
 			er = kvm_mips_emul_eret(vcpu);
@@ -973,8 +950,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
 				kvm_mips_trans_mfc0(inst, opc, vcpu);
 #endif
-			}
-			else {
+			} else {
 				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
 
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
999 if ((rd == MIPS_CP0_TLB_INDEX) 975 if ((rd == MIPS_CP0_TLB_INDEX)
1000 && (vcpu->arch.gprs[rt] >= 976 && (vcpu->arch.gprs[rt] >=
1001 KVM_MIPS_GUEST_TLB_SIZE)) { 977 KVM_MIPS_GUEST_TLB_SIZE)) {
1002 printk("Invalid TLB Index: %ld", 978 kvm_err("Invalid TLB Index: %ld",
1003 vcpu->arch.gprs[rt]); 979 vcpu->arch.gprs[rt]);
1004 er = EMULATE_FAIL; 980 er = EMULATE_FAIL;
1005 break; 981 break;
1006 } 982 }
@@ -1010,21 +986,19 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 				kvm_change_c0_guest_ebase(cop0,
 							  ~(C0_EBASE_CORE_MASK),
 							  vcpu->arch.gprs[rt]);
-				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
-				       kvm_read_c0_guest_ebase(cop0));
+				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
+					kvm_read_c0_guest_ebase(cop0));
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
 				uint32_t nasid =
 					vcpu->arch.gprs[rt] & ASID_MASK;
-				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
-				    &&
+				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
 				    ((kvm_read_c0_guest_entryhi(cop0) &
 				      ASID_MASK) != nasid)) {
-
-					kvm_debug
-					    ("MTCz, change ASID from %#lx to %#lx\n",
-					     kvm_read_c0_guest_entryhi(cop0) &
-					     ASID_MASK,
-					     vcpu->arch.gprs[rt] & ASID_MASK);
+					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+						  kvm_read_c0_guest_entryhi(cop0)
+						  & ASID_MASK,
+						  vcpu->arch.gprs[rt]
+						  & ASID_MASK);
 
 					/* Blow away the shadow host TLBs */
 					kvm_mips_flush_host_tlb(1);
@@ -1049,7 +1023,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
 				kvm_write_c0_guest_status(cop0,
 							  vcpu->arch.gprs[rt]);
-				/* Make sure that CU1 and NMI bits are never set */
+				/*
+				 * Make sure that CU1 and NMI bits are
+				 * never set
+				 */
 				kvm_clear_c0_guest_status(cop0,
 							  (ST0_CU1 | ST0_NMI));
 
@@ -1058,6 +1035,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 #endif
 			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
 				uint32_t old_cause, new_cause;
+
 				old_cause = kvm_read_c0_guest_cause(cop0);
 				new_cause = vcpu->arch.gprs[rt];
 				/* Update R/W bits */
@@ -1082,9 +1060,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 			break;
 
 		case dmtc_op:
-			printk
-			    ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
-			     vcpu->arch.pc, rt, rd, sel);
+			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+				vcpu->arch.pc, rt, rd, sel);
 			er = EMULATE_FAIL;
 			break;
 
@@ -1115,7 +1092,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 				cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
 			uint32_t pss =
 				(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
-			/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
+			/*
+			 * We don't support any shadow register sets, so
+			 * SRSCtl[PSS] == SRSCtl[CSS] = 0
+			 */
 			if (css || pss) {
 				er = EMULATE_FAIL;
 				break;
@@ -1126,21 +1106,17 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
 			}
 			break;
 		default:
-			printk
-			    ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
-			     vcpu->arch.pc, copz);
+			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+				vcpu->arch.pc, copz);
 			er = EMULATE_FAIL;
 			break;
 		}
 	}
 
 done:
-	/*
-	 * Rollback PC only if emulation was unsuccessful
-	 */
-	if (er == EMULATE_FAIL) {
+	/* Rollback PC only if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
 		vcpu->arch.pc = curr_pc;
-	}
 
 dont_update_pc:
 	/*
@@ -1152,9 +1128,9 @@ dont_update_pc:
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
-		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
 	int32_t op, base, rt, offset;
@@ -1252,24 +1228,21 @@ kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
 		break;
 
 	default:
-		printk("Store not yet supported");
+		kvm_err("Store not yet supported");
 		er = EMULATE_FAIL;
 		break;
 	}
 
-	/*
-	 * Rollback PC if emulation was unsuccessful
-	 */
-	if (er == EMULATE_FAIL) {
+	/* Rollback PC if emulation was unsuccessful */
+	if (er == EMULATE_FAIL)
 		vcpu->arch.pc = curr_pc;
-	}
 
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
-		      struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+					    struct kvm_run *run,
+					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
 	int32_t op, base, rt, offset;
@@ -1364,7 +1337,7 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
 		break;
 
 	default:
-		printk("Load not yet supported");
+		kvm_err("Load not yet supported");
 		er = EMULATE_FAIL;
 		break;
 	}
@@ -1383,7 +1356,7 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
 	gfn = va >> PAGE_SHIFT;
 
 	if (gfn >= kvm->arch.guest_pmap_npages) {
-		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
 		kvm_mips_dump_host_tlbs();
 		kvm_arch_vcpu_dump_regs(vcpu);
 		return -1;
@@ -1391,7 +1364,8 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
 	pfn = kvm->arch.guest_pmap[gfn];
 	pa = (pfn << PAGE_SHIFT) | offset;
 
-	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
+		  CKSEG0ADDR(pa));
 
 	local_flush_icache_range(CKSEG0ADDR(pa), 32);
 	return 0;
@@ -1410,13 +1384,12 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
 #define MIPS_CACHE_DCACHE		0x1
 #define MIPS_CACHE_SEC			0x3
 
-enum emulation_result
-kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
-		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+					     uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	extern void (*r4k_blast_dcache) (void);
-	extern void (*r4k_blast_icache) (void);
 	enum emulation_result er = EMULATE_DONE;
 	int32_t offset, cache, op_inst, op, base;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1443,22 +1416,23 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
 	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
 		  cache, op, base, arch->gprs[base], offset);
 
-	/* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate
-	 * the caches entirely by stepping through all the ways/indexes
+	/*
+	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
+	 * invalidate the caches entirely by stepping through all the
+	 * ways/indexes
 	 */
 	if (op == MIPS_CACHE_OP_INDEX_INV) {
-		kvm_debug
-		    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-		     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
-		     arch->gprs[base], offset);
+		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+			  arch->gprs[base], offset);
 
 		if (cache == MIPS_CACHE_DCACHE)
 			r4k_blast_dcache();
 		else if (cache == MIPS_CACHE_ICACHE)
 			r4k_blast_icache();
 		else {
-			printk("%s: unsupported CACHE INDEX operation\n",
-			       __func__);
+			kvm_err("%s: unsupported CACHE INDEX operation\n",
+				__func__);
 			return EMULATE_FAIL;
 		}
 
@@ -1470,21 +1444,19 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
 
 	preempt_disable();
 	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-
-		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
 			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
-		}
 	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
 		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
 		int index;
 
 		/* If an entry already exists then skip */
-		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
 			goto skip_fault;
-		}
 
-		/* If address not in the guest TLB, then give the guest a fault, the
-		 * resulting handler will do the right thing
+		/*
+		 * If address not in the guest TLB, then give the guest a fault,
+		 * the resulting handler will do the right thing
 		 */
 		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
 						  (kvm_read_c0_guest_entryhi
@@ -1499,23 +1471,28 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
 			goto dont_update_pc;
 		} else {
 			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
-			/* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+			/*
+			 * Check if the entry is valid, if not then setup a TLB
+			 * invalid exception to the guest
+			 */
 			if (!TLB_IS_VALID(*tlb, va)) {
 				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
 								run, vcpu);
 				preempt_enable();
 				goto dont_update_pc;
 			} else {
-				/* We fault an entry from the guest tlb to the shadow host TLB */
+				/*
+				 * We fault an entry from the guest tlb to the
+				 * shadow host TLB
+				 */
 				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
 								     NULL,
 								     NULL);
 			}
 		}
 	} else {
-		printk
-		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-		     cache, op, base, arch->gprs[base], offset);
+		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			cache, op, base, arch->gprs[base], offset);
 		er = EMULATE_FAIL;
 		preempt_enable();
 		goto dont_update_pc;
@@ -1530,7 +1507,10 @@ skip_fault:
 		flush_dcache_line(va);
 
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
-		/* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */
+		/*
+		 * Replace the CACHE instruction, with a SYNCI, not the same,
+		 * but avoids a trap
+		 */
 		kvm_mips_trans_cache_va(inst, opc, vcpu);
 #endif
 	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
@@ -1542,9 +1522,8 @@ skip_fault:
 		kvm_mips_trans_cache_va(inst, opc, vcpu);
 #endif
 	} else {
-		printk
-		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
-		     cache, op, base, arch->gprs[base], offset);
+		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+			cache, op, base, arch->gprs[base], offset);
 		er = EMULATE_FAIL;
 		preempt_enable();
 		goto dont_update_pc;
@@ -1552,28 +1531,23 @@ skip_fault:
 
 	preempt_enable();
 
- dont_update_pc:
-	/*
-	 * Rollback PC
-	 */
+dont_update_pc:
+	/* Rollback PC */
 	vcpu->arch.pc = curr_pc;
- done:
+done:
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
-		      struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+					    struct kvm_run *run,
+					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
 	uint32_t inst;
 
-	/*
-	 * Fetch the instruction.
-	 */
-	if (cause & CAUSEF_BD) {
+	/* Fetch the instruction. */
+	if (cause & CAUSEF_BD)
 		opc += 1;
-	}
 
 	inst = kvm_get_inst(opc, vcpu);
 
@@ -1601,8 +1575,8 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
 		break;
 
 	default:
-		printk("Instruction emulation not supported (%p/%#x)\n", opc,
-		       inst);
+		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
+			inst);
 		kvm_arch_vcpu_dump_regs(vcpu);
 		er = EMULATE_FAIL;
 		break;
@@ -1611,9 +1585,10 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
-			 struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1638,20 +1613,20 @@ kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
 		arch->pc = KVM_GUEST_KSEG0 + 0x180;
 
 	} else {
-		printk("Trying to deliver SYSCALL when EXL is already set\n");
+		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
 		er = EMULATE_FAIL;
 	}
 
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
-			    struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
@@ -1688,16 +1663,16 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
-			   struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+						 uint32_t *opc,
+						 struct kvm_run *run,
+						 struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi =
 		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
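
All of these exception-delivery helpers build the guest's view of the fault the same way: EntryHi gets the VPN2 of the faulting address combined with the guest's current ASID, which is what the guest TLB refill handler expects when it runs. As a standalone sketch:

	/* Sketch of the EntryHi value computed in the tlbmiss/tlbinv
	 * helpers: VPN2 of the bad address plus the guest's live ASID. */
	static unsigned long fault_entryhi(unsigned long badvaddr,
					   unsigned long guest_entryhi)
	{
		return (badvaddr & VPN2_MASK) | (guest_entryhi & ASID_MASK);
	}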
@@ -1734,16 +1709,16 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
-			    struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
@@ -1778,16 +1753,16 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
-			   struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+						 uint32_t *opc,
+						 struct kvm_run *run,
+						 struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 
@@ -1822,13 +1797,13 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
 /* TLBMOD: store into address matching TLB with Dirty bit off */
-enum emulation_result
-kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
-		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
 #ifdef DEBUG
@@ -1837,9 +1812,7 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
 		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 	int index;
 
-	/*
-	 * If address not in the guest TLB, then we are in trouble
-	 */
+	/* If address not in the guest TLB, then we are in trouble */
 	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
 	if (index < 0) {
 		/* XXXKYMA Invalidate and retry */
@@ -1856,15 +1829,15 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
-			struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
 			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1895,16 +1868,16 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
 	/* Blow away the shadow host TLBs */
 	kvm_mips_flush_host_tlb(1);
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
-			 struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	enum emulation_result er = EMULATE_DONE;
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1924,12 +1897,13 @@ kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
 			  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
 	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
 
-	return er;
+	return EMULATE_DONE;
 }
 
-enum emulation_result
-kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
-			struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1961,9 +1935,10 @@ kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
 	return er;
 }
 
-enum emulation_result
-kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
-			struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+					      uint32_t *opc,
+					      struct kvm_run *run,
+					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -1988,16 +1963,14 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
 		arch->pc = KVM_GUEST_KSEG0 + 0x180;
 
 	} else {
-		printk("Trying to deliver BP when EXL is already set\n");
+		kvm_err("Trying to deliver BP when EXL is already set\n");
 		er = EMULATE_FAIL;
 	}
 
 	return er;
 }
 
-/*
- * ll/sc, rdhwr, sync emulation
- */
+/* ll/sc, rdhwr, sync emulation */
 
 #define OPCODE 0xfc000000
 #define BASE   0x03e00000
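The masks above slice up a trapped instruction word for the ll/sc/rdhwr checks: OPCODE covers the major opcode in bits 31:26, the register-field masks pick out rt/rd, and the RDHWR/SYNC values live in the function field, bits 5:0. A small sketch of a match against them (0x7c000000, the SPECIAL3 major opcode, is an assumption here since its #define is outside this hunk):

	/* Sketch: rdhwr is SPECIAL3 (major opcode 0x1f) with function
	 * code 0x3b; 0x3f masks the function field. */
	static int is_rdhwr(uint32_t inst)
	{
		return (inst & OPCODE) == 0x7c000000 &&	/* SPECIAL3, assumed */
		       (inst & 0x3f) == RDHWR;
	}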
@@ -2012,9 +1985,9 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
 #define SYNC   0x0000000f
 #define RDHWR  0x0000003b
 
-enum emulation_result
-kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
-		   struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+					 struct kvm_run *run,
+					 struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
@@ -2031,16 +2004,14 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
 	if (er == EMULATE_FAIL)
 		return er;
 
-	/*
-	 * Fetch the instruction.
-	 */
+	/* Fetch the instruction. */
 	if (cause & CAUSEF_BD)
 		opc += 1;
 
 	inst = kvm_get_inst(opc, vcpu);
 
 	if (inst == KVM_INVALID_INST) {
-		printk("%s: Cannot get inst @ %p\n", __func__, opc);
+		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
 		return EMULATE_FAIL;
 	}
 
@@ -2099,15 +2070,15 @@ emulate_ri:
 	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
 }
 
-enum emulation_result
-kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+						  struct kvm_run *run)
 {
 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
 	enum emulation_result er = EMULATE_DONE;
 	unsigned long curr_pc;
 
 	if (run->mmio.len > sizeof(*gpr)) {
-		printk("Bad MMIO length: %d", run->mmio.len);
+		kvm_err("Bad MMIO length: %d", run->mmio.len);
 		er = EMULATE_FAIL;
 		goto done;
 	}
@@ -2142,18 +2113,18 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	}
 
 	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-		kvm_debug
-		    ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-		     vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-		     vcpu->mmio_needed);
+		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+			  vcpu->mmio_needed);
 
 done:
 	return er;
 }
 
-static enum emulation_result
-kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
-		     struct kvm_run *run, struct kvm_vcpu *vcpu)
+static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
 {
 	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2181,16 +2152,17 @@ kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
 			  exccode, kvm_read_c0_guest_epc(cop0),
 			  kvm_read_c0_guest_badvaddr(cop0));
 	} else {
-		printk("Trying to deliver EXC when EXL is already set\n");
+		kvm_err("Trying to deliver EXC when EXL is already set\n");
 		er = EMULATE_FAIL;
 	}
 
 	return er;
 }
 
-enum emulation_result
-kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
-			 struct kvm_run *run, struct kvm_vcpu *vcpu)
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
 	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -2215,10 +2187,13 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2215 break; 2187 break;
2216 2188
2217 case T_TLB_LD_MISS: 2189 case T_TLB_LD_MISS:
2218 /* If we are accessing Guest kernel space, then send an address error exception to the guest */ 2190 /*
 2191 * If we are accessing Guest kernel space, then send an
2192 * address error exception to the guest
2193 */
2219 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { 2194 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2220 printk("%s: LD MISS @ %#lx\n", __func__, 2195 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2221 badvaddr); 2196 badvaddr);
2222 cause &= ~0xff; 2197 cause &= ~0xff;
2223 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); 2198 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
2224 er = EMULATE_PRIV_FAIL; 2199 er = EMULATE_PRIV_FAIL;
@@ -2226,10 +2201,13 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2226 break; 2201 break;
2227 2202
2228 case T_TLB_ST_MISS: 2203 case T_TLB_ST_MISS:
2229 /* If we are accessing Guest kernel space, then send an address error exception to the guest */ 2204 /*
 2205 * If we are accessing Guest kernel space, then send an
2206 * address error exception to the guest
2207 */
2230 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { 2208 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2231 printk("%s: ST MISS @ %#lx\n", __func__, 2209 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2232 badvaddr); 2210 badvaddr);
2233 cause &= ~0xff; 2211 cause &= ~0xff;
2234 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); 2212 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
2235 er = EMULATE_PRIV_FAIL; 2213 er = EMULATE_PRIV_FAIL;
@@ -2237,8 +2215,8 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2237 break; 2215 break;
2238 2216
2239 case T_ADDR_ERR_ST: 2217 case T_ADDR_ERR_ST:
2240 printk("%s: address error ST @ %#lx\n", __func__, 2218 kvm_debug("%s: address error ST @ %#lx\n", __func__,
2241 badvaddr); 2219 badvaddr);
2242 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 2220 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2243 cause &= ~0xff; 2221 cause &= ~0xff;
2244 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); 2222 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
@@ -2246,8 +2224,8 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2246 er = EMULATE_PRIV_FAIL; 2224 er = EMULATE_PRIV_FAIL;
2247 break; 2225 break;
2248 case T_ADDR_ERR_LD: 2226 case T_ADDR_ERR_LD:
2249 printk("%s: address error LD @ %#lx\n", __func__, 2227 kvm_debug("%s: address error LD @ %#lx\n", __func__,
2250 badvaddr); 2228 badvaddr);
2251 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 2229 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2252 cause &= ~0xff; 2230 cause &= ~0xff;
2253 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); 2231 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
@@ -2260,21 +2238,23 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2260 } 2238 }
2261 } 2239 }
2262 2240
2263 if (er == EMULATE_PRIV_FAIL) { 2241 if (er == EMULATE_PRIV_FAIL)
2264 kvm_mips_emulate_exc(cause, opc, run, vcpu); 2242 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2265 } 2243
2266 return er; 2244 return er;
2267} 2245}
2268 2246
2269/* User Address (UA) fault, this could happen if 2247/*
2248 * User Address (UA) fault, this could happen if
2270 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this 2249 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2271 * case we pass on the fault to the guest kernel and let it handle it. 2250 * case we pass on the fault to the guest kernel and let it handle it.
2272 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this 2251 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2273 * case we inject the TLB from the Guest TLB into the shadow host TLB 2252 * case we inject the TLB from the Guest TLB into the shadow host TLB
2274 */ 2253 */
2275enum emulation_result 2254enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
2276kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, 2255 uint32_t *opc,
2277 struct kvm_run *run, struct kvm_vcpu *vcpu) 2256 struct kvm_run *run,
2257 struct kvm_vcpu *vcpu)
2278{ 2258{
2279 enum emulation_result er = EMULATE_DONE; 2259 enum emulation_result er = EMULATE_DONE;
2280 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 2260 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -2284,10 +2264,11 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
2284 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", 2264 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2285 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); 2265 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2286 2266
2287 /* KVM would not have got the exception if this entry was valid in the shadow host TLB 2267 /*
2288 * Check the Guest TLB, if the entry is not there then send the guest an 2268 * KVM would not have got the exception if this entry was valid in the
2289 * exception. The guest exc handler should then inject an entry into the 2269 * shadow host TLB. Check the Guest TLB, if the entry is not there then
2290 * guest TLB 2270 * send the guest an exception. The guest exc handler should then inject
2271 * an entry into the guest TLB.
2291 */ 2272 */
2292 index = kvm_mips_guest_tlb_lookup(vcpu, 2273 index = kvm_mips_guest_tlb_lookup(vcpu,
2293 (va & VPN2_MASK) | 2274 (va & VPN2_MASK) |
@@ -2299,13 +2280,17 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
2299 } else if (exccode == T_TLB_ST_MISS) { 2280 } else if (exccode == T_TLB_ST_MISS) {
2300 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); 2281 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2301 } else { 2282 } else {
2302 printk("%s: invalid exc code: %d\n", __func__, exccode); 2283 kvm_err("%s: invalid exc code: %d\n", __func__,
2284 exccode);
2303 er = EMULATE_FAIL; 2285 er = EMULATE_FAIL;
2304 } 2286 }
2305 } else { 2287 } else {
2306 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; 2288 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2307 2289
2308 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ 2290 /*
2291 * Check if the entry is valid, if not then setup a TLB invalid
2292 * exception to the guest
2293 */
2309 if (!TLB_IS_VALID(*tlb, va)) { 2294 if (!TLB_IS_VALID(*tlb, va)) {
2310 if (exccode == T_TLB_LD_MISS) { 2295 if (exccode == T_TLB_LD_MISS) {
2311 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, 2296 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
@@ -2314,15 +2299,17 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
2314 er = kvm_mips_emulate_tlbinv_st(cause, opc, run, 2299 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2315 vcpu); 2300 vcpu);
2316 } else { 2301 } else {
2317 printk("%s: invalid exc code: %d\n", __func__, 2302 kvm_err("%s: invalid exc code: %d\n", __func__,
2318 exccode); 2303 exccode);
2319 er = EMULATE_FAIL; 2304 er = EMULATE_FAIL;
2320 } 2305 }
2321 } else { 2306 } else {
2322 kvm_debug 2307 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2323 ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", 2308 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
2324 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); 2309 /*
2325 /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ 2310 * OK we have a Guest TLB entry, now inject it into the
2311 * shadow host TLB
2312 */
2326 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, 2313 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
2327 NULL); 2314 NULL);
2328 } 2315 }
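
[Note: the TLB-miss path above reduces to a small decision tree. Below is a condensed sketch in C using the names visible in this hunk; the helper bodies and the exact ASID masking are assumed rather than quoted, and the invalid-exccode error path is omitted.]

    /* Condensed sketch of the decision tree; not the full kernel function. */
    enum emulation_result handle_tlbmiss_sketch(unsigned long cause, uint32_t *opc,
                                                struct kvm_run *run,
                                                struct kvm_vcpu *vcpu)
    {
        uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
        unsigned long va = vcpu->arch.host_cp0_badvaddr;
        struct kvm_mips_tlb *tlb;
        int index;

        /* (1) Not in the guest TLB: reflect a TLB refill to the guest. */
        index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
                    (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
        if (index < 0)
            return (exccode == T_TLB_LD_MISS)
                ? kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu)
                : kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);

        tlb = &vcpu->arch.guest_tlb[index];

        /* Present but invalid: deliver a TLB invalid exception instead. */
        if (!TLB_IS_VALID(*tlb, va))
            return (exccode == T_TLB_LD_MISS)
                ? kvm_mips_emulate_tlbinv_ld(cause, opc, run, vcpu)
                : kvm_mips_emulate_tlbinv_st(cause, opc, run, vcpu);

        /* (2) Valid guest entry: inject it into the shadow host TLB. */
        kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, NULL);
        return EMULATE_DONE;
    }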
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/interrupt.c
index 1e5de16afe29..9b4445940c2b 100644
--- a/arch/mips/kvm/kvm_mips_int.c
+++ b/arch/mips/kvm/interrupt.c
@@ -1,13 +1,13 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS: Interrupt delivery 6 * KVM/MIPS: Interrupt delivery
7* 7 *
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/ 10 */
11 11
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/err.h> 13#include <linux/err.h>
@@ -20,7 +20,7 @@
20 20
21#include <linux/kvm_host.h> 21#include <linux/kvm_host.h>
22 22
23#include "kvm_mips_int.h" 23#include "interrupt.h"
24 24
25void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) 25void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
26{ 26{
@@ -34,7 +34,8 @@ void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
34 34
35void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) 35void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
36{ 36{
37 /* Cause bits to reflect the pending timer interrupt, 37 /*
38 * Cause bits to reflect the pending timer interrupt,
38 * the EXC code will be set when we are actually 39 * the EXC code will be set when we are actually
39 * delivering the interrupt: 40 * delivering the interrupt:
40 */ 41 */
@@ -51,12 +52,13 @@ void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
51 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); 52 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
52} 53}
53 54
54void 55void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
55kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) 56 struct kvm_mips_interrupt *irq)
56{ 57{
57 int intr = (int)irq->irq; 58 int intr = (int)irq->irq;
58 59
59 /* Cause bits to reflect the pending IO interrupt, 60 /*
61 * Cause bits to reflect the pending IO interrupt,
60 * the EXC code will be set when we are actually 62 * the EXC code will be set when we are actually
61 * delivering the interrupt: 63 * delivering the interrupt:
62 */ 64 */
@@ -83,11 +85,11 @@ kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
83 85
84} 86}
85 87
86void 88void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
87kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, 89 struct kvm_mips_interrupt *irq)
88 struct kvm_mips_interrupt *irq)
89{ 90{
90 int intr = (int)irq->irq; 91 int intr = (int)irq->irq;
92
91 switch (intr) { 93 switch (intr) {
92 case -2: 94 case -2:
93 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); 95 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
@@ -111,9 +113,8 @@ kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
111} 113}
112 114
113/* Deliver the interrupt of the corresponding priority, if possible. */ 115/* Deliver the interrupt of the corresponding priority, if possible. */
114int 116int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
115kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, 117 uint32_t cause)
116 uint32_t cause)
117{ 118{
118 int allowed = 0; 119 int allowed = 0;
119 uint32_t exccode; 120 uint32_t exccode;
@@ -164,7 +165,6 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
164 165
165 /* Are we allowed to deliver the interrupt ??? */ 166 /* Are we allowed to deliver the interrupt ??? */
166 if (allowed) { 167 if (allowed) {
167
168 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 168 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
169 /* save old pc */ 169 /* save old pc */
170 kvm_write_c0_guest_epc(cop0, arch->pc); 170 kvm_write_c0_guest_epc(cop0, arch->pc);
@@ -195,9 +195,8 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
195 return allowed; 195 return allowed;
196} 196}
197 197
198int 198int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
199kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, 199 uint32_t cause)
200 uint32_t cause)
201{ 200{
202 return 1; 201 return 1;
203} 202}
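
[Note: the queue/dequeue callbacks above share one pattern: record the pending interrupt in the guest's Cause register and queue a priority for the delivery pass. A minimal sketch of the queue side, assuming kvm_set_c0_guest_cause() as the counterpart of the kvm_clear_c0_guest_cause() shown above, and MIPS_EXC_INT_IO as the IO priority name.]

    /* Illustrative sketch of the queue-side pattern. */
    void queue_io_int_sketch(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
    {
        int intr = (int)irq->irq;

        switch (intr) {
        case 2:
            /*
             * Cause bits reflect the pending IO interrupt; the EXC code
             * is only filled in when the interrupt is actually delivered.
             */
            kvm_set_c0_guest_cause(vcpu->arch.cop0, C_IRQ0); /* assumed setter */
            kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);       /* assumed priority */
            break;
        default:
            break;
        }
    }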
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/interrupt.h
index 20da7d29eede..4ab4bdfad703 100644
--- a/arch/mips/kvm/kvm_mips_int.h
+++ b/arch/mips/kvm/interrupt.h
@@ -1,14 +1,15 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS: Interrupts 6 * KVM/MIPS: Interrupts
7* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8* Authors: Sanjay Lal <sanjayl@kymasys.com> 8 * Authors: Sanjay Lal <sanjayl@kymasys.com>
9*/ 9 */
10 10
11/* MIPS Exception Priorities, exceptions (including interrupts) are queued up 11/*
12 * MIPS Exception Priorities, exceptions (including interrupts) are queued up
12 * for the guest in the order specified by their priorities 13 * for the guest in the order specified by their priorities
13 */ 14 */
14 15
@@ -27,6 +28,9 @@
27#define MIPS_EXC_MAX 12 28#define MIPS_EXC_MAX 12
28/* XXXSL More to follow */ 29/* XXXSL More to follow */
29 30
31extern char mips32_exception[], mips32_exceptionEnd[];
32extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
33
30#define C_TI (_ULCAST_(1) << 30) 34#define C_TI (_ULCAST_(1) << 30)
31 35
32#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) 36#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
deleted file mode 100644
index a4a8c85cc8f7..000000000000
--- a/arch/mips/kvm/kvm_mips_comm.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: commpage: mapped into guest kernel space
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#ifndef __KVM_MIPS_COMMPAGE_H__
13#define __KVM_MIPS_COMMPAGE_H__
14
15struct kvm_mips_commpage {
16 struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */
17};
18
19#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
20
21extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
22
23#endif /* __KVM_MIPS_COMMPAGE_H__ */
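
[Note: since the vcpu's cop0 pointer is aimed at the commpage (see kvm_mips_commpage_init() in the next file), emulated COP0 accesses on the host side become plain loads and stores. A hedged sketch, assuming the conventional reg[reg][sel] layout of struct mips_coproc.]

    /*
     * Hedged sketch: with cop0 backed by the commpage, an emulated MFC0
     * is just a host memory read. The reg[][] layout is assumed.
     */
    static inline unsigned long guest_cp0_read_sketch(struct kvm_vcpu *vcpu,
                                                      int reg, int sel)
    {
        struct mips_coproc *cop0 = vcpu->arch.cop0; /* points into the commpage */

        return cop0->reg[reg][sel];
    }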
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
deleted file mode 100644
index 3873b1ecc40f..000000000000
--- a/arch/mips/kvm/kvm_mips_commpage.c
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* commpage, currently used for Virtual COP0 registers.
7* Mapped into the guest kernel @ 0x0.
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <asm/page.h>
20#include <asm/cacheflush.h>
21#include <asm/mmu_context.h>
22
23#include <linux/kvm_host.h>
24
25#include "kvm_mips_comm.h"
26
27void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
28{
29 struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
30 memset(page, 0, sizeof(struct kvm_mips_commpage));
31
32 /* Specific init values for fields */
33 vcpu->arch.cop0 = &page->cop0;
34 memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
35
36 return;
37}
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
deleted file mode 100644
index 86d3b4cc348b..000000000000
--- a/arch/mips/kvm/kvm_mips_opcode.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10/*
11 * Define opcode values not defined in <asm/inst.h>
12 */
13
14#ifndef __KVM_MIPS_OPCODE_H__
15#define __KVM_MIPS_OPCODE_H__
16
17/* COP0 Ops */
18#define mfmcz_op 0x0b /* 01011 */
19#define wrpgpr_op 0x0e /* 01110 */
20
21/* COP0 opcodes (only if COP0 and CO=1): */
22#define wait_op 0x20 /* 100000 */
23
24#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/locore.S
index 033ac343e72c..d7279c03c517 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/locore.S
@@ -16,7 +16,6 @@
16#include <asm/stackframe.h> 16#include <asm/stackframe.h>
17#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
18 18
19
20#define _C_LABEL(x) x 19#define _C_LABEL(x) x
21#define MIPSX(name) mips32_ ## name 20#define MIPSX(name) mips32_ ## name
22#define CALLFRAME_SIZ 32 21#define CALLFRAME_SIZ 32
@@ -91,7 +90,10 @@ FEXPORT(__kvm_mips_vcpu_run)
91 LONG_S $24, PT_R24(k1) 90 LONG_S $24, PT_R24(k1)
92 LONG_S $25, PT_R25(k1) 91 LONG_S $25, PT_R25(k1)
93 92
94 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */ 93 /*
94 * XXXKYMA k0/k1 not saved, not being used if we got here through
95 * an ioctl()
96 */
95 97
96 LONG_S $28, PT_R28(k1) 98 LONG_S $28, PT_R28(k1)
97 LONG_S $29, PT_R29(k1) 99 LONG_S $29, PT_R29(k1)
@@ -132,7 +134,10 @@ FEXPORT(__kvm_mips_vcpu_run)
132 /* Save the kernel gp as well */ 134 /* Save the kernel gp as well */
133 LONG_S gp, VCPU_HOST_GP(k1) 135 LONG_S gp, VCPU_HOST_GP(k1)
134 136
135 /* Setup status register for running the guest in UM, interrupts are disabled */ 137 /*
138 * Setup status register for running the guest in UM, interrupts
139 * are disabled
140 */
136 li k0, (ST0_EXL | KSU_USER | ST0_BEV) 141 li k0, (ST0_EXL | KSU_USER | ST0_BEV)
137 mtc0 k0, CP0_STATUS 142 mtc0 k0, CP0_STATUS
138 ehb 143 ehb
@@ -152,7 +157,6 @@ FEXPORT(__kvm_mips_vcpu_run)
152 mtc0 k0, CP0_STATUS 157 mtc0 k0, CP0_STATUS
153 ehb 158 ehb
154 159
155
156 /* Set Guest EPC */ 160 /* Set Guest EPC */
157 LONG_L t0, VCPU_PC(k1) 161 LONG_L t0, VCPU_PC(k1)
158 mtc0 t0, CP0_EPC 162 mtc0 t0, CP0_EPC
@@ -165,7 +169,7 @@ FEXPORT(__kvm_mips_load_asid)
165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 169 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 170 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1671: 1711:
168 /* t1: contains the base of the ASID array, need to get the cpu id */ 172 /* t1: contains the base of the ASID array, need to get the cpu id */
169 LONG_L t2, TI_CPU($28) /* smp_processor_id */ 173 LONG_L t2, TI_CPU($28) /* smp_processor_id */
170 INT_SLL t2, t2, 2 /* x4 */ 174 INT_SLL t2, t2, 2 /* x4 */
171 REG_ADDU t3, t1, t2 175 REG_ADDU t3, t1, t2
@@ -229,9 +233,7 @@ FEXPORT(__kvm_mips_load_k0k1)
229 eret 233 eret
230 234
231VECTOR(MIPSX(exception), unknown) 235VECTOR(MIPSX(exception), unknown)
232/* 236/* Find out what mode we came from and jump to the proper handler. */
233 * Find out what mode we came from and jump to the proper handler.
234 */
235 mtc0 k0, CP0_ERROREPC #01: Save guest k0 237 mtc0 k0, CP0_ERROREPC #01: Save guest k0
236 ehb #02: 238 ehb #02:
237 239
@@ -239,7 +241,8 @@ VECTOR(MIPSX(exception), unknown)
239 INT_SRL k0, k0, 10 #03: Get rid of CPUNum 241 INT_SRL k0, k0, 10 #03: Get rid of CPUNum
240 INT_SLL k0, k0, 10 #04 242 INT_SLL k0, k0, 10 #04
241 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 243 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
242 INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000 244 INT_ADDIU k0, k0, 0x2000 #06: Exception handler is
245 # installed @ offset 0x2000
243 j k0 #07: jump to the function 246 j k0 #07: jump to the function
244 nop #08: branch delay slot 247 nop #08: branch delay slot
245VECTOR_END(MIPSX(exceptionEnd)) 248VECTOR_END(MIPSX(exceptionEnd))
@@ -248,7 +251,6 @@ VECTOR_END(MIPSX(exceptionEnd))
248/* 251/*
249 * Generic Guest exception handler. We end up here when the guest 252 * Generic Guest exception handler. We end up here when the guest
250 * does something that causes a trap to kernel mode. 253 * does something that causes a trap to kernel mode.
251 *
252 */ 254 */
253NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) 255NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
254 /* Get the VCPU pointer from DDATA_LO */ 256
@@ -290,9 +292,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
290 LONG_S $30, VCPU_R30(k1) 292 LONG_S $30, VCPU_R30(k1)
291 LONG_S $31, VCPU_R31(k1) 293 LONG_S $31, VCPU_R31(k1)
292 294
293 /* We need to save hi/lo and restore them on 295 /* We need to save hi/lo and restore them on the way out */
294 * the way out
295 */
296 mfhi t0 296 mfhi t0
297 LONG_S t0, VCPU_HI(k1) 297 LONG_S t0, VCPU_HI(k1)
298 298
@@ -321,8 +321,10 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
321 /* Save pointer to run in s0, will be saved by the compiler */ 321 /* Save pointer to run in s0, will be saved by the compiler */
322 move s0, a0 322 move s0, a0
323 323
324 /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to 324 /*
325 * process the exception */ 325 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
326 * process the exception
327 */
326 mfc0 k0,CP0_EPC 328 mfc0 k0,CP0_EPC
327 LONG_S k0, VCPU_PC(k1) 329 LONG_S k0, VCPU_PC(k1)
328 330
@@ -351,7 +353,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
351 LONG_L k0, VCPU_HOST_EBASE(k1) 353 LONG_L k0, VCPU_HOST_EBASE(k1)
352 mtc0 k0,CP0_EBASE 354 mtc0 k0,CP0_EBASE
353 355
354
355 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ 356 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
356 .set at 357 .set at
357 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) 358 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
@@ -369,7 +370,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
369 /* Saved host state */ 370 /* Saved host state */
370 INT_ADDIU sp, sp, -PT_SIZE 371 INT_ADDIU sp, sp, -PT_SIZE
371 372
372 /* XXXKYMA do we need to load the host ASID, maybe not because the 373 /*
374 * XXXKYMA do we need to load the host ASID, maybe not because the
373 * kernel entries are marked GLOBAL, need to verify 375 * kernel entries are marked GLOBAL, need to verify
374 */ 376 */
375 377
@@ -383,9 +385,11 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
383 385
384 /* Jump to handler */ 386 /* Jump to handler */
385FEXPORT(__kvm_mips_jump_to_handler) 387FEXPORT(__kvm_mips_jump_to_handler)
386 /* XXXKYMA: not sure if this is safe, how large is the stack?? 388 /*
389 * XXXKYMA: not sure if this is safe, how large is the stack??
387 * Now jump to the kvm_mips_handle_exit() to see if we can deal 390 * Now jump to the kvm_mips_handle_exit() to see if we can deal
388 * with this in the kernel */ 391 * with this in the kernel
392 */
389 PTR_LA t9, kvm_mips_handle_exit 393 PTR_LA t9, kvm_mips_handle_exit
390 jalr.hb t9 394 jalr.hb t9
391 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */ 395 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
@@ -394,7 +398,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
394 di 398 di
395 ehb 399 ehb
396 400
397 /* XXXKYMA: k0/k1 could have been blown away if we processed 401 /*
402 * XXXKYMA: k0/k1 could have been blown away if we processed
398 * an exception while we were handling the exception from the 403 * an exception while we were handling the exception from the
399 * guest, reload k1 404 * guest, reload k1
400 */ 405 */
@@ -402,7 +407,8 @@ FEXPORT(__kvm_mips_jump_to_handler)
402 move k1, s1 407 move k1, s1
403 INT_ADDIU k1, k1, VCPU_HOST_ARCH 408 INT_ADDIU k1, k1, VCPU_HOST_ARCH
404 409
405 /* Check return value, should tell us if we are returning to the 410 /*
411 * Check return value, should tell us if we are returning to the
406 * host (handle I/O etc)or resuming the guest 412 * host (handle I/O etc)or resuming the guest
407 */ 413 */
408 andi t0, v0, RESUME_HOST 414 andi t0, v0, RESUME_HOST
@@ -521,8 +527,10 @@ __kvm_mips_return_to_host:
521 LONG_L $0, PT_R0(k1) 527 LONG_L $0, PT_R0(k1)
522 LONG_L $1, PT_R1(k1) 528 LONG_L $1, PT_R1(k1)
523 529
524 /* r2/v0 is the return code, shift it down by 2 (arithmetic) 530 /*
525 * to recover the err code */ 531 * r2/v0 is the return code, shift it down by 2 (arithmetic)
532 * to recover the err code
533 */
526 INT_SRA k0, v0, 2 534 INT_SRA k0, v0, 2
527 move $2, k0 535 move $2, k0
528 536
@@ -566,7 +574,6 @@ __kvm_mips_return_to_host:
566 PTR_LI k0, 0x2000000F 574 PTR_LI k0, 0x2000000F
567 mtc0 k0, CP0_HWRENA 575 mtc0 k0, CP0_HWRENA
568 576
569
570 /* Restore RA, which is the address we will return to */ 577 /* Restore RA, which is the address we will return to */
571 LONG_L ra, PT_R31(k1) 578 LONG_L ra, PT_R31(k1)
572 j ra 579 j ra
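
[Note: the "shift it down by 2 (arithmetic)" comment above is one half of the convention used by kvm_mips_handle_exit(), which packs a signed errno above two RESUME_* flag bits, e.g. ret = (-EINTR << 2) | RESUME_HOST on a pending signal, as the mips.c hunk below shows. A small sketch of the round trip:]

    /* RESUME_* flags live in the low two bits; the errno sits above them. */
    static int recover_err_sketch(void)
    {
        int ret = (-EINTR << 2) | RESUME_HOST; /* packed as in kvm_mips_handle_exit() */
        int err = ret >> 2;                    /* arithmetic shift recovers -EINTR */

        return (ret & RESUME_HOST) ? err : 0;  /* only host exits carry an errno */
    }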
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/mips.c
index f3c56a182fd8..4fda672cb58e 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/mips.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/ 10 */
11 11
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/err.h> 13#include <linux/err.h>
@@ -21,8 +21,8 @@
21 21
22#include <linux/kvm_host.h> 22#include <linux/kvm_host.h>
23 23
24#include "kvm_mips_int.h" 24#include "interrupt.h"
25#include "kvm_mips_comm.h" 25#include "commpage.h"
26 26
27#define CREATE_TRACE_POINTS 27#define CREATE_TRACE_POINTS
28#include "trace.h" 28#include "trace.h"
@@ -31,38 +31,41 @@
31#define VECTORSPACING 0x100 /* for EI/VI mode */ 31#define VECTORSPACING 0x100 /* for EI/VI mode */
32#endif 32#endif
33 33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
35struct kvm_stats_debugfs_item debugfs_entries[] = { 35struct kvm_stats_debugfs_item debugfs_entries[] = {
36 { "wait", VCPU_STAT(wait_exits) }, 36 { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
37 { "cache", VCPU_STAT(cache_exits) }, 37 { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
38 { "signal", VCPU_STAT(signal_exits) }, 38 { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
39 { "interrupt", VCPU_STAT(int_exits) }, 39 { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
40 { "cop_unsuable", VCPU_STAT(cop_unusable_exits) }, 40 { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
41 { "tlbmod", VCPU_STAT(tlbmod_exits) }, 41 { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
42 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) }, 42 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
43 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) }, 43 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
44 { "addrerr_st", VCPU_STAT(addrerr_st_exits) }, 44 { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
45 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) }, 45 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
46 { "syscall", VCPU_STAT(syscall_exits) }, 46 { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
47 { "resvd_inst", VCPU_STAT(resvd_inst_exits) }, 47 { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
48 { "break_inst", VCPU_STAT(break_inst_exits) }, 48 { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
49 { "flush_dcache", VCPU_STAT(flush_dcache_exits) }, 49 { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
50 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 50 { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
51 {NULL} 51 {NULL}
52}; 52};
53 53
54static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) 54static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
55{ 55{
56 int i; 56 int i;
57
57 for_each_possible_cpu(i) { 58 for_each_possible_cpu(i) {
58 vcpu->arch.guest_kernel_asid[i] = 0; 59 vcpu->arch.guest_kernel_asid[i] = 0;
59 vcpu->arch.guest_user_asid[i] = 0; 60 vcpu->arch.guest_user_asid[i] = 0;
60 } 61 }
62
61 return 0; 63 return 0;
62} 64}
63 65
64/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we 66/*
65 * are "runnable" if interrupts are pending 67 * XXXKYMA: We are simulating a processor that has the WII bit set in
68 * Config7, so we are "runnable" if interrupts are pending
66 */ 69 */
67int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 70int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
68{ 71{
@@ -94,16 +97,17 @@ void kvm_arch_hardware_unsetup(void)
94 97
95void kvm_arch_check_processor_compat(void *rtn) 98void kvm_arch_check_processor_compat(void *rtn)
96{ 99{
97 int *r = (int *)rtn; 100 *(int *)rtn = 0;
98 *r = 0;
99 return;
100} 101}
101 102
102static void kvm_mips_init_tlbs(struct kvm *kvm) 103static void kvm_mips_init_tlbs(struct kvm *kvm)
103{ 104{
104 unsigned long wired; 105 unsigned long wired;
105 106
106 /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */ 107 /*
108 * Add a wired entry to the TLB, it is used to map the commpage to
109 * the Guest kernel
110 */
107 wired = read_c0_wired(); 111 wired = read_c0_wired();
108 write_c0_wired(wired + 1); 112 write_c0_wired(wired + 1);
109 mtc0_tlbw_hazard(); 113 mtc0_tlbw_hazard();
@@ -130,7 +134,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
130 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); 134 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
131 } 135 }
132 136
133
134 return 0; 137 return 0;
135} 138}
136 139
@@ -185,8 +188,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
185 } 188 }
186} 189}
187 190
188long 191long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
189kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 192 unsigned long arg)
190{ 193{
191 return -ENOIOCTLCMD; 194 return -ENOIOCTLCMD;
192} 195}
@@ -207,20 +210,20 @@ void kvm_arch_memslots_updated(struct kvm *kvm)
207} 210}
208 211
209int kvm_arch_prepare_memory_region(struct kvm *kvm, 212int kvm_arch_prepare_memory_region(struct kvm *kvm,
210 struct kvm_memory_slot *memslot, 213 struct kvm_memory_slot *memslot,
211 struct kvm_userspace_memory_region *mem, 214 struct kvm_userspace_memory_region *mem,
212 enum kvm_mr_change change) 215 enum kvm_mr_change change)
213{ 216{
214 return 0; 217 return 0;
215} 218}
216 219
217void kvm_arch_commit_memory_region(struct kvm *kvm, 220void kvm_arch_commit_memory_region(struct kvm *kvm,
218 struct kvm_userspace_memory_region *mem, 221 struct kvm_userspace_memory_region *mem,
219 const struct kvm_memory_slot *old, 222 const struct kvm_memory_slot *old,
220 enum kvm_mr_change change) 223 enum kvm_mr_change change)
221{ 224{
222 unsigned long npages = 0; 225 unsigned long npages = 0;
223 int i, err = 0; 226 int i;
224 227
225 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", 228 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
226 __func__, kvm, mem->slot, mem->guest_phys_addr, 229 __func__, kvm, mem->slot, mem->guest_phys_addr,
@@ -238,21 +241,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
238 241
239 if (!kvm->arch.guest_pmap) { 242 if (!kvm->arch.guest_pmap) {
240 kvm_err("Failed to allocate guest PMAP"); 243 kvm_err("Failed to allocate guest PMAP");
241 err = -ENOMEM; 244 return;
242 goto out;
243 } 245 }
244 246
245 kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", 247 kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
246 npages, kvm->arch.guest_pmap); 248 npages, kvm->arch.guest_pmap);
247 249
248 /* Now setup the page table */ 250 /* Now setup the page table */
249 for (i = 0; i < npages; i++) { 251 for (i = 0; i < npages; i++)
250 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; 252 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
251 }
252 } 253 }
253 } 254 }
254out:
255 return;
256} 255}
257 256
258void kvm_arch_flush_shadow_all(struct kvm *kvm) 257void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -270,8 +269,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
270 269
271struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 270struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
272{ 271{
273 extern char mips32_exception[], mips32_exceptionEnd[];
274 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
275 int err, size, offset; 272 int err, size, offset;
276 void *gebase; 273 void *gebase;
277 int i; 274 int i;
@@ -290,14 +287,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
290 287
291 kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); 288 kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
292 289
293 /* Allocate space for host mode exception handlers that handle 290 /*
291 * Allocate space for host mode exception handlers that handle
294 * guest mode exits 292 * guest mode exits
295 */ 293 */
296 if (cpu_has_veic || cpu_has_vint) { 294 if (cpu_has_veic || cpu_has_vint)
297 size = 0x200 + VECTORSPACING * 64; 295 size = 0x200 + VECTORSPACING * 64;
298 } else { 296 else
299 size = 0x4000; 297 size = 0x4000;
300 }
301 298
302 /* Save Linux EBASE */ 299 /* Save Linux EBASE */
303 vcpu->arch.host_ebase = (void *)read_c0_ebase(); 300 vcpu->arch.host_ebase = (void *)read_c0_ebase();
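
[Note: the extern arrays dropped from this function (and re-declared in interrupt.h above) delimit the prebuilt handlers in locore.S; vcpu creation copies those ranges into the freshly allocated gebase and flushes the icache, as the next hunk shows. A hedged sketch of the copy step: the 0x2000 offset matches the locore.S comment above, the other offset is an assumption.]

    /* Sketch only; performed inside kvm_arch_vcpu_create(). */
    memcpy(gebase + 0x180, mips32_exception,       /* general vector, offset assumed */
           mips32_exceptionEnd - mips32_exception);
    memcpy(gebase + 0x2000, mips32_GuestException, /* handler @ 0x2000 per locore.S */
           mips32_GuestExceptionEnd - mips32_GuestException);
    local_flush_icache_range((unsigned long)gebase,
                             (unsigned long)gebase + ALIGN(size, PAGE_SIZE));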
@@ -345,7 +342,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
345 local_flush_icache_range((unsigned long)gebase, 342 local_flush_icache_range((unsigned long)gebase,
346 (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); 343 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
347 344
348 /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */ 345 /*
346 * Allocate comm page for guest kernel, a TLB will be reserved for
347 * mapping GVA @ 0xFFFF8000 to this page
348 */
349 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); 349 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
350 350
351 if (!vcpu->arch.kseg0_commpage) { 351 if (!vcpu->arch.kseg0_commpage) {
@@ -392,9 +392,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
392 kvm_arch_vcpu_free(vcpu); 392 kvm_arch_vcpu_free(vcpu);
393} 393}
394 394
395int 395int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
396kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 396 struct kvm_guest_debug *dbg)
397 struct kvm_guest_debug *dbg)
398{ 397{
399 return -ENOIOCTLCMD; 398 return -ENOIOCTLCMD;
400} 399}
@@ -431,8 +430,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
431 return r; 430 return r;
432} 431}
433 432
434int 433int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
435kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) 434 struct kvm_mips_interrupt *irq)
436{ 435{
437 int intr = (int)irq->irq; 436 int intr = (int)irq->irq;
438 struct kvm_vcpu *dvcpu = NULL; 437 struct kvm_vcpu *dvcpu = NULL;
@@ -459,23 +458,20 @@ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
459 458
460 dvcpu->arch.wait = 0; 459 dvcpu->arch.wait = 0;
461 460
462 if (waitqueue_active(&dvcpu->wq)) { 461 if (waitqueue_active(&dvcpu->wq))
463 wake_up_interruptible(&dvcpu->wq); 462 wake_up_interruptible(&dvcpu->wq);
464 }
465 463
466 return 0; 464 return 0;
467} 465}
468 466
469int 467int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
470kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 468 struct kvm_mp_state *mp_state)
471 struct kvm_mp_state *mp_state)
472{ 469{
473 return -ENOIOCTLCMD; 470 return -ENOIOCTLCMD;
474} 471}
475 472
476int 473int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
477kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 474 struct kvm_mp_state *mp_state)
478 struct kvm_mp_state *mp_state)
479{ 475{
480 return -ENOIOCTLCMD; 476 return -ENOIOCTLCMD;
481} 477}
@@ -632,10 +628,12 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
632 } 628 }
633 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { 629 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
634 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; 630 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
631
635 return put_user(v, uaddr64); 632 return put_user(v, uaddr64);
636 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { 633 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
637 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; 634 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
638 u32 v32 = (u32)v; 635 u32 v32 = (u32)v;
636
639 return put_user(v32, uaddr32); 637 return put_user(v32, uaddr32);
640 } else { 638 } else {
641 return -EINVAL; 639 return -EINVAL;
@@ -728,8 +726,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
728 return 0; 726 return 0;
729} 727}
730 728
731long 729long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
732kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 730 unsigned long arg)
733{ 731{
734 struct kvm_vcpu *vcpu = filp->private_data; 732 struct kvm_vcpu *vcpu = filp->private_data;
735 void __user *argp = (void __user *)arg; 733 void __user *argp = (void __user *)arg;
@@ -739,6 +737,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
739 case KVM_SET_ONE_REG: 737 case KVM_SET_ONE_REG:
740 case KVM_GET_ONE_REG: { 738 case KVM_GET_ONE_REG: {
741 struct kvm_one_reg reg; 739 struct kvm_one_reg reg;
740
742 if (copy_from_user(&reg, argp, sizeof(reg))) 741 if (copy_from_user(&reg, argp, sizeof(reg)))
743 return -EFAULT; 742 return -EFAULT;
744 if (ioctl == KVM_SET_ONE_REG) 743 if (ioctl == KVM_SET_ONE_REG)
@@ -773,6 +772,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
773 case KVM_INTERRUPT: 772 case KVM_INTERRUPT:
774 { 773 {
775 struct kvm_mips_interrupt irq; 774 struct kvm_mips_interrupt irq;
775
776 r = -EFAULT; 776 r = -EFAULT;
777 if (copy_from_user(&irq, argp, sizeof(irq))) 777 if (copy_from_user(&irq, argp, sizeof(irq)))
778 goto out; 778 goto out;
@@ -791,9 +791,7 @@ out:
791 return r; 791 return r;
792} 792}
793 793
794/* 794/* Get (and clear) the dirty memory log for a memory slot. */
795 * Get (and clear) the dirty memory log for a memory slot.
796 */
797int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 795int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
798{ 796{
799 struct kvm_memory_slot *memslot; 797 struct kvm_memory_slot *memslot;
@@ -815,8 +813,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
815 ga = memslot->base_gfn << PAGE_SHIFT; 813 ga = memslot->base_gfn << PAGE_SHIFT;
816 ga_end = ga + (memslot->npages << PAGE_SHIFT); 814 ga_end = ga + (memslot->npages << PAGE_SHIFT);
817 815
818 printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, 816 kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
819 ga_end); 817 ga_end);
820 818
821 n = kvm_dirty_bitmap_bytes(memslot); 819 n = kvm_dirty_bitmap_bytes(memslot);
822 memset(memslot->dirty_bitmap, 0, n); 820 memset(memslot->dirty_bitmap, 0, n);
@@ -843,16 +841,12 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
843 841
844int kvm_arch_init(void *opaque) 842int kvm_arch_init(void *opaque)
845{ 843{
846 int ret;
847
848 if (kvm_mips_callbacks) { 844 if (kvm_mips_callbacks) {
849 kvm_err("kvm: module already exists\n"); 845 kvm_err("kvm: module already exists\n");
850 return -EEXIST; 846 return -EEXIST;
851 } 847 }
852 848
853 ret = kvm_mips_emulation_init(&kvm_mips_callbacks); 849 return kvm_mips_emulation_init(&kvm_mips_callbacks);
854
855 return ret;
856} 850}
857 851
858void kvm_arch_exit(void) 852void kvm_arch_exit(void)
@@ -860,14 +854,14 @@ void kvm_arch_exit(void)
860 kvm_mips_callbacks = NULL; 854 kvm_mips_callbacks = NULL;
861} 855}
862 856
863int 857int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
864kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 858 struct kvm_sregs *sregs)
865{ 859{
866 return -ENOIOCTLCMD; 860 return -ENOIOCTLCMD;
867} 861}
868 862
869int 863int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
870kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 864 struct kvm_sregs *sregs)
871{ 865{
872 return -ENOIOCTLCMD; 866 return -ENOIOCTLCMD;
873} 867}
@@ -923,24 +917,25 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
923 if (!vcpu) 917 if (!vcpu)
924 return -1; 918 return -1;
925 919
926 printk("VCPU Register Dump:\n"); 920 kvm_debug("VCPU Register Dump:\n");
927 printk("\tpc = 0x%08lx\n", vcpu->arch.pc); 921 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
928 printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); 922 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
929 923
930 for (i = 0; i < 32; i += 4) { 924 for (i = 0; i < 32; i += 4) {
931 printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, 925 kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
932 vcpu->arch.gprs[i], 926 vcpu->arch.gprs[i],
933 vcpu->arch.gprs[i + 1], 927 vcpu->arch.gprs[i + 1],
934 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); 928 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
935 } 929 }
936 printk("\thi: 0x%08lx\n", vcpu->arch.hi); 930 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
937 printk("\tlo: 0x%08lx\n", vcpu->arch.lo); 931 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
938 932
939 cop0 = vcpu->arch.cop0; 933 cop0 = vcpu->arch.cop0;
940 printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n", 934 kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
941 kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0)); 935 kvm_read_c0_guest_status(cop0),
936 kvm_read_c0_guest_cause(cop0));
942 937
943 printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); 938 kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
944 939
945 return 0; 940 return 0;
946} 941}
@@ -980,14 +975,11 @@ static void kvm_mips_comparecount_func(unsigned long data)
980 kvm_mips_callbacks->queue_timer_int(vcpu); 975 kvm_mips_callbacks->queue_timer_int(vcpu);
981 976
982 vcpu->arch.wait = 0; 977 vcpu->arch.wait = 0;
983 if (waitqueue_active(&vcpu->wq)) { 978 if (waitqueue_active(&vcpu->wq))
984 wake_up_interruptible(&vcpu->wq); 979 wake_up_interruptible(&vcpu->wq);
985 }
986} 980}
987 981
988/* 982/* low level hrtimer wake routine */
989 * low level hrtimer wake routine.
990 */
991static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) 983static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
992{ 984{
993 struct kvm_vcpu *vcpu; 985 struct kvm_vcpu *vcpu;
@@ -1008,11 +1000,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1008 1000
1009void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 1001void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1010{ 1002{
1011 return;
1012} 1003}
1013 1004
1014int 1005int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1015kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) 1006 struct kvm_translation *tr)
1016{ 1007{
1017 return 0; 1008 return 0;
1018} 1009}
@@ -1023,8 +1014,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1023 return kvm_mips_callbacks->vcpu_setup(vcpu); 1014 return kvm_mips_callbacks->vcpu_setup(vcpu);
1024} 1015}
1025 1016
1026static 1017static void kvm_mips_set_c0_status(void)
1027void kvm_mips_set_c0_status(void)
1028{ 1018{
1029 uint32_t status = read_c0_status(); 1019 uint32_t status = read_c0_status();
1030 1020
@@ -1054,7 +1044,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1054 run->exit_reason = KVM_EXIT_UNKNOWN; 1044 run->exit_reason = KVM_EXIT_UNKNOWN;
1055 run->ready_for_interrupt_injection = 1; 1045 run->ready_for_interrupt_injection = 1;
1056 1046
1057 /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */ 1047 /*
1048 * Set the appropriate status bits based on host CPU features,
1049 * before we hit the scheduler
1050 */
1058 kvm_mips_set_c0_status(); 1051 kvm_mips_set_c0_status();
1059 1052
1060 local_irq_enable(); 1053 local_irq_enable();
@@ -1062,7 +1055,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1062 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", 1055 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1063 cause, opc, run, vcpu); 1056 cause, opc, run, vcpu);
1064 1057
1065 /* Do a privilege check, if in UM most of these exit conditions end up 1058 /*
1059 * Do a privilege check, if in UM most of these exit conditions end up
1066 * causing an exception to be delivered to the Guest Kernel 1060 * causing an exception to be delivered to the Guest Kernel
1067 */ 1061 */
1068 er = kvm_mips_check_privilege(cause, opc, run, vcpu); 1062 er = kvm_mips_check_privilege(cause, opc, run, vcpu);
@@ -1081,9 +1075,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1081 ++vcpu->stat.int_exits; 1075 ++vcpu->stat.int_exits;
1082 trace_kvm_exit(vcpu, INT_EXITS); 1076 trace_kvm_exit(vcpu, INT_EXITS);
1083 1077
1084 if (need_resched()) { 1078 if (need_resched())
1085 cond_resched(); 1079 cond_resched();
1086 }
1087 1080
1088 ret = RESUME_GUEST; 1081 ret = RESUME_GUEST;
1089 break; 1082 break;
@@ -1095,9 +1088,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1095 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); 1088 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
1096 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); 1089 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1097 /* XXXKYMA: Might need to return to user space */ 1090 /* XXXKYMA: Might need to return to user space */
1098 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) { 1091 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
1099 ret = RESUME_HOST; 1092 ret = RESUME_HOST;
1100 }
1101 break; 1093 break;
1102 1094
1103 case T_TLB_MOD: 1095 case T_TLB_MOD:
@@ -1107,10 +1099,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1107 break; 1099 break;
1108 1100
1109 case T_TLB_ST_MISS: 1101 case T_TLB_ST_MISS:
1110 kvm_debug 1102 kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
1111 ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", 1103 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1112 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, 1104 badvaddr);
1113 badvaddr);
1114 1105
1115 ++vcpu->stat.tlbmiss_st_exits; 1106 ++vcpu->stat.tlbmiss_st_exits;
1116 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); 1107 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
@@ -1157,10 +1148,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1157 break; 1148 break;
1158 1149
1159 default: 1150 default:
1160 kvm_err 1151 kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
1161 ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", 1152 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
1162 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, 1153 kvm_read_c0_guest_status(vcpu->arch.cop0));
1163 kvm_read_c0_guest_status(vcpu->arch.cop0));
1164 kvm_arch_vcpu_dump_regs(vcpu); 1154 kvm_arch_vcpu_dump_regs(vcpu);
1165 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1155 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1166 ret = RESUME_HOST; 1156 ret = RESUME_HOST;
@@ -1175,7 +1165,7 @@ skip_emul:
1175 kvm_mips_deliver_interrupts(vcpu, cause); 1165 kvm_mips_deliver_interrupts(vcpu, cause);
1176 1166
1177 if (!(ret & RESUME_HOST)) { 1167 if (!(ret & RESUME_HOST)) {
1178 /* Only check for signals if not already exiting to userspace */ 1168 /* Only check for signals if not already exiting to userspace */
1179 if (signal_pending(current)) { 1169 if (signal_pending(current)) {
1180 run->exit_reason = KVM_EXIT_INTR; 1170 run->exit_reason = KVM_EXIT_INTR;
1181 ret = (-EINTR << 2) | RESUME_HOST; 1171 ret = (-EINTR << 2) | RESUME_HOST;
@@ -1196,11 +1186,13 @@ int __init kvm_mips_init(void)
1196 if (ret) 1186 if (ret)
1197 return ret; 1187 return ret;
1198 1188
1199 /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs. 1189 /*
1200 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c) 1190 * On MIPS, kernel modules are executed from "mapped space", which
1201 * to avoid the possibility of double faulting. The issue is that the TLB code 1191 * requires TLBs. The TLB handling code is statically linked with
1202 * references routines that are part of the KVM module, 1192 * the rest of the kernel (tlb.c) to avoid the possibility of
1203 * which are only available once the module is loaded. 1193 * double faulting. The issue is that the TLB code references
1194 * routines that are part of the the KVM module, which are only
1195 * available once the module is loaded.
1204 */ 1196 */
1205 kvm_mips_gfn_to_pfn = gfn_to_pfn; 1197 kvm_mips_gfn_to_pfn = gfn_to_pfn;
1206 kvm_mips_release_pfn_clean = kvm_release_pfn_clean; 1198 kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
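
[Note: the comment above motivates an indirection: tlb.c is built into the kernel image, so it reaches module-only helpers through function pointers that module init fills in. Both halves appear verbatim elsewhere in this series:]

    /* arch/mips/kvm/tlb.c (statically linked): */
    pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
    void (*kvm_mips_release_pfn_clean)(pfn_t pfn);

    /* arch/mips/kvm/mips.c, kvm_mips_init() (module load time): */
    kvm_mips_gfn_to_pfn = gfn_to_pfn;
    kvm_mips_release_pfn_clean = kvm_release_pfn_clean;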
diff --git a/arch/mips/kvm/opcode.h b/arch/mips/kvm/opcode.h
new file mode 100644
index 000000000000..03a6ae84c7df
--- /dev/null
+++ b/arch/mips/kvm/opcode.h
@@ -0,0 +1,22 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 * Authors: Sanjay Lal <sanjayl@kymasys.com>
8 */
9
10/* Define opcode values not defined in <asm/inst.h> */
11
12#ifndef __KVM_MIPS_OPCODE_H__
13#define __KVM_MIPS_OPCODE_H__
14
15/* COP0 Ops */
16#define mfmcz_op 0x0b /* 01011 */
17#define wrpgpr_op 0x0e /* 01110 */
18
19/* COP0 opcodes (only if COP0 and CO=1): */
20#define wait_op 0x20 /* 100000 */
21
22#endif /* __KVM_MIPS_OPCODE_H__ */
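
[Note: these constants fill gaps in <asm/inst.h>: mfmcz_op is an rs-field value (MFMC0, i.e. DI/EI) and wait_op a function-field value valid only with CO=1. An illustrative decode under the standard MIPS32 field layout; the 0x10 COP0 major opcode is from the architecture manual, not this diff.]

    #include <stdint.h>

    #define mfmcz_op 0x0b /* COP0 rs field: MFMC0 (DI/EI) */
    #define wait_op  0x20 /* COP0 function field, CO=1: WAIT */

    /* Illustrative decode of a 32-bit instruction word. */
    static int is_wait_sketch(uint32_t inst)
    {
        uint32_t op = (inst >> 26) & 0x3f; /* major opcode: 0x10 is COP0 */
        uint32_t co = (inst >> 25) & 0x1;  /* coprocessor-operation bit */
        uint32_t fn = inst & 0x3f;         /* function field */

        return op == 0x10 && co && fn == wait_op;
    }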
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/stats.c
index 075904bcac1b..a74d6024c5ad 100644
--- a/arch/mips/kvm/kvm_mips_stats.c
+++ b/arch/mips/kvm/stats.c
@@ -1,13 +1,13 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS: COP0 access histogram 6 * KVM/MIPS: COP0 access histogram
7* 7 *
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/ 10 */
11 11
12#include <linux/kvm_host.h> 12#include <linux/kvm_host.h>
13 13
@@ -63,20 +63,18 @@ char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
63 "DESAVE" 63 "DESAVE"
64}; 64};
65 65
66int kvm_mips_dump_stats(struct kvm_vcpu *vcpu) 66void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
67{ 67{
68#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 68#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
69 int i, j; 69 int i, j;
70 70
71 printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); 71 kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
72 for (i = 0; i < N_MIPS_COPROC_REGS; i++) { 72 for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
73 for (j = 0; j < N_MIPS_COPROC_SEL; j++) { 73 for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
74 if (vcpu->arch.cop0->stat[i][j]) 74 if (vcpu->arch.cop0->stat[i][j])
75 printk("%s[%d]: %lu\n", kvm_cop0_str[i], j, 75 kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
76 vcpu->arch.cop0->stat[i][j]); 76 vcpu->arch.cop0->stat[i][j]);
77 } 77 }
78 } 78 }
79#endif 79#endif
80
81 return 0;
82} 80}
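
[Note: the dump above walks a [reg][sel] counter matrix guarded by CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS; the counters are presumably incremented at each emulated COP0 access. A hedged sketch of that bookkeeping; the increment site is an assumption, not shown in this diff.]

    #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
    /* Hedged sketch: bump the histogram at an emulated COP0 access. */
    static inline void count_cop0_access_sketch(struct kvm_vcpu *vcpu,
                                                int reg, int sel)
    {
        vcpu->arch.cop0->stat[reg][sel]++;
    }
    #endif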
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/tlb.c
index 8a5a700ad8de..bbcd82242059 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -1,14 +1,14 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that 6 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
7* TLB handlers run from KSEG0 7 * TLB handlers run from KSEG0
8* 8 *
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 9 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com> 10 * Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/ 11 */
12 12
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
@@ -18,7 +18,6 @@
18#include <linux/kvm_host.h> 18#include <linux/kvm_host.h>
19#include <linux/srcu.h> 19#include <linux/srcu.h>
20 20
21
22#include <asm/cpu.h> 21#include <asm/cpu.h>
23#include <asm/bootinfo.h> 22#include <asm/bootinfo.h>
24#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
@@ -39,13 +38,13 @@ atomic_t kvm_mips_instance;
39EXPORT_SYMBOL(kvm_mips_instance); 38EXPORT_SYMBOL(kvm_mips_instance);
40 39
41/* These function pointers are initialized once the KVM module is loaded */ 40/* These function pointers are initialized once the KVM module is loaded */
42pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); 41pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
43EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); 42EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
44 43
45void (*kvm_mips_release_pfn_clean) (pfn_t pfn); 44void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
46EXPORT_SYMBOL(kvm_mips_release_pfn_clean); 45EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
47 46
48bool(*kvm_mips_is_error_pfn) (pfn_t pfn); 47bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
49EXPORT_SYMBOL(kvm_mips_is_error_pfn); 48EXPORT_SYMBOL(kvm_mips_is_error_pfn);
50 49
51uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) 50uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
@@ -53,21 +52,17 @@ uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
53 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; 52 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
54} 53}
55 54
56
57uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) 55uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
58{ 56{
59 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; 57 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
60} 58}
61 59
62inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) 60inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
63{ 61{
64 return vcpu->kvm->arch.commpage_tlb; 62 return vcpu->kvm->arch.commpage_tlb;
65} 63}
66 64
67 65/* Structure defining a TLB entry data set. */
68/*
69 * Structure defining a TLB entry data set.
70 */
71 66
72void kvm_mips_dump_host_tlbs(void) 67void kvm_mips_dump_host_tlbs(void)
73{ 68{
@@ -82,8 +77,8 @@ void kvm_mips_dump_host_tlbs(void)
82 old_entryhi = read_c0_entryhi(); 77 old_entryhi = read_c0_entryhi();
83 old_pagemask = read_c0_pagemask(); 78 old_pagemask = read_c0_pagemask();
84 79
85 printk("HOST TLBs:\n"); 80 kvm_info("HOST TLBs:\n");
86 printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); 81 kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
87 82
88 for (i = 0; i < current_cpu_data.tlbsize; i++) { 83 for (i = 0; i < current_cpu_data.tlbsize; i++) {
89 write_c0_index(i); 84 write_c0_index(i);
@@ -97,25 +92,26 @@ void kvm_mips_dump_host_tlbs(void)
97 tlb.tlb_lo1 = read_c0_entrylo1(); 92 tlb.tlb_lo1 = read_c0_entrylo1();
98 tlb.tlb_mask = read_c0_pagemask(); 93 tlb.tlb_mask = read_c0_pagemask();
99 94
100 printk("TLB%c%3d Hi 0x%08lx ", 95 kvm_info("TLB%c%3d Hi 0x%08lx ",
101 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', 96 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
102 i, tlb.tlb_hi); 97 i, tlb.tlb_hi);
103 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", 98 kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
104 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), 99 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
105 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', 100 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
106 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', 101 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
107 (tlb.tlb_lo0 >> 3) & 7); 102 (tlb.tlb_lo0 >> 3) & 7);
108 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", 103 kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
109 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), 104 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
110 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', 105 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
111 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', 106 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
112 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); 107 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
113 } 108 }
114 write_c0_entryhi(old_entryhi); 109 write_c0_entryhi(old_entryhi);
115 write_c0_pagemask(old_pagemask); 110 write_c0_pagemask(old_pagemask);
116 mtc0_tlbw_hazard(); 111 mtc0_tlbw_hazard();
117 local_irq_restore(flags); 112 local_irq_restore(flags);
118} 113}
114EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
119 115
120void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) 116void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
121{ 117{
@@ -123,26 +119,27 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
123 struct kvm_mips_tlb tlb; 119 struct kvm_mips_tlb tlb;
124 int i; 120 int i;
125 121
126 printk("Guest TLBs:\n"); 122 kvm_info("Guest TLBs:\n");
127 printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); 123 kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
128 124
129 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 125 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
130 tlb = vcpu->arch.guest_tlb[i]; 126 tlb = vcpu->arch.guest_tlb[i];
131 printk("TLB%c%3d Hi 0x%08lx ", 127 kvm_info("TLB%c%3d Hi 0x%08lx ",
132 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', 128 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
133 i, tlb.tlb_hi); 129 i, tlb.tlb_hi);
134 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", 130 kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
135 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), 131 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
136 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', 132 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
137 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', 133 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
138 (tlb.tlb_lo0 >> 3) & 7); 134 (tlb.tlb_lo0 >> 3) & 7);
139 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", 135 kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
140 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), 136 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
141 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', 137 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
142 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', 138 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
143 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); 139 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
144 } 140 }
145} 141}
142EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
146 143
147static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) 144static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
148{ 145{
@@ -152,7 +149,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
152 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) 149 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
153 return 0; 150 return 0;
154 151
155 srcu_idx = srcu_read_lock(&kvm->srcu); 152 srcu_idx = srcu_read_lock(&kvm->srcu);
156 pfn = kvm_mips_gfn_to_pfn(kvm, gfn); 153 pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
157 154
158 if (kvm_mips_is_error_pfn(pfn)) { 155 if (kvm_mips_is_error_pfn(pfn)) {
@@ -169,7 +166,7 @@ out:
169 166
170/* Translate guest KSEG0 addresses to Host PA */ 167/* Translate guest KSEG0 addresses to Host PA */
171unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, 168unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
172 unsigned long gva) 169 unsigned long gva)
173{ 170{
174 gfn_t gfn; 171 gfn_t gfn;
175 uint32_t offset = gva & ~PAGE_MASK; 172 uint32_t offset = gva & ~PAGE_MASK;
@@ -194,20 +191,20 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
194 191
195 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; 192 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
196} 193}
194EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
197 195
198/* XXXKYMA: Must be called with interrupts disabled */ 196/* XXXKYMA: Must be called with interrupts disabled */
199/* set flush_dcache_mask == 0 if no dcache flush required */ 197/* set flush_dcache_mask == 0 if no dcache flush required */
200int 198int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
201kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, 199 unsigned long entrylo0, unsigned long entrylo1,
202 unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask) 200 int flush_dcache_mask)
203{ 201{
204 unsigned long flags; 202 unsigned long flags;
205 unsigned long old_entryhi; 203 unsigned long old_entryhi;
206 volatile int idx; 204 int idx;
207 205
208 local_irq_save(flags); 206 local_irq_save(flags);
209 207
210
211 old_entryhi = read_c0_entryhi(); 208 old_entryhi = read_c0_entryhi();
212 write_c0_entryhi(entryhi); 209 write_c0_entryhi(entryhi);
213 mtc0_tlbw_hazard(); 210 mtc0_tlbw_hazard();
@@ -240,12 +237,14 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
240 if (flush_dcache_mask) { 237 if (flush_dcache_mask) {
241 if (entrylo0 & MIPS3_PG_V) { 238 if (entrylo0 & MIPS3_PG_V) {
242 ++vcpu->stat.flush_dcache_exits; 239 ++vcpu->stat.flush_dcache_exits;
243 flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask); 240 flush_data_cache_page((entryhi & VPN2_MASK) &
241 ~flush_dcache_mask);
244 } 242 }
245 if (entrylo1 & MIPS3_PG_V) { 243 if (entrylo1 & MIPS3_PG_V) {
246 ++vcpu->stat.flush_dcache_exits; 244 ++vcpu->stat.flush_dcache_exits;
247 flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) | 245 flush_data_cache_page(((entryhi & VPN2_MASK) &
248 (0x1 << PAGE_SHIFT)); 246 ~flush_dcache_mask) |
247 (0x1 << PAGE_SHIFT));
249 } 248 }
250 } 249 }
251 250
@@ -257,10 +256,9 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
257 return 0; 256 return 0;
258} 257}
259 258
260
261/* XXXKYMA: Must be called with interrupts disabled */ 259/* XXXKYMA: Must be called with interrupts disabled */
262int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, 260int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
263 struct kvm_vcpu *vcpu) 261 struct kvm_vcpu *vcpu)
264{ 262{
265 gfn_t gfn; 263 gfn_t gfn;
266 pfn_t pfn0, pfn1; 264 pfn_t pfn0, pfn1;
@@ -270,7 +268,6 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
270 struct kvm *kvm = vcpu->kvm; 268 struct kvm *kvm = vcpu->kvm;
271 const int flush_dcache_mask = 0; 269 const int flush_dcache_mask = 0;
272 270
273
274 if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { 271 if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
275 kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); 272 kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
276 kvm_mips_dump_host_tlbs(); 273 kvm_mips_dump_host_tlbs();
@@ -302,14 +299,15 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
302 } 299 }
303 300
304 entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); 301 entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
305 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | 302 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
306 (0x1 << 1); 303 (1 << 2) | (0x1 << 1);
307 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | 304 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
308 (0x1 << 1); 305 (1 << 2) | (0x1 << 1);
309 306
310 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 307 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
311 flush_dcache_mask); 308 flush_dcache_mask);
312} 309}
310EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
313 311
314int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, 312int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
315 struct kvm_vcpu *vcpu) 313 struct kvm_vcpu *vcpu)
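
The magic constants in the entrylo0/entrylo1 expressions above follow the
standard MIPS EntryLo register layout. A sketch of what each term contributes
(the annotations are illustrative; only the MIPS3_PG_* names appear in this
file):

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		   (0x3 << 3) |	/* C, bits 5:3 = 3: cacheable, noncoherent */
		   (1 << 2)   |	/* D, bit 2: dirty, i.e. writable */
		   (0x1 << 1);	/* V, bit 1: valid; G (bit 0) clear: per-ASID */
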
@@ -318,11 +316,10 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
318 unsigned long flags, old_entryhi = 0, vaddr = 0; 316 unsigned long flags, old_entryhi = 0, vaddr = 0;
319 unsigned long entrylo0 = 0, entrylo1 = 0; 317 unsigned long entrylo0 = 0, entrylo1 = 0;
320 318
321
322 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; 319 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
323 pfn1 = 0; 320 pfn1 = 0;
324 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | 321 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
325 (0x1 << 1); 322 (1 << 2) | (0x1 << 1);
326 entrylo1 = 0; 323 entrylo1 = 0;
327 324
328 local_irq_save(flags); 325 local_irq_save(flags);
@@ -341,9 +338,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
341 mtc0_tlbw_hazard(); 338 mtc0_tlbw_hazard();
342 tlbw_use_hazard(); 339 tlbw_use_hazard();
343 340
344 kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", 341 kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
345 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), 342 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
346 read_c0_entrylo0(), read_c0_entrylo1()); 343 read_c0_entrylo0(), read_c0_entrylo1());
347 344
348 /* Restore old ASID */ 345 /* Restore old ASID */
349 write_c0_entryhi(old_entryhi); 346 write_c0_entryhi(old_entryhi);
@@ -353,28 +350,33 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
353 350
354 return 0; 351 return 0;
355} 352}
353EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
356 354
357int 355int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
358kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, 356 struct kvm_mips_tlb *tlb,
359 struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1) 357 unsigned long *hpa0,
358 unsigned long *hpa1)
360{ 359{
361 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; 360 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
362 struct kvm *kvm = vcpu->kvm; 361 struct kvm *kvm = vcpu->kvm;
363 pfn_t pfn0, pfn1; 362 pfn_t pfn0, pfn1;
364 363
365
366 if ((tlb->tlb_hi & VPN2_MASK) == 0) { 364 if ((tlb->tlb_hi & VPN2_MASK) == 0) {
367 pfn0 = 0; 365 pfn0 = 0;
368 pfn1 = 0; 366 pfn1 = 0;
369 } else { 367 } else {
370 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) 368 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
369 >> PAGE_SHIFT) < 0)
371 return -1; 370 return -1;
372 371
373 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) 372 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
373 >> PAGE_SHIFT) < 0)
374 return -1; 374 return -1;
375 375
376 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; 376 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
377 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; 377 >> PAGE_SHIFT];
378 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
379 >> PAGE_SHIFT];
378 } 380 }
379 381
380 if (hpa0) 382 if (hpa0)
@@ -385,11 +387,12 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
385 387
386 /* Get attributes from the Guest TLB */ 388 /* Get attributes from the Guest TLB */
387 entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? 389 entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
388 kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu)); 390 kvm_mips_get_kernel_asid(vcpu) :
391 kvm_mips_get_user_asid(vcpu));
389 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | 392 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
390 (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); 393 (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
391 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | 394 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
392 (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); 395 (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
393 396
394 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, 397 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
395 tlb->tlb_lo0, tlb->tlb_lo1); 398 tlb->tlb_lo0, tlb->tlb_lo1);
@@ -397,6 +400,7 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
397 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 400 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
398 tlb->tlb_mask); 401 tlb->tlb_mask);
399} 402}
403EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
400 404
401int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) 405int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
402{ 406{
@@ -404,10 +408,9 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
404 int index = -1; 408 int index = -1;
405 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; 409 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
406 410
407
408 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 411 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
409 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && 412 if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
410 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { 413 TLB_HI_ASID_HIT(tlb[i], entryhi)) {
411 index = i; 414 index = i;
412 break; 415 break;
413 } 416 }
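
Judging from the open-coded comparisons they replace on the left-hand side,
the two new hit-test helpers presumably expand to something close to the
following (a sketch, not necessarily the header's exact text):

	#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==    \
					 (((y) & VPN2_MASK) & ~(x).tlb_mask))
	#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||                 \
					 (TLB_ASID(x) == ((y) & ASID_MASK)))
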
@@ -418,21 +421,23 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
418 421
419 return index; 422 return index;
420} 423}
424EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
421 425
422int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) 426int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
423{ 427{
424 unsigned long old_entryhi, flags; 428 unsigned long old_entryhi, flags;
425 volatile int idx; 429 int idx;
426
427 430
428 local_irq_save(flags); 431 local_irq_save(flags);
429 432
430 old_entryhi = read_c0_entryhi(); 433 old_entryhi = read_c0_entryhi();
431 434
432 if (KVM_GUEST_KERNEL_MODE(vcpu)) 435 if (KVM_GUEST_KERNEL_MODE(vcpu))
433 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu)); 436 write_c0_entryhi((vaddr & VPN2_MASK) |
437 kvm_mips_get_kernel_asid(vcpu));
434 else { 438 else {
435 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); 439 write_c0_entryhi((vaddr & VPN2_MASK) |
440 kvm_mips_get_user_asid(vcpu));
436 } 441 }
437 442
438 mtc0_tlbw_hazard(); 443 mtc0_tlbw_hazard();
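
The function above (its tail continues in the next hunk) is built around the
classic MIPS TLB probe sequence. Stripped of the IRQ save/restore and EntryHi
bookkeeping, the idiom is roughly as follows (tlb_probe() and
tlb_probe_hazard() are the standard MIPS kernel helpers around the TLBP
instruction; they are not visible in this hunk):

	write_c0_entryhi((vaddr & VPN2_MASK) | asid);	/* key to search for */
	mtc0_tlbw_hazard();				/* settle the CP0 write */
	tlb_probe();					/* TLBP instruction */
	tlb_probe_hazard();
	idx = read_c0_index();				/* >= 0: hit, < 0: miss */
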
@@ -452,6 +457,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
452 457
453 return idx; 458 return idx;
454} 459}
460EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
455 461
456int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) 462int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
457{ 463{
@@ -460,7 +466,6 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
460 466
461 local_irq_save(flags); 467 local_irq_save(flags);
462 468
463
464 old_entryhi = read_c0_entryhi(); 469 old_entryhi = read_c0_entryhi();
465 470
466 write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); 471 write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
@@ -499,8 +504,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
499 504
500 return 0; 505 return 0;
501} 506}
507EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
502 508
503/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/ 509/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
504int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) 510int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
505{ 511{
506 unsigned long flags, old_entryhi; 512 unsigned long flags, old_entryhi;
@@ -510,7 +516,6 @@ int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
510 516
511 local_irq_save(flags); 517 local_irq_save(flags);
512 518
513
514 old_entryhi = read_c0_entryhi(); 519 old_entryhi = read_c0_entryhi();
515 520
516 write_c0_entryhi(UNIQUE_ENTRYHI(index)); 521 write_c0_entryhi(UNIQUE_ENTRYHI(index));
@@ -546,7 +551,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
546 int entry = 0; 551 int entry = 0;
547 int maxentry = current_cpu_data.tlbsize; 552 int maxentry = current_cpu_data.tlbsize;
548 553
549
550 local_irq_save(flags); 554 local_irq_save(flags);
551 555
552 old_entryhi = read_c0_entryhi(); 556 old_entryhi = read_c0_entryhi();
@@ -554,7 +558,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
554 558
555 /* Blast 'em all away. */ 559 /* Blast 'em all away. */
556 for (entry = 0; entry < maxentry; entry++) { 560 for (entry = 0; entry < maxentry; entry++) {
557
558 write_c0_index(entry); 561 write_c0_index(entry);
559 mtc0_tlbw_hazard(); 562 mtc0_tlbw_hazard();
560 563
@@ -565,9 +568,8 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
565 entryhi = read_c0_entryhi(); 568 entryhi = read_c0_entryhi();
566 569
567 /* Don't blow away guest kernel entries */ 570 /* Don't blow away guest kernel entries */
568 if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) { 571 if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
569 continue; 572 continue;
570 }
571 } 573 }
572 574
573 /* Make sure all entries differ. */ 575 /* Make sure all entries differ. */
@@ -591,17 +593,17 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
591 593
592 local_irq_restore(flags); 594 local_irq_restore(flags);
593} 595}
596EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
594 597
595void 598void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
596kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, 599 struct kvm_vcpu *vcpu)
597 struct kvm_vcpu *vcpu)
598{ 600{
599 unsigned long asid = asid_cache(cpu); 601 unsigned long asid = asid_cache(cpu);
600 602
601 if (!((asid += ASID_INC) & ASID_MASK)) { 603 asid += ASID_INC;
602 if (cpu_has_vtag_icache) { 604 if (!(asid & ASID_MASK)) {
605 if (cpu_has_vtag_icache)
603 flush_icache_all(); 606 flush_icache_all();
604 }
605 607
606 kvm_local_flush_tlb_all(); /* start new asid cycle */ 608 kvm_local_flush_tlb_all(); /* start new asid cycle */
607 609
@@ -639,6 +641,7 @@ void kvm_local_flush_tlb_all(void)
639 641
640 local_irq_restore(flags); 642 local_irq_restore(flags);
641} 643}
644EXPORT_SYMBOL(kvm_local_flush_tlb_all);
642 645
643/** 646/**
644 * kvm_mips_migrate_count() - Migrate timer. 647 * kvm_mips_migrate_count() - Migrate timer.
@@ -699,7 +702,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
699 } 702 }
700 703
701 if (!newasid) { 704 if (!newasid) {
702 /* If we preempted while the guest was executing, then reload the pre-empted ASID */ 705 /*
706 * If we preempted while the guest was executing, then reload
707 * the pre-empted ASID
708 */
703 if (current->flags & PF_VCPU) { 709 if (current->flags & PF_VCPU) {
704 write_c0_entryhi(vcpu->arch. 710 write_c0_entryhi(vcpu->arch.
705 preempt_entryhi & ASID_MASK); 711 preempt_entryhi & ASID_MASK);
@@ -708,9 +714,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
708 } else { 714 } else {
709 /* New ASIDs were allocated for the VM */ 715 /* New ASIDs were allocated for the VM */
710 716
711 /* Were we in guest context? If so then the pre-empted ASID is no longer 717 /*
712 * valid, we need to set it to what it should be based on the mode of 718 * Were we in guest context? If so then the pre-empted ASID is
713 * the Guest (Kernel/User) 719 * no longer valid, we need to set it to what it should be based
720 * on the mode of the Guest (Kernel/User)
714 */ 721 */
715 if (current->flags & PF_VCPU) { 722 if (current->flags & PF_VCPU) {
716 if (KVM_GUEST_KERNEL_MODE(vcpu)) 723 if (KVM_GUEST_KERNEL_MODE(vcpu))
@@ -728,6 +735,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
728 local_irq_restore(flags); 735 local_irq_restore(flags);
729 736
730} 737}
738EXPORT_SYMBOL(kvm_arch_vcpu_load);
731 739
732/* ASID can change if another task is scheduled during preemption */ 740/* ASID can change if another task is scheduled during preemption */
733void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 741void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -739,7 +747,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
739 747
740 cpu = smp_processor_id(); 748 cpu = smp_processor_id();
741 749
742
743 vcpu->arch.preempt_entryhi = read_c0_entryhi(); 750 vcpu->arch.preempt_entryhi = read_c0_entryhi();
744 vcpu->arch.last_sched_cpu = cpu; 751 vcpu->arch.last_sched_cpu = cpu;
745 752
@@ -754,11 +761,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
754 761
755 local_irq_restore(flags); 762 local_irq_restore(flags);
756} 763}
764EXPORT_SYMBOL(kvm_arch_vcpu_put);
757 765
758uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) 766uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
759{ 767{
760 struct mips_coproc *cop0 = vcpu->arch.cop0; 768 struct mips_coproc *cop0 = vcpu->arch.cop0;
761 unsigned long paddr, flags; 769 unsigned long paddr, flags, vpn2, asid;
762 uint32_t inst; 770 uint32_t inst;
763 int index; 771 int index;
764 772
@@ -769,16 +777,12 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
769 if (index >= 0) { 777 if (index >= 0) {
770 inst = *(opc); 778 inst = *(opc);
771 } else { 779 } else {
772 index = 780 vpn2 = (unsigned long) opc & VPN2_MASK;
773 kvm_mips_guest_tlb_lookup(vcpu, 781 asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
774 ((unsigned long) opc & VPN2_MASK) 782 index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
775 |
776 (kvm_read_c0_guest_entryhi
777 (cop0) & ASID_MASK));
778 if (index < 0) { 783 if (index < 0) {
779 kvm_err 784 kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
780 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", 785 __func__, opc, vcpu, read_c0_entryhi());
781 __func__, opc, vcpu, read_c0_entryhi());
782 kvm_mips_dump_host_tlbs(); 786 kvm_mips_dump_host_tlbs();
783 local_irq_restore(flags); 787 local_irq_restore(flags);
784 return KVM_INVALID_INST; 788 return KVM_INVALID_INST;
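
The lookup key assembled above mirrors the EntryHi register layout. A worked
example, assuming 4 KiB pages (so VPN2_MASK == 0xffffe000) and an 8-bit ASID:

	/*
	 * opc  == (uint32_t *)0x40001004, guest EntryHi ASID == 0x05
	 * vpn2 == 0x40001004 & 0xffffe000 == 0x40000000
	 * key  == vpn2 | asid == 0x40000005
	 */
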
@@ -793,7 +797,7 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
793 } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { 797 } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
794 paddr = 798 paddr =
795 kvm_mips_translate_guest_kseg0_to_hpa(vcpu, 799 kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
796 (unsigned long) opc); 800 (unsigned long) opc);
797 inst = *(uint32_t *) CKSEG0ADDR(paddr); 801 inst = *(uint32_t *) CKSEG0ADDR(paddr);
798 } else { 802 } else {
799 kvm_err("%s: illegal address: %p\n", __func__, opc); 803 kvm_err("%s: illegal address: %p\n", __func__, opc);
@@ -802,18 +806,4 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
802 806
803 return inst; 807 return inst;
804} 808}
805
806EXPORT_SYMBOL(kvm_local_flush_tlb_all);
807EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
808EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
809EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
810EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
811EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
812EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
813EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
814EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
815EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
816EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
817EXPORT_SYMBOL(kvm_get_inst); 809EXPORT_SYMBOL(kvm_get_inst);
818EXPORT_SYMBOL(kvm_arch_vcpu_load);
819EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index bc9e0f406c08..c1388d40663b 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -1,11 +1,11 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com> 7 * Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/ 8 */
9 9
10#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 10#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_KVM_H 11#define _TRACE_KVM_H
@@ -17,9 +17,7 @@
17#define TRACE_INCLUDE_PATH . 17#define TRACE_INCLUDE_PATH .
18#define TRACE_INCLUDE_FILE trace 18#define TRACE_INCLUDE_FILE trace
19 19
 20/* 20/* Tracepoints for VM exits */
 21 * Tracepoints for VM exits
22 */
23extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; 21extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
24 22
25TRACE_EVENT(kvm_exit, 23TRACE_EVENT(kvm_exit,
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/trap_emul.c
index 693f952b2fbb..fd7257b70e65 100644
--- a/arch/mips/kvm/kvm_trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1,13 +1,13 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel 6 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
7* 7 *
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/ 10 */
11 11
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/err.h> 13#include <linux/err.h>
@@ -16,8 +16,8 @@
16 16
17#include <linux/kvm_host.h> 17#include <linux/kvm_host.h>
18 18
19#include "kvm_mips_opcode.h" 19#include "opcode.h"
20#include "kvm_mips_int.h" 20#include "interrupt.h"
21 21
22static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) 22static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
23{ 23{
@@ -27,7 +27,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
27 if ((kseg == CKSEG0) || (kseg == CKSEG1)) 27 if ((kseg == CKSEG0) || (kseg == CKSEG1))
28 gpa = CPHYSADDR(gva); 28 gpa = CPHYSADDR(gva);
29 else { 29 else {
30 printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); 30 kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
31 kvm_mips_dump_host_tlbs(); 31 kvm_mips_dump_host_tlbs();
32 gpa = KVM_INVALID_ADDR; 32 gpa = KVM_INVALID_ADDR;
33 } 33 }
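
A worked example of the translation above, using the standard MIPS32 segment
layout in which CPHYSADDR(a) masks off the segment bits ((a) & 0x1fffffff):

	/*
	 * gva 0x80001000 -> CKSEG0 (cached)   -> gpa 0x80001000 & 0x1fffffff
	 *                                            == 0x00001000
	 * gva 0xa0001000 -> CKSEG1 (uncached) -> gpa 0x00001000 as well
	 * KUSEG/KSEG2 addresses have no fixed mapping -> KVM_INVALID_ADDR
	 */
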
@@ -37,7 +37,6 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
37 return gpa; 37 return gpa;
38} 38}
39 39
40
41static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) 40static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
42{ 41{
43 struct kvm_run *run = vcpu->run; 42 struct kvm_run *run = vcpu->run;
@@ -46,9 +45,9 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
46 enum emulation_result er = EMULATE_DONE; 45 enum emulation_result er = EMULATE_DONE;
47 int ret = RESUME_GUEST; 46 int ret = RESUME_GUEST;
48 47
49 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { 48 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
50 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); 49 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
51 } else 50 else
52 er = kvm_mips_emulate_inst(cause, opc, run, vcpu); 51 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
53 52
54 switch (er) { 53 switch (er) {
@@ -83,9 +82,8 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
83 82
84 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 83 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
85 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { 84 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
86 kvm_debug 85 kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
87 ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", 86 cause, opc, badvaddr);
88 cause, opc, badvaddr);
89 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); 87 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
90 88
91 if (er == EMULATE_DONE) 89 if (er == EMULATE_DONE)
@@ -95,20 +93,20 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
95 ret = RESUME_HOST; 93 ret = RESUME_HOST;
96 } 94 }
97 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { 95 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
98 /* XXXKYMA: The guest kernel does not expect to get this fault when we are not 96 /*
99 * using HIGHMEM. Need to address this in a HIGHMEM kernel 97 * XXXKYMA: The guest kernel does not expect to get this fault
98 * when we are not using HIGHMEM. Need to address this in a
99 * HIGHMEM kernel
100 */ 100 */
101 printk 101 kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
102 ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", 102 cause, opc, badvaddr);
103 cause, opc, badvaddr);
104 kvm_mips_dump_host_tlbs(); 103 kvm_mips_dump_host_tlbs();
105 kvm_arch_vcpu_dump_regs(vcpu); 104 kvm_arch_vcpu_dump_regs(vcpu);
106 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 105 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
107 ret = RESUME_HOST; 106 ret = RESUME_HOST;
108 } else { 107 } else {
109 printk 108 kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
110 ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", 109 cause, opc, badvaddr);
111 cause, opc, badvaddr);
112 kvm_mips_dump_host_tlbs(); 110 kvm_mips_dump_host_tlbs();
113 kvm_arch_vcpu_dump_regs(vcpu); 111 kvm_arch_vcpu_dump_regs(vcpu);
114 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 112 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -134,9 +132,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
134 } 132 }
135 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 133 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
136 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { 134 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
137 kvm_debug 135 kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
138 ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", 136 cause, opc, badvaddr);
139 cause, opc, badvaddr);
140 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); 137 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
141 if (er == EMULATE_DONE) 138 if (er == EMULATE_DONE)
142 ret = RESUME_GUEST; 139 ret = RESUME_GUEST;
@@ -145,8 +142,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
145 ret = RESUME_HOST; 142 ret = RESUME_HOST;
146 } 143 }
147 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { 144 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
148 /* All KSEG0 faults are handled by KVM, as the guest kernel does not 145 /*
149 * expect to ever get them 146 * All KSEG0 faults are handled by KVM, as the guest kernel does
147 * not expect to ever get them
150 */ 148 */
151 if (kvm_mips_handle_kseg0_tlb_fault 149 if (kvm_mips_handle_kseg0_tlb_fault
152 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { 150 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
@@ -154,9 +152,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
154 ret = RESUME_HOST; 152 ret = RESUME_HOST;
155 } 153 }
156 } else { 154 } else {
157 kvm_err 155 kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
158 ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", 156 cause, opc, badvaddr);
159 cause, opc, badvaddr);
160 kvm_mips_dump_host_tlbs(); 157 kvm_mips_dump_host_tlbs();
161 kvm_arch_vcpu_dump_regs(vcpu); 158 kvm_arch_vcpu_dump_regs(vcpu);
162 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 159 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -185,11 +182,14 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
185 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", 182 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
186 vcpu->arch.pc, badvaddr); 183 vcpu->arch.pc, badvaddr);
187 184
188 /* User Address (UA) fault, this could happen if 185 /*
189 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this 186 * User Address (UA) fault, this could happen if
190 * case we pass on the fault to the guest kernel and let it handle it. 187 * (1) TLB entry not present/valid in both Guest and shadow host
191 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this 188 * TLBs, in this case we pass on the fault to the guest
192 * case we inject the TLB from the Guest TLB into the shadow host TLB 189 * kernel and let it handle it.
190 * (2) TLB entry is present in the Guest TLB but not in the
191 * shadow, in this case we inject the TLB from the Guest TLB
192 * into the shadow host TLB
193 */ 193 */
194 194
195 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); 195 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
@@ -206,9 +206,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
206 ret = RESUME_HOST; 206 ret = RESUME_HOST;
207 } 207 }
208 } else { 208 } else {
209 printk 209 kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
210 ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", 210 cause, opc, badvaddr);
211 cause, opc, badvaddr);
212 kvm_mips_dump_host_tlbs(); 211 kvm_mips_dump_host_tlbs();
213 kvm_arch_vcpu_dump_regs(vcpu); 212 kvm_arch_vcpu_dump_regs(vcpu);
214 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 213 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -231,7 +230,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
231 kvm_debug("Emulate Store to MMIO space\n"); 230 kvm_debug("Emulate Store to MMIO space\n");
232 er = kvm_mips_emulate_inst(cause, opc, run, vcpu); 231 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
233 if (er == EMULATE_FAIL) { 232 if (er == EMULATE_FAIL) {
234 printk("Emulate Store to MMIO space failed\n"); 233 kvm_err("Emulate Store to MMIO space failed\n");
235 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 234 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
236 ret = RESUME_HOST; 235 ret = RESUME_HOST;
237 } else { 236 } else {
@@ -239,9 +238,8 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
239 ret = RESUME_HOST; 238 ret = RESUME_HOST;
240 } 239 }
241 } else { 240 } else {
242 printk 241 kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
243 ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", 242 cause, opc, badvaddr);
244 cause, opc, badvaddr);
245 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 243 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
246 ret = RESUME_HOST; 244 ret = RESUME_HOST;
247 } 245 }
@@ -261,7 +259,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
261 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); 259 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
262 er = kvm_mips_emulate_inst(cause, opc, run, vcpu); 260 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
263 if (er == EMULATE_FAIL) { 261 if (er == EMULATE_FAIL) {
264 printk("Emulate Load from MMIO space failed\n"); 262 kvm_err("Emulate Load from MMIO space failed\n");
265 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 263 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
266 ret = RESUME_HOST; 264 ret = RESUME_HOST;
267 } else { 265 } else {
@@ -269,9 +267,8 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
269 ret = RESUME_HOST; 267 ret = RESUME_HOST;
270 } 268 }
271 } else { 269 } else {
272 printk 270 kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
273 ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", 271 cause, opc, badvaddr);
274 cause, opc, badvaddr);
275 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 272 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
276 ret = RESUME_HOST; 273 ret = RESUME_HOST;
277 er = EMULATE_FAIL; 274 er = EMULATE_FAIL;
@@ -349,9 +346,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
349 uint32_t config1; 346 uint32_t config1;
350 int vcpu_id = vcpu->vcpu_id; 347 int vcpu_id = vcpu->vcpu_id;
351 348
352 /* Arch specific stuff, set up config registers properly so that the 349 /*
353 * guest will come up as expected, for now we simulate a 350 * Arch specific stuff, set up config registers properly so that the
354 * MIPS 24kc 351 * guest will come up as expected, for now we simulate a MIPS 24kc
355 */ 352 */
356 kvm_write_c0_guest_prid(cop0, 0x00019300); 353 kvm_write_c0_guest_prid(cop0, 0x00019300);
357 kvm_write_c0_guest_config(cop0, 354 kvm_write_c0_guest_config(cop0,
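
The PRId value written above decodes, per the standard MIPS PRId register
layout, as follows; this is what makes the guest identify itself as the
simulated 24Kc:

	/*
	 * 0x00019300
	 *   bits 23:16  company ID   = 0x01  (MIPS Technologies)
	 *   bits 15:8   processor ID = 0x93  (24K family)
	 *   bits  7:0   revision     = 0x00
	 */
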
@@ -373,14 +370,15 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
373 370
374 kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); 371 kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
375 /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ 372 /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
376 kvm_write_c0_guest_config3(cop0, 373 kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
377 MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 << 374 (1 << CP0C3_ULRI));
378 CP0C3_ULRI));
379 375
380 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ 376 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
381 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); 377 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
382 378
 383 /* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */ 379 /*
 380 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
381 */
384 kvm_write_c0_guest_intctl(cop0, 0xFC000000); 382 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
385 383
386 /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ 384 /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */