about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/mips/kvm/trap_emul.c119
1 file changed, 72 insertions, 47 deletions
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 070d1ddbc7ee..ae971ae30e30 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -85,6 +85,75 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
85 return ret; 85 return ret;
86} 86}
87 87
88static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
89 struct kvm_vcpu *vcpu)
90{
91 enum emulation_result er;
92 union mips_instruction inst;
93 int err;
94
95 /* A code fetch fault doesn't count as an MMIO */
96 if (kvm_is_ifetch_fault(&vcpu->arch)) {
97 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
98 return RESUME_HOST;
99 }
100
101 /* Fetch the instruction. */
102 if (cause & CAUSEF_BD)
103 opc += 1;
104 err = kvm_get_badinstr(opc, vcpu, &inst.word);
105 if (err) {
106 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
107 return RESUME_HOST;
108 }
109
110 /* Emulate the load */
111 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
112 if (er == EMULATE_FAIL) {
113 kvm_err("Emulate load from MMIO space failed\n");
114 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
115 } else {
116 run->exit_reason = KVM_EXIT_MMIO;
117 }
118 return RESUME_HOST;
119}
120
121static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
122 struct kvm_vcpu *vcpu)
123{
124 enum emulation_result er;
125 union mips_instruction inst;
126 int err;
127
128 /* Fetch the instruction. */
129 if (cause & CAUSEF_BD)
130 opc += 1;
131 err = kvm_get_badinstr(opc, vcpu, &inst.word);
132 if (err) {
133 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
134 return RESUME_HOST;
135 }
136
137 /* Emulate the store */
138 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
139 if (er == EMULATE_FAIL) {
140 kvm_err("Emulate store to MMIO space failed\n");
141 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
142 } else {
143 run->exit_reason = KVM_EXIT_MMIO;
144 }
145 return RESUME_HOST;
146}
147
148static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
149 struct kvm_vcpu *vcpu, bool store)
150{
151 if (store)
152 return kvm_mips_bad_store(cause, opc, run, vcpu);
153 else
154 return kvm_mips_bad_load(cause, opc, run, vcpu);
155}
156
88static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) 157static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
89{ 158{
90 struct kvm_run *run = vcpu->run; 159 struct kvm_run *run = vcpu->run;
@@ -178,28 +247,11 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
178 } 247 }
179 } else if (KVM_GUEST_KERNEL_MODE(vcpu) 248 } else if (KVM_GUEST_KERNEL_MODE(vcpu)
180 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { 249 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
181 /* A code fetch fault doesn't count as an MMIO */
182 if (!store && kvm_is_ifetch_fault(&vcpu->arch)) {
183 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
184 return RESUME_HOST;
185 }
186
187 /* 250 /*
188 * With EVA we may get a TLB exception instead of an address 251 * With EVA we may get a TLB exception instead of an address
189 * error when the guest performs MMIO to KSeg1 addresses. 252 * error when the guest performs MMIO to KSeg1 addresses.
190 */ 253 */
191 kvm_debug("Emulate %s MMIO space\n", 254 ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
192 store ? "Store to" : "Load from");
193 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
194 if (er == EMULATE_FAIL) {
195 kvm_err("Emulate %s MMIO space failed\n",
196 store ? "Store to" : "Load from");
197 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
198 ret = RESUME_HOST;
199 } else {
200 run->exit_reason = KVM_EXIT_MMIO;
201 ret = RESUME_HOST;
202 }
203 } else { 255 } else {
204 kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", 256 kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
205 store ? "ST" : "LD", cause, opc, badvaddr); 257 store ? "ST" : "LD", cause, opc, badvaddr);
@@ -227,21 +279,11 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
227 u32 __user *opc = (u32 __user *) vcpu->arch.pc; 279 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
228 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 280 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
229 u32 cause = vcpu->arch.host_cp0_cause; 281 u32 cause = vcpu->arch.host_cp0_cause;
230 enum emulation_result er = EMULATE_DONE;
231 int ret = RESUME_GUEST; 282 int ret = RESUME_GUEST;
232 283
233 if (KVM_GUEST_KERNEL_MODE(vcpu) 284 if (KVM_GUEST_KERNEL_MODE(vcpu)
234 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { 285 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
235 kvm_debug("Emulate Store to MMIO space\n"); 286 ret = kvm_mips_bad_store(cause, opc, run, vcpu);
236 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
237 if (er == EMULATE_FAIL) {
238 kvm_err("Emulate Store to MMIO space failed\n");
239 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
240 ret = RESUME_HOST;
241 } else {
242 run->exit_reason = KVM_EXIT_MMIO;
243 ret = RESUME_HOST;
244 }
245 } else { 287 } else {
246 kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n", 288 kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
247 cause, opc, badvaddr); 289 cause, opc, badvaddr);
@@ -257,32 +299,15 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
257 u32 __user *opc = (u32 __user *) vcpu->arch.pc; 299 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
258 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 300 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
259 u32 cause = vcpu->arch.host_cp0_cause; 301 u32 cause = vcpu->arch.host_cp0_cause;
260 enum emulation_result er = EMULATE_DONE;
261 int ret = RESUME_GUEST; 302 int ret = RESUME_GUEST;
262 303
263 if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { 304 if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
264 /* A code fetch fault doesn't count as an MMIO */ 305 ret = kvm_mips_bad_load(cause, opc, run, vcpu);
265 if (kvm_is_ifetch_fault(&vcpu->arch)) {
266 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
267 return RESUME_HOST;
268 }
269
270 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
271 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
272 if (er == EMULATE_FAIL) {
273 kvm_err("Emulate Load from MMIO space failed\n");
274 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
275 ret = RESUME_HOST;
276 } else {
277 run->exit_reason = KVM_EXIT_MMIO;
278 ret = RESUME_HOST;
279 }
280 } else { 306 } else {
281 kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n", 307 kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
282 cause, opc, badvaddr); 308 cause, opc, badvaddr);
283 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 309 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
284 ret = RESUME_HOST; 310 ret = RESUME_HOST;
285 er = EMULATE_FAIL;
286 } 311 }
287 return ret; 312 return ret;
288} 313}