author		James Hogan <james.hogan@imgtec.com>	2016-11-28 12:23:14 -0500
committer	James Hogan <james.hogan@imgtec.com>	2017-02-03 10:21:06 -0500
commit		122e51d47418f74a69a93bf02f5535d11ff75bf5 (patch)
tree		38aad30f6698d4397b75e0f9f05f2c03f6ea2db6
parent		a1ecc54d7ea629538116351a3ccc7d86bb9a3c69 (diff)
KVM: MIPS: Improve kvm_get_inst() error return
Currently kvm_get_inst() returns KVM_INVALID_INST in the event of a fault
reading the guest instruction. This has the rather arbitrary magic value
0xdeadbeef. This API isn't very robust, and in fact 0xdeadbeef is a valid
MIPS64 instruction encoding, namely "ld t1,-16657(s5)".

Therefore change the kvm_get_inst() API to return 0 or -EFAULT, and to
return the instruction via a u32 *out argument. We can then drop the
KVM_INVALID_INST definition entirely.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
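To make the new contract concrete, here is a minimal sketch of the call pattern a user of the reworked API follows. The fetch_and_emulate() wrapper and its local variables are hypothetical; only kvm_get_inst(), its u32 *out argument, and the 0 / -EFAULT return convention come from this patch.

/*
 * Sketch only: fetch a guest instruction with the reworked kvm_get_inst().
 * Assumes the usual MIPS KVM context (struct kvm_vcpu and enum
 * emulation_result from asm/kvm_host.h); the wrapper is illustrative,
 * not part of the patch.
 */
static enum emulation_result fetch_and_emulate(u32 *opc, struct kvm_vcpu *vcpu)
{
	u32 word;
	int err;

	err = kvm_get_inst(opc, vcpu, &word);	/* 0 on success, -EFAULT on fault */
	if (err)
		return EMULATE_FAIL;		/* no 0xdeadbeef sentinel comparison needed */

	/* ... decode and emulate 'word' ... */
	return EMULATE_DONE;
}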
-rw-r--r--	arch/mips/include/asm/kvm_host.h	3
-rw-r--r--	arch/mips/kvm/emulate.c	90
-rw-r--r--	arch/mips/kvm/mips.c	7
-rw-r--r--	arch/mips/kvm/mmu.c	9
4 files changed, 56 insertions, 53 deletions
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 6f68f7545b66..f296ebeda9e3 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -104,7 +104,6 @@
 #define KVM_GUEST_KSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
 
 #define KVM_INVALID_PAGE	0xdeadbeef
-#define KVM_INVALID_INST	0xdeadbeef
 #define KVM_INVALID_ADDR	0xdeadbeef
 
 /*
@@ -640,7 +639,7 @@ void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
 				  bool user);
 
 /* Emulation */
-u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
+int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
 
 /**
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 67ea39973b96..b906fc0589f3 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -38,23 +38,25 @@
  * Compute the return address and do emulate branch simulation, if required.
  * This function should be called only in branch delay slot active.
  */
-unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
-				     unsigned long instpc)
+static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
+				  unsigned long *out)
 {
 	unsigned int dspcontrol;
 	union mips_instruction insn;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	long epc = instpc;
-	long nextpc = KVM_INVALID_INST;
+	long nextpc;
+	int err;
 
-	if (epc & 3)
-		goto unaligned;
+	if (epc & 3) {
+		kvm_err("%s: unaligned epc\n", __func__);
+		return -EINVAL;
+	}
 
 	/* Read the instruction */
-	insn.word = kvm_get_inst((u32 *) epc, vcpu);
-
-	if (insn.word == KVM_INVALID_INST)
-		return KVM_INVALID_INST;
+	err = kvm_get_inst((u32 *)epc, vcpu, &insn.word);
+	if (err)
+		return err;
 
 	switch (insn.i_format.opcode) {
 		/* jr and jalr are in r_format format. */
@@ -66,6 +68,8 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 	case jr_op:
 		nextpc = arch->gprs[insn.r_format.rs];
 		break;
+	default:
+		return -EINVAL;
 	}
 	break;
 
@@ -114,8 +118,11 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 			nextpc = epc;
 			break;
 		case bposge32_op:
-			if (!cpu_has_dsp)
-				goto sigill;
+			if (!cpu_has_dsp) {
+				kvm_err("%s: DSP branch but not DSP ASE\n",
+					__func__);
+				return -EINVAL;
+			}
 
 			dspcontrol = rddsp(0x01);
 
@@ -125,6 +132,8 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 			epc += 8;
 			nextpc = epc;
 			break;
+		default:
+			return -EINVAL;
 		}
 		break;
 
@@ -189,7 +198,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 	/* And now the FPA/cp1 branch instructions. */
 	case cop1_op:
 		kvm_err("%s: unsupported cop1_op\n", __func__);
-		break;
+		return -EINVAL;
 
 #ifdef CONFIG_CPU_MIPSR6
 	/* R6 added the following compact branches with forbidden slots */
@@ -198,19 +207,19 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
 		/* only rt == 0 isn't compact branch */
 		if (insn.i_format.rt != 0)
 			goto compact_branch;
-		break;
+		return -EINVAL;
 	case pop10_op:
 	case pop30_op:
 		/* only rs == rt == 0 is reserved, rest are compact branches */
 		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
 			goto compact_branch;
-		break;
+		return -EINVAL;
 	case pop66_op:
 	case pop76_op:
 		/* only rs == 0 isn't compact branch */
 		if (insn.i_format.rs != 0)
 			goto compact_branch;
-		break;
+		return -EINVAL;
 compact_branch:
 		/*
 		 * If we've hit an exception on the forbidden slot, then
@@ -221,42 +230,32 @@ compact_branch:
 		break;
 #else
 compact_branch:
-	/* Compact branches not supported before R6 */
-	break;
+	/* Fall through - Compact branches not supported before R6 */
 #endif
+	default:
+		return -EINVAL;
 	}
 
-	return nextpc;
-
-unaligned:
-	kvm_err("%s: unaligned epc\n", __func__);
-	return nextpc;
-
-sigill:
-	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
-	return nextpc;
+	*out = nextpc;
+	return 0;
 }
 
 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
 {
-	unsigned long branch_pc;
-	enum emulation_result er = EMULATE_DONE;
+	int err;
 
 	if (cause & CAUSEF_BD) {
-		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
-		if (branch_pc == KVM_INVALID_INST) {
-			er = EMULATE_FAIL;
-		} else {
-			vcpu->arch.pc = branch_pc;
-			kvm_debug("BD update_pc(): New PC: %#lx\n",
-				  vcpu->arch.pc);
-		}
-	} else
+		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
+					     &vcpu->arch.pc);
+		if (err)
+			return EMULATE_FAIL;
+	} else {
 		vcpu->arch.pc += 4;
+	}
 
 	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
 
-	return er;
+	return EMULATE_DONE;
 }
 
 /**
@@ -1835,12 +1834,14 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
 {
 	union mips_instruction inst;
 	enum emulation_result er = EMULATE_DONE;
+	int err;
 
 	/* Fetch the instruction. */
 	if (cause & CAUSEF_BD)
 		opc += 1;
-
-	inst.word = kvm_get_inst(opc, vcpu);
+	err = kvm_get_inst(opc, vcpu, &inst.word);
+	if (err)
+		return EMULATE_FAIL;
 
 	switch (inst.r_format.opcode) {
 	case cop0_op:
@@ -2419,6 +2420,7 @@ enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
 	enum emulation_result er = EMULATE_DONE;
 	unsigned long curr_pc;
 	union mips_instruction inst;
+	int err;
 
 	/*
 	 * Update PC and hold onto current PC in case there is
@@ -2432,11 +2434,9 @@ enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
 	/* Fetch the instruction. */
 	if (cause & CAUSEF_BD)
 		opc += 1;
-
-	inst.word = kvm_get_inst(opc, vcpu);
-
-	if (inst.word == KVM_INVALID_INST) {
-		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
+	err = kvm_get_inst(opc, vcpu, &inst.word);
+	if (err) {
+		kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
 		return EMULATE_FAIL;
 	}
 
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 07ce10e3627a..29afd96069ef 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1343,6 +1343,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	enum emulation_result er = EMULATE_DONE;
+	u32 inst;
 	int ret = RESUME_GUEST;
 
 	/* re-enable HTW before enabling interrupts */
@@ -1467,8 +1468,12 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	default:
+		if (cause & CAUSEF_BD)
+			opc += 1;
+		inst = 0;
+		kvm_get_inst(opc, vcpu, &inst);
 		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
-			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+			exccode, opc, inst, badvaddr,
 			kvm_read_c0_guest_status(vcpu->arch.cop0));
 		kvm_arch_vcpu_dump_regs(vcpu);
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index aab604e75d3b..6379ac1bc7b9 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -503,16 +503,15 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	local_irq_restore(flags);
 }
 
-u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
+int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
 {
-	u32 inst;
 	int err;
 
-	err = get_user(inst, opc);
+	err = get_user(*out, opc);
 	if (unlikely(err)) {
 		kvm_err("%s: illegal address: %p\n", __func__, opc);
-		return KVM_INVALID_INST;
+		return -EFAULT;
 	}
 
-	return inst;
+	return 0;
 }