aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm/book3s_emulate.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kvm/book3s_emulate.c')
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c  73
1 file changed, 29 insertions, 44 deletions
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index c85f906038ce..466846557089 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -73,8 +73,8 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
73 switch (get_xop(inst)) { 73 switch (get_xop(inst)) {
74 case OP_19_XOP_RFID: 74 case OP_19_XOP_RFID:
75 case OP_19_XOP_RFI: 75 case OP_19_XOP_RFI:
76 kvmppc_set_pc(vcpu, vcpu->arch.srr0); 76 kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
77 kvmppc_set_msr(vcpu, vcpu->arch.srr1); 77 kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
78 *advance = 0; 78 *advance = 0;
79 break; 79 break;
80 80
@@ -86,14 +86,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
86 case 31: 86 case 31:
87 switch (get_xop(inst)) { 87 switch (get_xop(inst)) {
88 case OP_31_XOP_MFMSR: 88 case OP_31_XOP_MFMSR:
89 kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr); 89 kvmppc_set_gpr(vcpu, get_rt(inst),
90 vcpu->arch.shared->msr);
90 break; 91 break;
91 case OP_31_XOP_MTMSRD: 92 case OP_31_XOP_MTMSRD:
92 { 93 {
93 ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); 94 ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
94 if (inst & 0x10000) { 95 if (inst & 0x10000) {
95 vcpu->arch.msr &= ~(MSR_RI | MSR_EE); 96 vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
96 vcpu->arch.msr |= rs & (MSR_RI | MSR_EE); 97 vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
97 } else 98 } else
98 kvmppc_set_msr(vcpu, rs); 99 kvmppc_set_msr(vcpu, rs);
99 break; 100 break;
@@ -204,14 +205,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
204 ra = kvmppc_get_gpr(vcpu, get_ra(inst)); 205 ra = kvmppc_get_gpr(vcpu, get_ra(inst));
205 206
206 addr = (ra + rb) & ~31ULL; 207 addr = (ra + rb) & ~31ULL;
207 if (!(vcpu->arch.msr & MSR_SF)) 208 if (!(vcpu->arch.shared->msr & MSR_SF))
208 addr &= 0xffffffff; 209 addr &= 0xffffffff;
209 vaddr = addr; 210 vaddr = addr;
210 211
211 r = kvmppc_st(vcpu, &addr, 32, zeros, true); 212 r = kvmppc_st(vcpu, &addr, 32, zeros, true);
212 if ((r == -ENOENT) || (r == -EPERM)) { 213 if ((r == -ENOENT) || (r == -EPERM)) {
213 *advance = 0; 214 *advance = 0;
214 vcpu->arch.dear = vaddr; 215 vcpu->arch.shared->dar = vaddr;
215 to_svcpu(vcpu)->fault_dar = vaddr; 216 to_svcpu(vcpu)->fault_dar = vaddr;
216 217
217 dsisr = DSISR_ISSTORE; 218 dsisr = DSISR_ISSTORE;
@@ -220,7 +221,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
220 else if (r == -EPERM) 221 else if (r == -EPERM)
221 dsisr |= DSISR_PROTFAULT; 222 dsisr |= DSISR_PROTFAULT;
222 223
223 to_book3s(vcpu)->dsisr = dsisr; 224 vcpu->arch.shared->dsisr = dsisr;
224 to_svcpu(vcpu)->fault_dsisr = dsisr; 225 to_svcpu(vcpu)->fault_dsisr = dsisr;
225 226
226 kvmppc_book3s_queue_irqprio(vcpu, 227 kvmppc_book3s_queue_irqprio(vcpu,
@@ -263,7 +264,7 @@ void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
263 } 264 }
264} 265}
265 266
266static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn) 267static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
267{ 268{
268 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 269 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
269 struct kvmppc_bat *bat; 270 struct kvmppc_bat *bat;
@@ -285,35 +286,7 @@ static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn)
285 BUG(); 286 BUG();
286 } 287 }
287 288
288 if (sprn % 2) 289 return bat;
289 return bat->raw >> 32;
290 else
291 return bat->raw;
292}
293
294static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
295{
296 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
297 struct kvmppc_bat *bat;
298
299 switch (sprn) {
300 case SPRN_IBAT0U ... SPRN_IBAT3L:
301 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
302 break;
303 case SPRN_IBAT4U ... SPRN_IBAT7L:
304 bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
305 break;
306 case SPRN_DBAT0U ... SPRN_DBAT3L:
307 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
308 break;
309 case SPRN_DBAT4U ... SPRN_DBAT7L:
310 bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
311 break;
312 default:
313 BUG();
314 }
315
316 kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
317} 290}
318 291
319int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 292int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
@@ -326,10 +299,10 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
326 to_book3s(vcpu)->sdr1 = spr_val; 299 to_book3s(vcpu)->sdr1 = spr_val;
327 break; 300 break;
328 case SPRN_DSISR: 301 case SPRN_DSISR:
329 to_book3s(vcpu)->dsisr = spr_val; 302 vcpu->arch.shared->dsisr = spr_val;
330 break; 303 break;
331 case SPRN_DAR: 304 case SPRN_DAR:
332 vcpu->arch.dear = spr_val; 305 vcpu->arch.shared->dar = spr_val;
333 break; 306 break;
334 case SPRN_HIOR: 307 case SPRN_HIOR:
335 to_book3s(vcpu)->hior = spr_val; 308 to_book3s(vcpu)->hior = spr_val;
@@ -338,12 +311,16 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
338 case SPRN_IBAT4U ... SPRN_IBAT7L: 311 case SPRN_IBAT4U ... SPRN_IBAT7L:
339 case SPRN_DBAT0U ... SPRN_DBAT3L: 312 case SPRN_DBAT0U ... SPRN_DBAT3L:
340 case SPRN_DBAT4U ... SPRN_DBAT7L: 313 case SPRN_DBAT4U ... SPRN_DBAT7L:
341 kvmppc_write_bat(vcpu, sprn, (u32)spr_val); 314 {
315 struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
316
317 kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
342 /* BAT writes happen so rarely that we're ok to flush 318 /* BAT writes happen so rarely that we're ok to flush
343 * everything here */ 319 * everything here */
344 kvmppc_mmu_pte_flush(vcpu, 0, 0); 320 kvmppc_mmu_pte_flush(vcpu, 0, 0);
345 kvmppc_mmu_flush_segments(vcpu); 321 kvmppc_mmu_flush_segments(vcpu);
346 break; 322 break;
323 }
347 case SPRN_HID0: 324 case SPRN_HID0:
348 to_book3s(vcpu)->hid[0] = spr_val; 325 to_book3s(vcpu)->hid[0] = spr_val;
349 break; 326 break;
@@ -433,16 +410,24 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
433 case SPRN_IBAT4U ... SPRN_IBAT7L: 410 case SPRN_IBAT4U ... SPRN_IBAT7L:
434 case SPRN_DBAT0U ... SPRN_DBAT3L: 411 case SPRN_DBAT0U ... SPRN_DBAT3L:
435 case SPRN_DBAT4U ... SPRN_DBAT7L: 412 case SPRN_DBAT4U ... SPRN_DBAT7L:
436 kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn)); 413 {
414 struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
415
416 if (sprn % 2)
417 kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
418 else
419 kvmppc_set_gpr(vcpu, rt, bat->raw);
420
437 break; 421 break;
422 }
438 case SPRN_SDR1: 423 case SPRN_SDR1:
439 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); 424 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
440 break; 425 break;
441 case SPRN_DSISR: 426 case SPRN_DSISR:
442 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr); 427 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
443 break; 428 break;
444 case SPRN_DAR: 429 case SPRN_DAR:
445 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); 430 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
446 break; 431 break;
447 case SPRN_HIOR: 432 case SPRN_HIOR:
448 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); 433 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);