path: root/arch/powerpc
author    Alexander Graf <agraf@suse.de>    2012-03-11 21:26:30 -0400
committer Avi Kivity <avi@redhat.com>       2012-04-08 07:01:37 -0400
commit    6020c0f6e78888b6023559e9bf633ad0092a1709 (patch)
tree      9756a19a254c6094adcabbd387e162880ef5ad77 /arch/powerpc
parent    8943633cf9b87980d261a022e90d94bc2c55df35 (diff)
KVM: PPC: Pass EA to updating emulation ops
When emulating updating load/store instructions (lwzu, stwu, ...) we need to write the effective address of the load/store into a register. Currently, we write the physical address in there, which is very wrong. So instead, let's save off where the virtual fault was on MMIO and use that as the value to put into the register.

While at it, also move the XOP variants of the above instructions to the new scheme of using the already known vaddr instead of calculating it themselves.

Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
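As a concrete illustration of the semantics being fixed (a minimal sketch, not code from this patch; the structure and helper names below are hypothetical): for an update-form load such as lwzu rT, d(rA), the architecture requires rA to be written back with the effective (virtual) address of the access, even when the access is emulated MMIO and the host has already translated it to a guest physical address.

#include <stdint.h>

typedef uint64_t gva_t;  /* guest effective (virtual) address */
typedef uint64_t gpa_t;  /* guest physical address */

/* Hypothetical, trimmed-down view of the vcpu state involved here. */
struct vcpu_sketch {
        uint64_t gpr[32];
        gpa_t paddr_accessed;  /* where the MMIO access landed physically */
        gva_t vaddr_accessed;  /* where the guest actually faulted */
};

/*
 * Emulate the write-back side of lwzu rT, d(rA) after the MMIO load
 * has completed: the loaded value goes into rT, and rA must receive
 * the effective address, not the translated physical address.
 */
static void lwzu_writeback(struct vcpu_sketch *vcpu, int rt, int ra,
                           uint32_t loaded)
{
        vcpu->gpr[rt] = loaded;
        vcpu->gpr[ra] = vcpu->vaddr_accessed;  /* EA, not paddr_accessed */
}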
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  |  1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  |  5
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c         |  1
-rw-r--r--  arch/powerpc/kvm/booke.c             |  1
-rw-r--r--  arch/powerpc/kvm/emulate.c           | 39
5 files changed, 17 insertions, 30 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 014eaf27a239..42a527e70490 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -464,6 +464,7 @@ struct kvm_vcpu_arch {
         u32 epr;
 #endif
         gpa_t paddr_accessed;
+        gva_t vaddr_accessed;
 
         u8 io_gpr; /* GPR used as IO source/target */
         u8 mmio_is_bigendian;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d031ce1d83f5..8e6401f2c16f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -447,7 +447,7 @@ static int instruction_is_store(unsigned int instr)
 }
 
 static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                                  unsigned long gpa, int is_store)
+                                  unsigned long gpa, gva_t ea, int is_store)
 {
         int ret;
         u32 last_inst;
@@ -494,6 +494,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
          */
 
         vcpu->arch.paddr_accessed = gpa;
+        vcpu->arch.vaddr_accessed = ea;
         return kvmppc_emulate_mmio(run, vcpu);
 }
 
@@ -547,7 +548,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         /* No memslot means it's an emulated MMIO region */
         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                 unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
-                return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
+                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
                                               dsisr & DSISR_ISSTORE);
         }
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7759053d391b..158047fc9513 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -351,6 +351,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 /* MMIO */
                 vcpu->stat.mmio_exits++;
                 vcpu->arch.paddr_accessed = pte.raddr;
+                vcpu->arch.vaddr_accessed = pte.eaddr;
                 r = kvmppc_emulate_mmio(run, vcpu);
                 if ( r == RESUME_HOST_NV )
                         r = RESUME_HOST;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 9f27258d8035..2675dcb40a7f 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -875,6 +875,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 /* Guest has mapped and accessed a page which is not
                  * actually RAM. */
                 vcpu->arch.paddr_accessed = gpaddr;
+                vcpu->arch.vaddr_accessed = eaddr;
                 r = kvmppc_emulate_mmio(run, vcpu);
                 kvmppc_account_exit(vcpu, MMIO_EXITS);
         }
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 968f40101883..e79a620608ab 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -141,7 +141,6 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
         u32 inst = kvmppc_get_last_inst(vcpu);
-        u32 ea;
         int ra;
         int rb;
         int rs;
@@ -185,12 +184,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rb = get_rb(inst);
 
-                ea = kvmppc_get_gpr(vcpu, rb);
-                if (ra)
-                        ea += kvmppc_get_gpr(vcpu, ra);
-
                 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-                kvmppc_set_gpr(vcpu, ra, ea);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_31_XOP_STWX:
@@ -212,14 +207,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rb = get_rb(inst);
 
-                ea = kvmppc_get_gpr(vcpu, rb);
-                if (ra)
-                        ea += kvmppc_get_gpr(vcpu, ra);
-
                 emulated = kvmppc_handle_store(run, vcpu,
                                                kvmppc_get_gpr(vcpu, rs),
                                                1, 1);
-                kvmppc_set_gpr(vcpu, rs, ea);
+                kvmppc_set_gpr(vcpu, rs, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_31_XOP_LHAX:
@@ -237,12 +228,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rb = get_rb(inst);
 
-                ea = kvmppc_get_gpr(vcpu, rb);
-                if (ra)
-                        ea += kvmppc_get_gpr(vcpu, ra);
-
                 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-                kvmppc_set_gpr(vcpu, ra, ea);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_31_XOP_MFSPR:
@@ -318,14 +305,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rb = get_rb(inst);
 
-                ea = kvmppc_get_gpr(vcpu, rb);
-                if (ra)
-                        ea += kvmppc_get_gpr(vcpu, ra);
-
                 emulated = kvmppc_handle_store(run, vcpu,
                                                kvmppc_get_gpr(vcpu, rs),
                                                2, 1);
-                kvmppc_set_gpr(vcpu, ra, ea);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_31_XOP_MTSPR:
@@ -429,7 +412,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rt = get_rt(inst);
                 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_LBZ:
@@ -441,7 +424,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rt = get_rt(inst);
                 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_STW:
@@ -457,7 +440,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 emulated = kvmppc_handle_store(run, vcpu,
                                                kvmppc_get_gpr(vcpu, rs),
                                                4, 1);
-                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_STB:
@@ -473,7 +456,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 emulated = kvmppc_handle_store(run, vcpu,
                                                kvmppc_get_gpr(vcpu, rs),
                                                1, 1);
-                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_LHZ:
@@ -485,7 +468,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rt = get_rt(inst);
                 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_LHA:
@@ -497,7 +480,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rt = get_rt(inst);
                 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
-                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         case OP_STH:
@@ -513,7 +496,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 emulated = kvmppc_handle_store(run, vcpu,
                                                kvmppc_get_gpr(vcpu, rs),
                                                2, 1);
-                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                 break;
 
         default: