about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author	Hollis Blanchard <hollisb@us.ibm.com>	2009-01-03 17:23:01 -0500
committer	Avi Kivity <avi@redhat.com>	2009-03-24 05:02:56 -0400
commit	58a96214a306fc7fc66105097eea9c4f3bfa35bc (patch)
tree	2049ecd13795b80f1f0962bdbc38c9be3b17fd3a /arch
parent	475e7cdd69101939006659a63c2e4a32d5b71389 (diff)
KVM: ppc: change kvmppc_mmu_map() parameters
Passing just the TLB index will ease an e500 implementation.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/kvm_ppc.h	1
-rw-r--r--	arch/powerpc/kvm/44x_tlb.c	15
-rw-r--r--	arch/powerpc/kvm/booke.c	6
3 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 7ba95d28b837..f661f8ba3ab8 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -55,7 +55,6 @@ extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
55/* Core-specific hooks */ 55/* Core-specific hooks */
56 56
57extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, 57extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
58 u64 asid, u32 flags, u32 max_bytes,
59 unsigned int gtlb_idx); 58 unsigned int gtlb_idx);
60extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); 59extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
61extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); 60extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 8f9c09cbb833..e8ed22f28eae 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -269,15 +269,19 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
269 * Caller must ensure that the specified guest TLB entry is safe to insert into 269 * Caller must ensure that the specified guest TLB entry is safe to insert into
270 * the shadow TLB. 270 * the shadow TLB.
271 */ 271 */
272void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid, 272void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
273 u32 flags, u32 max_bytes, unsigned int gtlb_index) 273 unsigned int gtlb_index)
274{ 274{
275 struct kvmppc_44x_tlbe stlbe; 275 struct kvmppc_44x_tlbe stlbe;
276 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 276 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
277 struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
277 struct kvmppc_44x_shadow_ref *ref; 278 struct kvmppc_44x_shadow_ref *ref;
278 struct page *new_page; 279 struct page *new_page;
279 hpa_t hpaddr; 280 hpa_t hpaddr;
280 gfn_t gfn; 281 gfn_t gfn;
282 u32 asid = gtlbe->tid;
283 u32 flags = gtlbe->word2;
284 u32 max_bytes = get_tlb_bytes(gtlbe);
281 unsigned int victim; 285 unsigned int victim;
282 286
283 /* Select TLB entry to clobber. Indirectly guard against races with the TLB 287 /* Select TLB entry to clobber. Indirectly guard against races with the TLB
@@ -448,10 +452,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
448 } 452 }
449 453
450 if (tlbe_is_host_safe(vcpu, tlbe)) { 454 if (tlbe_is_host_safe(vcpu, tlbe)) {
451 u64 asid;
452 gva_t eaddr; 455 gva_t eaddr;
453 gpa_t gpaddr; 456 gpa_t gpaddr;
454 u32 flags;
455 u32 bytes; 457 u32 bytes;
456 458
457 eaddr = get_tlb_eaddr(tlbe); 459 eaddr = get_tlb_eaddr(tlbe);
@@ -462,10 +464,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
462 eaddr &= ~(bytes - 1); 464 eaddr &= ~(bytes - 1);
463 gpaddr &= ~(bytes - 1); 465 gpaddr &= ~(bytes - 1);
464 466
465 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; 467 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
466 flags = tlbe->word2 & 0xffff;
467
468 kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index);
469 } 468 }
470 469
471 KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0, 470 KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index d196ae619303..85b9e2fc6c6b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -316,8 +316,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
316 * b) the guest used a large mapping which we're faking 316 * b) the guest used a large mapping which we're faking
317 * Either way, we need to satisfy the fault without 317 * Either way, we need to satisfy the fault without
318 * invoking the guest. */ 318 * invoking the guest. */
319 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid, 319 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
320 gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
321 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); 320 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
322 r = RESUME_GUEST; 321 r = RESUME_GUEST;
323 } else { 322 } else {
@@ -364,8 +363,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
364 * b) the guest used a large mapping which we're faking 363 * b) the guest used a large mapping which we're faking
365 * Either way, we need to satisfy the fault without 364 * Either way, we need to satisfy the fault without
366 * invoking the guest. */ 365 * invoking the guest. */
367 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid, 366 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
368 gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
369 } else { 367 } else {
370 /* Guest mapped and leaped at non-RAM! */ 368 /* Guest mapped and leaped at non-RAM! */
371 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); 369 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);