about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2014-06-11 04:34:19 -0400
committerAlexander Graf <agraf@suse.de>2014-07-28 09:22:22 -0400
commit02407552256111479fbfd23a3e01218b399aaa35 (patch)
treea1a198730d3eaf1fef4d7b8fd6741482a76004fa /arch
parent6f22bd3265fb542acb2697026b953ec07298242d (diff)
KVM: PPC: Book3S HV: Access guest VPA in BE
There are a few shared data structures between the host and the guest. Most of them get registered through the VPA interface. These data structures are defined to always be in big endian byte order, so let's make sure we always access them in big endian.

Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/kvm/book3s_hv.c22
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c6
2 files changed, 14 insertions, 14 deletions
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7db9df2ac211..f1281c4c381c 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -272,7 +272,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
272static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) 272static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
273{ 273{
274 vpa->__old_status |= LPPACA_OLD_SHARED_PROC; 274 vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
275 vpa->yield_count = 1; 275 vpa->yield_count = cpu_to_be32(1);
276} 276}
277 277
278static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, 278static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
@@ -295,8 +295,8 @@ static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
295struct reg_vpa { 295struct reg_vpa {
296 u32 dummy; 296 u32 dummy;
297 union { 297 union {
298 u16 hword; 298 __be16 hword;
299 u32 word; 299 __be32 word;
300 } length; 300 } length;
301}; 301};
302 302
@@ -335,9 +335,9 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
335 if (va == NULL) 335 if (va == NULL)
336 return H_PARAMETER; 336 return H_PARAMETER;
337 if (subfunc == H_VPA_REG_VPA) 337 if (subfunc == H_VPA_REG_VPA)
338 len = ((struct reg_vpa *)va)->length.hword; 338 len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
339 else 339 else
340 len = ((struct reg_vpa *)va)->length.word; 340 len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
341 kvmppc_unpin_guest_page(kvm, va, vpa, false); 341 kvmppc_unpin_guest_page(kvm, va, vpa, false);
342 342
343 /* Check length */ 343 /* Check length */
@@ -542,18 +542,18 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
542 return; 542 return;
543 memset(dt, 0, sizeof(struct dtl_entry)); 543 memset(dt, 0, sizeof(struct dtl_entry));
544 dt->dispatch_reason = 7; 544 dt->dispatch_reason = 7;
545 dt->processor_id = vc->pcpu + vcpu->arch.ptid; 545 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
546 dt->timebase = now + vc->tb_offset; 546 dt->timebase = cpu_to_be64(now + vc->tb_offset);
547 dt->enqueue_to_dispatch_time = stolen; 547 dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
548 dt->srr0 = kvmppc_get_pc(vcpu); 548 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
549 dt->srr1 = vcpu->arch.shregs.msr; 549 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
550 ++dt; 550 ++dt;
551 if (dt == vcpu->arch.dtl.pinned_end) 551 if (dt == vcpu->arch.dtl.pinned_end)
552 dt = vcpu->arch.dtl.pinned_addr; 552 dt = vcpu->arch.dtl.pinned_addr;
553 vcpu->arch.dtl_ptr = dt; 553 vcpu->arch.dtl_ptr = dt;
554 /* order writing *dt vs. writing vpa->dtl_idx */ 554 /* order writing *dt vs. writing vpa->dtl_idx */
555 smp_wmb(); 555 smp_wmb();
556 vpa->dtl_idx = ++vcpu->arch.dtl_index; 556 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
557 vcpu->arch.dtl.dirty = true; 557 vcpu->arch.dtl.dirty = true;
558} 558}
559 559
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 3a5c568b1e89..d562c8e2bc30 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -45,14 +45,14 @@ static void reload_slb(struct kvm_vcpu *vcpu)
45 return; 45 return;
46 46
47 /* Sanity check */ 47 /* Sanity check */
48 n = min_t(u32, slb->persistent, SLB_MIN_SIZE); 48 n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
49 if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) 49 if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
50 return; 50 return;
51 51
52 /* Load up the SLB from that */ 52 /* Load up the SLB from that */
53 for (i = 0; i < n; ++i) { 53 for (i = 0; i < n; ++i) {
54 unsigned long rb = slb->save_area[i].esid; 54 unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
55 unsigned long rs = slb->save_area[i].vsid; 55 unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
56 56
57 rb = (rb & ~0xFFFul) | i; /* insert entry number */ 57 rb = (rb & ~0xFFFul) | i; /* insert entry number */
58 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); 58 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));