author     Alexander Graf <agraf@suse.de>            2010-01-07 20:58:01 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>     2010-03-01 10:35:47 -0500
commit     8e5b26b55a8b6aee2c789b1d20ec715f9e4bea5c (patch)
tree       4e2d003852ce327a47153b6c100239c6d8e1418f /arch/powerpc/kvm/powerpc.c
parent     0d178975d0a5afe5e0fd3211bd1397905b225be5 (diff)
KVM: PPC: Use accessor functions for GPR access
All code in PPC KVM currently accesses GPRs in the vcpu struct directly. While there's nothing wrong with that with respect to the current way GPRs are stored and loaded, it doesn't suffice for the PACA acceleration that will follow in this patchset.

So let's just create little wrapper inline functions that we call whenever a GPR needs to be read from or written to. The compiled code shouldn't really change at all for now.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
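For readers following along, here is a rough, standalone sketch of the accessor pattern the commit message describes. It is not the kernel's actual code: the real kvmppc_set_gpr()/kvmppc_get_gpr() helpers are introduced elsewhere in this patchset, outside this file's diff, and the simplified struct names and the 32-entry register file below are assumptions made purely for illustration.

/*
 * Standalone sketch of the GPR accessor pattern -- NOT the kernel's
 * actual definitions. Struct names, the 32-register file and "ulong"
 * are assumptions for illustration only.
 */
#include <stdio.h>

typedef unsigned long ulong;

struct vcpu_arch_sketch {
	ulong gpr[32];			/* general purpose registers */
};

struct kvm_vcpu_sketch {
	struct vcpu_arch_sketch arch;
};

/*
 * Wrapper accessors: every GPR read and write goes through these, so the
 * backing storage can later be changed (e.g. PACA acceleration) without
 * touching any call site.
 */
static inline void kvmppc_set_gpr_sketch(struct kvm_vcpu_sketch *vcpu,
					 int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr_sketch(struct kvm_vcpu_sketch *vcpu,
					  int num)
{
	return vcpu->arch.gpr[num];
}

int main(void)
{
	struct kvm_vcpu_sketch vcpu = { 0 };

	kvmppc_set_gpr_sketch(&vcpu, 3, 0xdeadbeef);
	printf("r3 = 0x%lx\n", kvmppc_get_gpr_sketch(&vcpu, 3));
	return 0;
}

The point of funnelling every access through such wrappers is that the storage backing the GPRs can later be moved without changing the call sites; the MMIO and DCR completion paths in the diff below are converted to exactly that style.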
Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
-rw-r--r--   arch/powerpc/kvm/powerpc.c   21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 4633e7850dd2..2c291161df89 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -270,34 +270,35 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
 {
-	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
-	*gpr = run->dcr.data;
+	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
 }
 
 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run)
 {
-	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+	ulong gpr;
 
-	if (run->mmio.len > sizeof(*gpr)) {
+	if (run->mmio.len > sizeof(gpr)) {
 		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
 		return;
 	}
 
 	if (vcpu->arch.mmio_is_bigendian) {
 		switch (run->mmio.len) {
-		case 4: *gpr = *(u32 *)run->mmio.data; break;
-		case 2: *gpr = *(u16 *)run->mmio.data; break;
-		case 1: *gpr = *(u8 *)run->mmio.data; break;
+		case 4: gpr = *(u32 *)run->mmio.data; break;
+		case 2: gpr = *(u16 *)run->mmio.data; break;
+		case 1: gpr = *(u8 *)run->mmio.data; break;
 		}
 	} else {
 		/* Convert BE data from userland back to LE. */
 		switch (run->mmio.len) {
-		case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
-		case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
-		case 1: *gpr = *(u8 *)run->mmio.data; break;
+		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
+		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
+		case 1: gpr = *(u8 *)run->mmio.data; break;
 		}
 	}
+
+	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
 }
 
 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,