author		Varun Sethi <Varun.Sethi@freescale.com>	2012-04-24 21:26:43 -0400
committer	Alexander Graf <agraf@suse.de>	2012-05-06 10:19:08 -0400
commit		185e4188dab6456409cad66c579501dd89487188 (patch)
tree		f9d57c7a37d12de21667fdd11312ee5559820c2b
parent		6e35994d1f6831af1e5577e28c363c9137d7d597 (diff)
KVM: PPC: bookehv: Use a Macro for saving/restoring guest registers to/from their 64 bit copies.
Introduced PPC_STD/PPC_LD macros for saving/restoring guest registers
to/from their 64 bit copies.

Signed-off-by: Varun Sethi <Varun.Sethi@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
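For illustration, a minimal sketch of what one macro invocation expands to
(the expansions are exactly the open-coded #ifdef blocks being removed in the
diff below; the 32-bit variant adds 4 to address the live low word of the
big-endian 64-bit field):

	PPC_STD(r6, VCPU_SHARED_MSR, r11)

	/* on CONFIG_64BIT this assembles to a full doubleword store: */
	std	r6, (VCPU_SHARED_MSR)(r11)

	/* on 32-bit it becomes a word store at the low-word offset: */
	stw	r6, (VCPU_SHARED_MSR + 4)(r11)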
-rw-r--r--	arch/powerpc/include/asm/kvm_asm.h	8
-rw-r--r--	arch/powerpc/kvm/bookehv_interrupts.S	24
2 files changed, 12 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 097815233284..7d4018dd0e11 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -20,6 +20,14 @@
 #ifndef __POWERPC_KVM_ASM_H__
 #define __POWERPC_KVM_ASM_H__
 
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg)	std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg)	ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg)	stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg)	lwz treg, (offset+4)(areg)
+#endif
+
 /* IVPR must be 64KiB-aligned. */
 #define VCPU_SIZE_ORDER	4
 #define VCPU_SIZE_LOG	(VCPU_SIZE_ORDER + 12)
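The load side mirrors the store side. A sketch of why the 32-bit variants add
4, assuming the big-endian field layout these offsets imply (high word first):

	/* 64-bit shared field at "offset":
	 *   offset + 0 .. offset + 3 : high word (untouched on 32-bit)
	 *   offset + 4 .. offset + 7 : low word  (what stw/lwz access)
	 */
	PPC_LD(r9, VCPU_SHARED_MSR, r11)	/* ld on 64-bit; lwz at offset+4 on 32-bit */

Folding the #ifdef into one header also matches the existing PPC_STL/PPC_LL
convention already used in bookehv_interrupts.S, so each call site below
shrinks from five lines to one.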
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 909e96e0650c..41d34850f826 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -93,11 +93,7 @@
 #endif
 
 	oris	r8, r6, MSR_CE@h
-#ifdef CONFIG_64BIT
-	std	r6, (VCPU_SHARED_MSR)(r11)
-#else
-	stw	r6, (VCPU_SHARED_MSR + 4)(r11)
-#endif
+	PPC_STD(r6, VCPU_SHARED_MSR, r11)
 	ori	r8, r8, MSR_ME | MSR_RI
 	PPC_STL	r5, VCPU_PC(r4)
 
@@ -335,11 +331,7 @@ _GLOBAL(kvmppc_resume_host)
 	stw	r5, VCPU_SHARED_MAS0(r11)
 	mfspr	r7, SPRN_MAS2
 	stw	r6, VCPU_SHARED_MAS1(r11)
-#ifdef CONFIG_64BIT
-	std	r7, (VCPU_SHARED_MAS2)(r11)
-#else
-	stw	r7, (VCPU_SHARED_MAS2 + 4)(r11)
-#endif
+	PPC_STD(r7, VCPU_SHARED_MAS2, r11)
 	mfspr	r5, SPRN_MAS3
 	mfspr	r6, SPRN_MAS4
 	stw	r5, VCPU_SHARED_MAS7_3+4(r11)
@@ -527,11 +519,7 @@ lightweight_exit:
 	stw	r3, VCPU_HOST_MAS6(r4)
 	lwz	r3, VCPU_SHARED_MAS0(r11)
 	lwz	r5, VCPU_SHARED_MAS1(r11)
-#ifdef CONFIG_64BIT
-	ld	r6, (VCPU_SHARED_MAS2)(r11)
-#else
-	lwz	r6, (VCPU_SHARED_MAS2 + 4)(r11)
-#endif
+	PPC_LD(r6, VCPU_SHARED_MAS2, r11)
 	lwz	r7, VCPU_SHARED_MAS7_3+4(r11)
 	lwz	r8, VCPU_SHARED_MAS4(r11)
 	mtspr	SPRN_MAS0, r3
@@ -565,11 +553,7 @@ lightweight_exit:
 	PPC_LL	r6, VCPU_CTR(r4)
 	PPC_LL	r7, VCPU_CR(r4)
 	PPC_LL	r8, VCPU_PC(r4)
-#ifdef CONFIG_64BIT
-	ld	r9, (VCPU_SHARED_MSR)(r11)
-#else
-	lwz	r9, (VCPU_SHARED_MSR + 4)(r11)
-#endif
+	PPC_LD(r9, VCPU_SHARED_MSR, r11)
 	PPC_LL	r0, VCPU_GPR(r0)(r4)
 	PPC_LL	r1, VCPU_GPR(r1)(r4)
 	PPC_LL	r2, VCPU_GPR(r2)(r4)