author		Alexander Graf <agraf@suse.de>		2010-01-07 20:58:03 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2010-03-01 10:35:48 -0500
commit		7e57cba06074da84d7c24d8c3f44040d2d8c88ac (patch)
tree		e99519ca301b887de4d689421e7a954f1c7792e4 /arch/powerpc
parent		992b5b29b5ae254c416c62faf98d59a6cf970027 (diff)
KVM: PPC: Use PACA backed shadow vcpu
We're being horribly racy right now. All the entry and exit code hijacks
random fields from the PACA that could easily be used by different code in
case we get interrupted, for example by a #MC or even page fault.

After discussing this with Ben, we figured it's best to reserve some more
space in the PACA and just shove off some vcpu state to there.

That way we can drastically improve the readability of the code, make it
less racy and less complex.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
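[Editor's note, not part of the patch: the ownership split this change establishes can be sketched in plain C. GPRs 0-13 plus CR and XER live in the per-CPU PACA while a vcpu is loaded; everything else stays in the vcpu struct; vcpu load/put copy the shadow block wholesale. The identifiers (get_paca(), to_book3s(), shadow_vcpu) are exactly the ones the hunks below touch; the sketch merely condenses them.]

	/* Sketch: registers the real-mode entry/exit code needs live in the
	 * PACA, which is always reachable through r13 even with the MMU off. */
	static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
	{
		if (num < 14)		/* PACA-backed shadow state */
			return get_paca()->shadow_vcpu.gpr[num];
		else			/* stays in the vcpu struct */
			return vcpu->arch.gpr[num];
	}

	/* On vcpu_load the shadow block is copied into the PACA; vcpu_put
	 * copies it back (see the book3s.c hunk below). */
	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));

[Since these PACA fields are reserved for KVM, the entry/exit assembly no longer has to borrow the PACA_EXMC exception save area, which a #MC or page fault handler could scribble over at the same time.]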
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h           2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64_asm.h   19
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h             5
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h             20
-rw-r--r--  arch/powerpc/include/asm/paca.h                 5
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c              33
-rw-r--r--  arch/powerpc/kvm/book3s.c                       4
-rw-r--r--  arch/powerpc/kvm/book3s_64_interrupts.S       216
-rw-r--r--  arch/powerpc/kvm/book3s_64_rmhandlers.S        32
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S              150
10 files changed, 249 insertions(+), 237 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 74b7369770d0..f192017d799d 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s_64_asm.h>
 
 struct kvmppc_slb {
 	u64 esid;
@@ -69,6 +70,7 @@ struct kvmppc_sid_map {
 
 struct kvmppc_vcpu_book3s {
 	struct kvm_vcpu vcpu;
+	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct kvmppc_slb slb[64];
 	struct {
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
index 2e06ee8184ef..fca9404c1a7d 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
@@ -20,6 +20,8 @@
 #ifndef __ASM_KVM_BOOK3S_ASM_H__
 #define __ASM_KVM_BOOK3S_ASM_H__
 
+#ifdef __ASSEMBLY__
+
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 
 #include <asm/kvm_asm.h>
@@ -55,4 +57,21 @@ kvmppc_resume_\intno:
 
 #endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
 
+#else  /* __ASSEMBLY__ */
+
+struct kvmppc_book3s_shadow_vcpu {
+	ulong gpr[14];
+	u32 cr;
+	u32 xer;
+	ulong host_r1;
+	ulong host_r2;
+	ulong handler;
+	ulong scratch0;
+	ulong scratch1;
+	ulong vmhandler;
+	ulong rmhandler;
+};
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1201f62d0d73..d615fa8a1412 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -175,10 +175,13 @@ struct kvm_vcpu_arch {
 	ulong gpr[32];
 
 	ulong pc;
-	u32 cr;
 	ulong ctr;
 	ulong lr;
+
+#ifdef CONFIG_BOOKE
 	ulong xer;
+	u32 cr;
+#endif
 
 	ulong msr;
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index d60b2f0cdcf2..89c5d79c3479 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -98,34 +98,42 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 
 #ifdef CONFIG_PPC_BOOK3S
 
+/* We assume we're always acting on the current vcpu */
+
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
-	vcpu->arch.gpr[num] = val;
+	if ( num < 14 )
+		get_paca()->shadow_vcpu.gpr[num] = val;
+	else
+		vcpu->arch.gpr[num] = val;
 }
 
 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
 {
-	return vcpu->arch.gpr[num];
+	if ( num < 14 )
+		return get_paca()->shadow_vcpu.gpr[num];
+	else
+		return vcpu->arch.gpr[num];
 }
 
 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
 {
-	vcpu->arch.cr = val;
+	get_paca()->shadow_vcpu.cr = val;
 }
 
 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr;
+	return get_paca()->shadow_vcpu.cr;
 }
 
 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
 {
-	vcpu->arch.xer = val;
+	get_paca()->shadow_vcpu.xer = val;
 }
 
 static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.xer;
+	return get_paca()->shadow_vcpu.xer;
 }
 
 #else
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 5e9b4ef71415..d8a693109c82 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -19,6 +19,9 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/exception-64e.h>
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64_asm.h>
+#endif
 
 register struct paca_struct *local_paca asm("r13");
 
@@ -135,6 +138,8 @@ struct paca_struct {
 		u64 esid;
 		u64 vsid;
 	} kvm_slb[64];			/* guest SLB */
+	/* We use this to store guest state in */
+	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 	u8 kvm_slb_max;			/* highest used guest slb entry */
 	u8 kvm_in_guest;		/* are we inside the guest? */
 #endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a6c2b63227b3..1501e77c980c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -194,6 +194,32 @@ int main(void)
 	DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
 	DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
 	DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
+	DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
+	DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
+	DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
+	DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
+	DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
+	DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
+	DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
+	DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
+	DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
+	DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
+	DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
+	DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
+	DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
+	DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
+	DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
+	DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
+	DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
+	DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
+	DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
+					    shadow_vcpu.vmhandler));
+	DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct,
+					    shadow_vcpu.rmhandler));
+	DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
+					   shadow_vcpu.scratch0));
+	DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
+					   shadow_vcpu.scratch1));
 #endif
 #endif /* CONFIG_PPC64 */
 
@@ -389,8 +415,6 @@ int main(void)
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
-	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
-	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
 	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
@@ -415,7 +439,10 @@ int main(void)
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
-#endif
+#else
+	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+#endif /* CONFIG_PPC64 */
 #endif
 #ifdef CONFIG_44x
 	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 09ba8dbaabab..3e06eae3f2c8 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -66,12 +66,16 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
+	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+	       sizeof(get_paca()->shadow_vcpu));
 	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
+	memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+	       sizeof(get_paca()->shadow_vcpu));
 	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
 }
 
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index d95d0d967d56..66e3b1179b32 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -28,11 +28,6 @@
 #define ULONG_SIZE 8
 #define VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
 
-.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
-	ld	\tmp_reg, (PACA_EXMC+\offset)(r13)
-	std	\tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
-.endm
-
 .macro DISABLE_INTERRUPTS
 	mfmsr   r0
 	rldicl  r0,r0,48,1
@@ -92,37 +87,30 @@ kvm_start_entry:
 	/* Load non-volatile guest state from the vcpu */
 	VCPU_LOAD_NVGPRS(r4)
 
-kvm_start_lightweight:
-
-	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
-	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
-
-	DISABLE_INTERRUPTS
-
 	/* Save R1/R2 in the PACA */
-	std	r1, PACAR1(r13)
-	std	r2, (PACA_EXMC+EX_SRR0)(r13)
+	std	r1, PACA_KVM_HOST_R1(r13)
+	std	r2, PACA_KVM_HOST_R2(r13)
+
+	/* XXX swap in/out on load? */
 	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
-	std	r3, PACASAVEDMSR(r13)
+	std	r3, PACA_KVM_VMHANDLER(r13)
 
 	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
-	mtsrr0	r3
+	std	r3, PACA_KVM_RMHANDLER(r13)
 
-	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r3
+kvm_start_lightweight:
 
-	/* Load guest state in the respective registers */
-	lwz	r3, VCPU_CR(r4)		/* r3 = vcpu->arch.cr */
-	stw	r3, (PACA_EXMC + EX_CCR)(r13)
+	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
+	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
 
+	/* Load some guest state in the respective registers */
 	ld	r3, VCPU_CTR(r4)	/* r3 = vcpu->arch.ctr */
 	mtctr	r3			/* CTR = r3 */
 
 	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
 	mtlr	r3			/* LR = r3 */
 
-	ld	r3, VCPU_XER(r4)	/* r3 = vcpu->arch.xer */
-	std	r3, (PACA_EXMC + EX_R3)(r13)
+	DISABLE_INTERRUPTS
 
 	/* Some guests may need to have dcbz set to 32 byte length.
 	 *
@@ -142,34 +130,21 @@ kvm_start_lightweight:
 	mtspr	SPRN_HID5,r3
 
 no_dcbz32_on:
-	/* Load guest GPRs */
-
-	ld	r3, VCPU_GPR(r9)(r4)
-	std	r3, (PACA_EXMC + EX_R9)(r13)
-	ld	r3, VCPU_GPR(r10)(r4)
-	std	r3, (PACA_EXMC + EX_R10)(r13)
-	ld	r3, VCPU_GPR(r11)(r4)
-	std	r3, (PACA_EXMC + EX_R11)(r13)
-	ld	r3, VCPU_GPR(r12)(r4)
-	std	r3, (PACA_EXMC + EX_R12)(r13)
-	ld	r3, VCPU_GPR(r13)(r4)
-	std	r3, (PACA_EXMC + EX_R13)(r13)
-
-	ld	r0, VCPU_GPR(r0)(r4)
-	ld	r1, VCPU_GPR(r1)(r4)
-	ld	r2, VCPU_GPR(r2)(r4)
-	ld	r3, VCPU_GPR(r3)(r4)
-	ld	r5, VCPU_GPR(r5)(r4)
-	ld	r6, VCPU_GPR(r6)(r4)
-	ld	r7, VCPU_GPR(r7)(r4)
-	ld	r8, VCPU_GPR(r8)(r4)
-	ld	r4, VCPU_GPR(r4)(r4)
 
 	/* This sets the Magic value for the trampoline */
 
+	/* XXX this needs to move into a safe function, so we can
+	   be sure we don't get any interrupts */
+
 	li	r11, 1
 	stb	r11, PACA_KVM_IN_GUEST(r13)
 
+	ld	r3, PACA_KVM_RMHANDLER(r13)
+	mtsrr0	r3
+
+	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+	mtsrr1	r3
+
 	/* Jump to SLB patching handlder and into our guest */
 	RFI
 
@@ -185,60 +160,31 @@ kvmppc_handler_highmem:
 	/*
 	 * Register usage at this point:
 	 *
-	 * R00   = guest R13
-	 * R01   = host R1
-	 * R02   = host R2
-	 * R10   = guest PC
-	 * R11   = guest MSR
-	 * R12   = exit handler id
-	 * R13   = PACA
-	 * PACA.exmc.R9    = guest R1
-	 * PACA.exmc.R10   = guest R10
-	 * PACA.exmc.R11   = guest R11
-	 * PACA.exmc.R12   = guest R12
-	 * PACA.exmc.R13   = guest R2
-	 * PACA.exmc.DAR   = guest DAR
-	 * PACA.exmc.DSISR = guest DSISR
-	 * PACA.exmc.LR    = guest instruction
-	 * PACA.exmc.CCR   = guest CR
-	 * PACA.exmc.SRR0  = guest R0
+	 * R0  = guest last inst
+	 * R1  = host R1
+	 * R2  = host R2
+	 * R3  = guest PC
+	 * R4  = guest MSR
+	 * R5  = guest DAR
+	 * R6  = guest DSISR
+	 * R13 = PACA
+	 * PACA.KVM.* = guest *
 	 *
 	 */
 
-	std	r3, (PACA_EXMC+EX_R3)(r13)
+	/* R7 = vcpu */
+	ld	r7, GPR4(r1)
 
-	/* save the exit id in R3 */
-	mr	r3, r12
+	/* Now save the guest state */
 
-	/* R12 = vcpu */
-	ld	r12, GPR4(r1)
+	stw	r0, VCPU_LAST_INST(r7)
 
-	/* Now save the guest state */
+	std	r3, VCPU_PC(r7)
+	std	r4, VCPU_SHADOW_MSR(r7)
+	std	r5, VCPU_FAULT_DEAR(r7)
+	std	r6, VCPU_FAULT_DSISR(r7)
 
-	std	r0, VCPU_GPR(r13)(r12)
-	std	r4, VCPU_GPR(r4)(r12)
-	std	r5, VCPU_GPR(r5)(r12)
-	std	r6, VCPU_GPR(r6)(r12)
-	std	r7, VCPU_GPR(r7)(r12)
-	std	r8, VCPU_GPR(r8)(r12)
-	std	r9, VCPU_GPR(r9)(r12)
-
-	/* get registers from PACA */
-	mfpaca	r5, r0, EX_SRR0, r12
-	mfpaca	r5, r3, EX_R3, r12
-	mfpaca	r5, r1, EX_R9, r12
-	mfpaca	r5, r10, EX_R10, r12
-	mfpaca	r5, r11, EX_R11, r12
-	mfpaca	r5, r12, EX_R12, r12
-	mfpaca	r5, r2, EX_R13, r12
-
-	lwz	r5, (PACA_EXMC+EX_LR)(r13)
-	stw	r5, VCPU_LAST_INST(r12)
-
-	lwz	r5, (PACA_EXMC+EX_CCR)(r13)
-	stw	r5, VCPU_CR(r12)
-
-	ld	r5, VCPU_HFLAGS(r12)
+	ld	r5, VCPU_HFLAGS(r7)
 	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
 	beq	no_dcbz32_off
 
@@ -248,58 +194,42 @@ kvmppc_handler_highmem:
 
 no_dcbz32_off:
 
-	std	r14, VCPU_GPR(r14)(r12)
-	std	r15, VCPU_GPR(r15)(r12)
-	std	r16, VCPU_GPR(r16)(r12)
-	std	r17, VCPU_GPR(r17)(r12)
-	std	r18, VCPU_GPR(r18)(r12)
-	std	r19, VCPU_GPR(r19)(r12)
-	std	r20, VCPU_GPR(r20)(r12)
-	std	r21, VCPU_GPR(r21)(r12)
-	std	r22, VCPU_GPR(r22)(r12)
-	std	r23, VCPU_GPR(r23)(r12)
-	std	r24, VCPU_GPR(r24)(r12)
-	std	r25, VCPU_GPR(r25)(r12)
-	std	r26, VCPU_GPR(r26)(r12)
-	std	r27, VCPU_GPR(r27)(r12)
-	std	r28, VCPU_GPR(r28)(r12)
-	std	r29, VCPU_GPR(r29)(r12)
-	std	r30, VCPU_GPR(r30)(r12)
-	std	r31, VCPU_GPR(r31)(r12)
+	std	r14, VCPU_GPR(r14)(r7)
+	std	r15, VCPU_GPR(r15)(r7)
+	std	r16, VCPU_GPR(r16)(r7)
+	std	r17, VCPU_GPR(r17)(r7)
+	std	r18, VCPU_GPR(r18)(r7)
+	std	r19, VCPU_GPR(r19)(r7)
+	std	r20, VCPU_GPR(r20)(r7)
+	std	r21, VCPU_GPR(r21)(r7)
+	std	r22, VCPU_GPR(r22)(r7)
+	std	r23, VCPU_GPR(r23)(r7)
+	std	r24, VCPU_GPR(r24)(r7)
+	std	r25, VCPU_GPR(r25)(r7)
+	std	r26, VCPU_GPR(r26)(r7)
+	std	r27, VCPU_GPR(r27)(r7)
+	std	r28, VCPU_GPR(r28)(r7)
+	std	r29, VCPU_GPR(r29)(r7)
+	std	r30, VCPU_GPR(r30)(r7)
+	std	r31, VCPU_GPR(r31)(r7)
 
-	/* Save guest PC (R10) */
-	std	r10, VCPU_PC(r12)
-
-	/* Save guest msr (R11) */
-	std	r11, VCPU_SHADOW_MSR(r12)
-
-	/* Save guest CTR (in R12) */
+	/* Save guest CTR */
 	mfctr	r5
-	std	r5, VCPU_CTR(r12)
+	std	r5, VCPU_CTR(r7)
 
 	/* Save guest LR */
 	mflr	r5
-	std	r5, VCPU_LR(r12)
-
-	/* Save guest XER */
-	mfxer	r5
-	std	r5, VCPU_XER(r12)
+	std	r5, VCPU_LR(r7)
 
-	/* Save guest DAR */
-	ld	r5, (PACA_EXMC+EX_DAR)(r13)
-	std	r5, VCPU_FAULT_DEAR(r12)
-
-	/* Save guest DSISR */
-	lwz	r5, (PACA_EXMC+EX_DSISR)(r13)
-	std	r5, VCPU_FAULT_DSISR(r12)
+	/* XXX convert to safe function call */
 
 	/* Restore host msr -> SRR1 */
-	ld	r7, VCPU_HOST_MSR(r12)
-	mtsrr1	r7
+	ld	r6, VCPU_HOST_MSR(r7)
+	mtsrr1	r6
 
 	/* Restore host IP -> SRR0 */
-	ld	r6, VCPU_HOST_RETIP(r12)
-	mtsrr0	r6
+	ld	r5, VCPU_HOST_RETIP(r7)
+	mtsrr0	r5
 
 	/*
 	 * For some interrupts, we need to call the real Linux
@@ -311,9 +241,9 @@ no_dcbz32_off:
 	 * r3 = address of interrupt handler (exit reason)
 	 */
 
-	cmpwi	r3, BOOK3S_INTERRUPT_EXTERNAL
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
 	beq	call_linux_handler
-	cmpwi	r3, BOOK3S_INTERRUPT_DECREMENTER
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
 	beq	call_linux_handler
 
 	/* Back to Interruptable Mode! (goto kvm_return_point) */
@@ -334,12 +264,12 @@ call_linux_handler:
 	 * R7 VCPU_HOST_MSR
 	 */
 
-	mtlr	r3
+	mtlr	r12
 
-	ld	r5, VCPU_TRAMPOLINE_LOWMEM(r12)
-	mtsrr0	r5
-	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r5
+	ld	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+	mtsrr0	r4
+	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+	mtsrr1	r3
 
 	RFI
 
@@ -350,7 +280,7 @@ kvm_return_point:
 	/* go back into the guest */
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r3
+	mr	r5, r12
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
index fb7dd2e9ac88..cd9f0b609e48 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S
@@ -45,37 +45,21 @@ kvmppc_trampoline_\intno:
 	 * To distinguish, we check a magic byte in the PACA
 	 */
 	mfspr	r13, SPRN_SPRG_PACA		/* r13 = PACA */
-	std	r12, (PACA_EXMC + EX_R12)(r13)
+	std	r12, PACA_KVM_SCRATCH0(r13)
 	mfcr	r12
-	stw	r12, (PACA_EXMC + EX_CCR)(r13)
+	stw	r12, PACA_KVM_SCRATCH1(r13)
 	lbz	r12, PACA_KVM_IN_GUEST(r13)
 	cmpwi	r12, 0
 	bne	..kvmppc_handler_hasmagic_\intno
 	/* No KVM guest? Then jump back to the Linux handler! */
-	lwz	r12, (PACA_EXMC + EX_CCR)(r13)
+	lwz	r12, PACA_KVM_SCRATCH1(r13)
 	mtcr	r12
-	ld	r12, (PACA_EXMC + EX_R12)(r13)
+	ld	r12, PACA_KVM_SCRATCH0(r13)
 	mfspr	r13, SPRN_SPRG_SCRATCH0		/* r13 = original r13 */
 	b	kvmppc_resume_\intno		/* Get back original handler */
 
 	/* Now we know we're handling a KVM guest */
 ..kvmppc_handler_hasmagic_\intno:
-	/* Unset guest state */
-	li	r12, 0
-	stb	r12, PACA_KVM_IN_GUEST(r13)
-
-	std	r1, (PACA_EXMC+EX_R9)(r13)
-	std	r10, (PACA_EXMC+EX_R10)(r13)
-	std	r11, (PACA_EXMC+EX_R11)(r13)
-	std	r2, (PACA_EXMC+EX_R13)(r13)
-
-	mfsrr0	r10
-	mfsrr1	r11
-
-	/* Restore R1/R2 so we can handle faults */
-	ld	r1, PACAR1(r13)
-	ld	r2, (PACA_EXMC+EX_SRR0)(r13)
-
 	/* Let's store which interrupt we're handling */
 	li	r12, \intno
 
@@ -106,16 +90,16 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
  *
  * Input Registers:
  *
- * R6 = SRR0
- * R7 = SRR1
+ * R5 = SRR0
+ * R6 = SRR1
  * LR = real-mode IP
  *
  */
 .global kvmppc_handler_lowmem_trampoline
 kvmppc_handler_lowmem_trampoline:
 
-	mtsrr0	r6
-	mtsrr1	r7
+	mtsrr0	r5
+	mtsrr1	r6
 	blr
 kvmppc_handler_lowmem_trampoline_end:
 
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 8e4478866669..7188c11ed7d1 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -51,24 +51,18 @@ kvmppc_handler_trampoline_enter:
 	 *
 	 * MSR = ~IR|DR
 	 * R13 = PACA
+	 * R1 = host R1
+	 * R2 = host R2
 	 * R9 = guest IP
 	 * R10 = guest MSR
-	 * R11 = free
-	 * R12 = free
-	 * PACA[PACA_EXMC + EX_R9] = guest R9
-	 * PACA[PACA_EXMC + EX_R10] = guest R10
-	 * PACA[PACA_EXMC + EX_R11] = guest R11
-	 * PACA[PACA_EXMC + EX_R12] = guest R12
-	 * PACA[PACA_EXMC + EX_R13] = guest R13
-	 * PACA[PACA_EXMC + EX_CCR] = guest CR
-	 * PACA[PACA_EXMC + EX_R3] = guest XER
+	 * all other GPRS = free
+	 * PACA[KVM_CR] = guest CR
+	 * PACA[KVM_XER] = guest XER
 	 */
 
 	mtsrr0	r9
 	mtsrr1	r10
 
-	mtspr	SPRN_SPRG_SCRATCH0, r0
-
 	/* Remove LPAR shadow entries */
 
 #if SLB_NUM_BOLTED == 3
@@ -131,20 +125,27 @@ slb_do_enter:
 
 	/* Enter guest */
 
-	mfspr	r0, SPRN_SPRG_SCRATCH0
-
-	ld	r9, (PACA_EXMC+EX_R9)(r13)
-	ld	r10, (PACA_EXMC+EX_R10)(r13)
-	ld	r12, (PACA_EXMC+EX_R12)(r13)
-
-	lwz	r11, (PACA_EXMC+EX_CCR)(r13)
+	ld	r0, (PACA_KVM_R0)(r13)
+	ld	r1, (PACA_KVM_R1)(r13)
+	ld	r2, (PACA_KVM_R2)(r13)
+	ld	r3, (PACA_KVM_R3)(r13)
+	ld	r4, (PACA_KVM_R4)(r13)
+	ld	r5, (PACA_KVM_R5)(r13)
+	ld	r6, (PACA_KVM_R6)(r13)
+	ld	r7, (PACA_KVM_R7)(r13)
+	ld	r8, (PACA_KVM_R8)(r13)
+	ld	r9, (PACA_KVM_R9)(r13)
+	ld	r10, (PACA_KVM_R10)(r13)
+	ld	r12, (PACA_KVM_R12)(r13)
+
+	lwz	r11, (PACA_KVM_CR)(r13)
 	mtcr	r11
 
-	ld	r11, (PACA_EXMC+EX_R3)(r13)
+	ld	r11, (PACA_KVM_XER)(r13)
 	mtxer	r11
 
-	ld	r11, (PACA_EXMC+EX_R11)(r13)
-	ld	r13, (PACA_EXMC+EX_R13)(r13)
+	ld	r11, (PACA_KVM_R11)(r13)
+	ld	r13, (PACA_KVM_R13)(r13)
 
 	RFI
 kvmppc_handler_trampoline_enter_end:
@@ -162,28 +163,58 @@ kvmppc_handler_trampoline_exit:
 
 	/* Register usage at this point:
 	 *
 	 * SPRG_SCRATCH0     = guest R13
-	 * R01               = host R1
-	 * R02               = host R2
-	 * R10               = guest PC
-	 * R11               = guest MSR
-	 * R12               = exit handler id
-	 * R13               = PACA
-	 * PACA.exmc.CCR     = guest CR
-	 * PACA.exmc.R9      = guest R1
-	 * PACA.exmc.R10     = guest R10
-	 * PACA.exmc.R11     = guest R11
-	 * PACA.exmc.R12     = guest R12
-	 * PACA.exmc.R13     = guest R2
+	 * R12               = exit handler id
+	 * R13               = PACA
+	 * PACA.KVM.SCRATCH0 = guest R12
+	 * PACA.KVM.SCRATCH1 = guest CR
 	 *
 	 */
 
 	/* Save registers */
 
-	std	r0, (PACA_EXMC+EX_SRR0)(r13)
-	std	r9, (PACA_EXMC+EX_R3)(r13)
-	std	r10, (PACA_EXMC+EX_LR)(r13)
-	std	r11, (PACA_EXMC+EX_DAR)(r13)
+	std	r0, PACA_KVM_R0(r13)
+	std	r1, PACA_KVM_R1(r13)
+	std	r2, PACA_KVM_R2(r13)
+	std	r3, PACA_KVM_R3(r13)
+	std	r4, PACA_KVM_R4(r13)
+	std	r5, PACA_KVM_R5(r13)
+	std	r6, PACA_KVM_R6(r13)
+	std	r7, PACA_KVM_R7(r13)
+	std	r8, PACA_KVM_R8(r13)
+	std	r9, PACA_KVM_R9(r13)
+	std	r10, PACA_KVM_R10(r13)
+	std	r11, PACA_KVM_R11(r13)
+
+	/* Restore R1/R2 so we can handle faults */
+	ld	r1, PACA_KVM_HOST_R1(r13)
+	ld	r2, PACA_KVM_HOST_R2(r13)
+
+	/* Save guest PC and MSR in GPRs */
+	mfsrr0	r3
+	mfsrr1	r4
+
+	/* Get scratch'ed off registers */
+	mfspr	r9, SPRN_SPRG_SCRATCH0
+	std	r9, PACA_KVM_R13(r13)
+
+	ld	r8, PACA_KVM_SCRATCH0(r13)
+	std	r8, PACA_KVM_R12(r13)
+
+	lwz	r7, PACA_KVM_SCRATCH1(r13)
+	stw	r7, PACA_KVM_CR(r13)
+
+	/* Save more register state */
+
+	mfxer	r6
+	stw	r6, PACA_KVM_XER(r13)
+
+	mfdar	r5
+	mfdsisr	r6
+
+	/* Unset guest state */
+	li	r9, 0
+	stb	r9, PACA_KVM_IN_GUEST(r13)
 
 	/*
 	 * In order for us to easily get the last instruction,
@@ -207,7 +238,8 @@ ld_last_inst:
 	ori	r11, r9, MSR_DR			/* Enable paging for data */
 	mtmsr	r11
 	/* 2) fetch the instruction */
-	lwz	r0, 0(r10)
+	/* XXX implement PACA_KVM_IN_GUEST=2 path to safely jump over this */
+	lwz	r0, 0(r3)
 	/* 3) disable paging again */
 	mtmsr	r9
 
@@ -233,29 +265,27 @@ no_ld_last_inst:
 
 slb_do_exit:
 
-	/* Restore registers */
-
-	ld	r11, (PACA_EXMC+EX_DAR)(r13)
-	ld	r10, (PACA_EXMC+EX_LR)(r13)
-	ld	r9, (PACA_EXMC+EX_R3)(r13)
-
-	/* Save last inst */
-	stw	r0, (PACA_EXMC+EX_LR)(r13)
-
-	/* Save DAR and DSISR before going to paged mode */
-	mfdar	r0
-	std	r0, (PACA_EXMC+EX_DAR)(r13)
-	mfdsisr	r0
-	stw	r0, (PACA_EXMC+EX_DSISR)(r13)
+	/* Register usage at this point:
+	 *
+	 * R0         = guest last inst
+	 * R1         = host R1
+	 * R2         = host R2
+	 * R3         = guest PC
+	 * R4         = guest MSR
+	 * R5         = guest DAR
+	 * R6         = guest DSISR
+	 * R12        = exit handler id
+	 * R13        = PACA
+	 * PACA.KVM.* = guest *
+	 *
+	 */
 
 	/* RFI into the highmem handler */
-	mfmsr	r0
-	ori	r0, r0, MSR_IR|MSR_DR|MSR_RI	/* Enable paging */
-	mtsrr1	r0
-	ld	r0, PACASAVEDMSR(r13)		/* Highmem handler address */
-	mtsrr0	r0
-
-	mfspr	r0, SPRN_SPRG_SCRATCH0
+	mfmsr	r7
+	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI	/* Enable paging */
+	mtsrr1	r7
+	ld	r8, PACA_KVM_VMHANDLER(r13)	/* Highmem handler address */
+	mtsrr0	r8
 
 	RFI
 kvmppc_handler_trampoline_exit_end: