 arch/powerpc/include/asm/kvm_book3s.h     |   4 +-
 arch/powerpc/include/asm/kvm_book3s_asm.h |   1 +
 arch/powerpc/include/asm/kvm_host.h       |   8 -
 arch/powerpc/kernel/asm-offsets.c         |   7 +-
 arch/powerpc/kvm/book3s_32_sr.S           |   2 +-
 arch/powerpc/kvm/book3s_64_slb.S          |   2 +-
 arch/powerpc/kvm/book3s_exports.c         |   4 +-
 arch/powerpc/kvm/book3s_interrupts.S      | 129 +++----------------
 arch/powerpc/kvm/book3s_pr.c              |  12 --
 arch/powerpc/kvm/book3s_rmhandlers.S      |  51 ++++-----
 arch/powerpc/kvm/book3s_segment.S         | 112 ++++++++++++------
 11 files changed, 120 insertions(+), 212 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 91d41fabc5b0..a384ffdf33de 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -141,9 +141,7 @@ extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
-extern void kvmppc_handler_lowmem_trampoline(void);
-extern void kvmppc_handler_trampoline_enter(void);
-extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
 extern void kvmppc_load_up_fpu(void);
 extern void kvmppc_load_up_altivec(void);
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index ef7b3688c3b6..af73469530e6 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -75,6 +75,7 @@ struct kvmppc_host_state {
 	ulong scratch0;
 	ulong scratch1;
 	u8 in_guest;
+	u8 restore_hid5;
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	struct kvm_vcpu *kvm_vcpu;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2b8284f4b4b7..dec3054f6ad4 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -258,14 +258,6 @@ struct kvm_vcpu_arch {
 	ulong host_stack;
 	u32 host_pid;
 #ifdef CONFIG_PPC_BOOK3S
-	ulong host_msr;
-	ulong host_r2;
-	void *host_retip;
-	ulong trampoline_lowmem;
-	ulong trampoline_enter;
-	ulong highmem_handler;
-	ulong rmcall;
-	ulong host_paca_phys;
 	struct kvmppc_slb slb[64];
 	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 5f078bc2063e..e069c766695d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -449,8 +449,6 @@ int main(void)
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
-	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
-	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
 	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
 	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
 	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
@@ -458,10 +456,6 @@ int main(void)
 	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
 	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
 	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
-	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
-	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
-	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
-	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
 	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
 	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
@@ -537,6 +531,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
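
A quick orientation note on the file above: asm-offsets.c is what lets the
assembly in this patch use names like HSTATE_RESTORE_HID5. Each DEFINE() or
HSTATE_FIELD() emits a marker that the build turns into a plain integer
constant in a generated header. A minimal sketch of the mechanism, with a
stand-in struct for illustration (the real DEFINE() lives in
include/linux/kbuild.h):

	#include <stddef.h>

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/* stand-in for struct kvmppc_host_state, illustration only */
	struct host_state_sketch {
		unsigned long scratch0;
		unsigned long scratch1;
		unsigned char in_guest;
		unsigned char restore_hid5;	/* field added by this patch */
	};

	int main(void)
	{
		DEFINE(HSTATE_RESTORE_HID5,
		       offsetof(struct host_state_sketch, restore_hid5));
		return 0;
	}

The generated constant is then usable from assembly, e.g.
"lbz r0, HSTATE_RESTORE_HID5(r13)" in book3s_segment.S below.
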
diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S
index 3608471ad2d8..7e06a6fc8d07 100644
--- a/arch/powerpc/kvm/book3s_32_sr.S
+++ b/arch/powerpc/kvm/book3s_32_sr.S
@@ -31,7 +31,7 @@
 	 * R1 = host R1
 	 * R2 = host R2
 	 * R3 = shadow vcpu
-	 * all other volatile GPRS = free
+	 * all other volatile GPRS = free except R4, R6
 	 * SVCPU[CR] = guest CR
 	 * SVCPU[XER] = guest XER
 	 * SVCPU[CTR] = guest CTR
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 04e7d3bbfe8b..f2e6e48ea463 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -53,7 +53,7 @@ slb_exit_skip_ ## num:
 	 * R1 = host R1
 	 * R2 = host R2
 	 * R3 = shadow vcpu
-	 * all other volatile GPRS = free
+	 * all other volatile GPRS = free except R4, R6
 	 * SVCPU[CR] = guest CR
 	 * SVCPU[XER] = guest XER
 	 * SVCPU[CTR] = guest CTR
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 88c8f26add02..f7f63a00ab1f 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -23,9 +23,7 @@
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
 #else
-EXPORT_SYMBOL_GPL(kvmppc_handler_trampoline_enter);
-EXPORT_SYMBOL_GPL(kvmppc_handler_lowmem_trampoline);
-EXPORT_SYMBOL_GPL(kvmppc_rmcall);
+EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
 EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index c54b0e30cf3f..0a8515a5c042 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -29,27 +29,11 @@
 #define ULONG_SIZE 		8
 #define FUNC(name) 		GLUE(.,name)
 
-#define GET_SHADOW_VCPU_R13
-
-#define DISABLE_INTERRUPTS	\
-	mfmsr   r0;		\
-	rldicl  r0,r0,48,1;	\
-	rotldi  r0,r0,16;	\
-	mtmsrd  r0,1;		\
-
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
 #define ULONG_SIZE              4
 #define FUNC(name)		name
 
-#define GET_SHADOW_VCPU_R13	\
-	lwz	r13, (THREAD + THREAD_KVM_SVCPU)(r2)
-
-#define DISABLE_INTERRUPTS	\
-	mfmsr   r0;		\
-	rlwinm  r0,r0,0,17,15;	\
-	mtmsr   r0;		\
-
 #endif /* CONFIG_PPC_BOOK3S_XX */
 
 
@@ -108,44 +92,17 @@ kvm_start_entry:
 
 kvm_start_lightweight:
 
-	GET_SHADOW_VCPU_R13
-	PPC_LL	r3, VCPU_HIGHMEM_HANDLER(r4)
-	PPC_STL	r3, HSTATE_VMHANDLER(r13)
-
-	PPC_LL	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
-
-	DISABLE_INTERRUPTS
-
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Some guests may need to have dcbz set to 32 byte length.
-	 *
-	 * Usually we ensure that by patching the guest's instructions
-	 * to trap on dcbz and emulate it in the hypervisor.
-	 *
-	 * If we can, we should tell the CPU to use 32 byte dcbz though,
-	 * because that's a lot faster.
-	 */
-
 	PPC_LL	r3, VCPU_HFLAGS(r4)
-	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
-	beq	no_dcbz32_on
-
-	mfspr	r3,SPRN_HID5
-	ori	r3, r3, 0x80		/* XXX HID5_dcbz32 = 0x80 */
-	mtspr	SPRN_HID5,r3
-
-no_dcbz32_on:
-
+	rldicl	r3, r3, 0, 63		/* r3 &= 1 */
+	stb	r3, HSTATE_RESTORE_HID5(r13)
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
-	PPC_LL	r6, VCPU_RMCALL(r4)
-	mtctr	r6
-
-	PPC_LL	r3, VCPU_TRAMPOLINE_ENTER(r4)
-	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+	PPC_LL	r4, VCPU_SHADOW_MSR(r4)	/* get shadow_msr */
 
 	/* Jump to segment patching handler and into our guest */
-	bctr
+	bl	FUNC(kvmppc_entry_trampoline)
+	nop
 
 /*
  * This is the handler in module memory. It gets jumped at from the
@@ -170,21 +127,6 @@ kvmppc_handler_highmem:
 	/* R7 = vcpu */
 	PPC_LL	r7, GPR4(r1)
 
-#ifdef CONFIG_PPC_BOOK3S_64
-
-	PPC_LL	r5, VCPU_HFLAGS(r7)
-	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
-	beq	no_dcbz32_off
-
-	li	r4, 0
-	mfspr	r5,SPRN_HID5
-	rldimi	r5,r4,6,56
-	mtspr	SPRN_HID5,r5
-
-no_dcbz32_off:
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
 	PPC_STL	r14, VCPU_GPR(r14)(r7)
 	PPC_STL	r15, VCPU_GPR(r15)(r7)
 	PPC_STL	r16, VCPU_GPR(r16)(r7)
@@ -204,67 +146,6 @@ no_dcbz32_off:
 	PPC_STL	r30, VCPU_GPR(r30)(r7)
 	PPC_STL	r31, VCPU_GPR(r31)(r7)
 
-	/* Restore host msr -> SRR1 */
-	PPC_LL	r6, VCPU_HOST_MSR(r7)
-
-	/*
-	 * For some interrupts, we need to call the real Linux
-	 * handler, so it can do work for us. This has to happen
-	 * as if the interrupt arrived from the kernel though,
-	 * so let's fake it here where most state is restored.
-	 *
-	 * Call Linux for hardware interrupts/decrementer
-	 * r3 = address of interrupt handler (exit reason)
-	 */
-
-	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-	beq	call_linux_handler
-	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
-	beq	call_linux_handler
-	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
-	beq	call_linux_handler
-
-	/* Back to EE=1 */
-	mtmsr	r6
-	sync
-	b	kvm_return_point
-
-call_linux_handler:
-
-	/*
-	 * If we land here we need to jump back to the handler we
-	 * came from.
-	 *
-	 * We have a page that we can access from real mode, so let's
-	 * jump back to that and use it as a trampoline to get back into the
-	 * interrupt handler!
-	 *
-	 * R3 still contains the exit code,
-	 * R5 VCPU_HOST_RETIP and
-	 * R6 VCPU_HOST_MSR
-	 */
-
-	/* Restore host IP -> SRR0 */
-	PPC_LL	r5, VCPU_HOST_RETIP(r7)
-
-	/* XXX Better move to a safe function?
-	 *     What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
-
-	mtlr	r12
-
-	PPC_LL	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
-	mtsrr0	r4
-	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r3
-
-	RFI
-
-.global kvm_return_point
-kvm_return_point:
-
-	/* Jump back to lightweight entry if we're supposed to */
-	/* go back into the guest */
-
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
 	mr	r5, r12
 
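
For orientation, the new lightweight entry path above boils down to two
steps. A hedged C-level paraphrase, with stand-in types (the real control
transfer is the assembly, and kvmppc_entry_trampoline receives the guest
shadow MSR in r4):

	struct vcpu_sketch {
		unsigned long hflags;	/* bit 0: guest wants 32-byte dcbz */
		unsigned long shadow_msr;
	};

	struct hstate_sketch {
		unsigned char restore_hid5;
	};

	void kvmppc_entry_trampoline_sketch(unsigned long shadow_msr);

	static void kvm_start_lightweight_sketch(struct vcpu_sketch *vcpu,
						 struct hstate_sketch *hstate)
	{
		/* stb r3, HSTATE_RESTORE_HID5(r13): stash the dcbz32 flag
		 * so the real-mode exit code knows whether to restore HID5 */
		hstate->restore_hid5 = vcpu->hflags & 1;

		/* bl FUNC(kvmppc_entry_trampoline): one ordinary call
		 * replaces the old per-vcpu trampoline/rmcall pointer dance */
		kvmppc_entry_trampoline_sketch(vcpu->shadow_msr);
	}
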
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 6e3488b09519..d417511abfb1 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -875,8 +875,6 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	if (!p)
 		goto uninit_vcpu;
 
-	vcpu->arch.host_retip = kvm_return_point;
-	vcpu->arch.host_msr = mfmsr();
 #ifdef CONFIG_PPC_BOOK3S_64
 	/* default to book3s_64 (970fx) */
 	vcpu->arch.pvr = 0x3C0301;
@@ -887,16 +885,6 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
 	vcpu->arch.slb_nr = 64;
 
-	/* remember where some real-mode handlers are */
-	vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
-	vcpu->arch.trampoline_enter = __pa(kvmppc_handler_trampoline_enter);
-	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
-#ifdef CONFIG_PPC_BOOK3S_64
-	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
-#else
-	vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
-#endif
-
 	vcpu->arch.shadow_msr = MSR_USER64;
 
 	err = kvmppc_mmu_init(vcpu);
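
A side note on the removed #ifdef: "*(ulong*)kvmppc_rmcall" worked because a
ppc64 (ELFv1) function symbol points at a function descriptor whose first
word is the real entry address, while on 32-bit the symbol is the entry
address itself. A small illustration of that distinction (a sketch, not
kernel code):

	/* ppc64 ELFv1 function descriptor, sketched */
	struct func_desc {
		unsigned long entry;	/* real code address */
		unsigned long toc;	/* callee's TOC pointer */
		unsigned long env;	/* environment pointer (unused by C) */
	};

	static unsigned long real_entry_addr(const void *func_sym)
	{
	#ifdef __powerpc64__
		/* *(ulong *)f: first word of the descriptor */
		return ((const struct func_desc *)func_sym)->entry;
	#else
		/* (ulong)f: the symbol is the code address */
		return (unsigned long)func_sym;
	#endif
	}

With the rmcall trampoline gone, nothing needs to cache such addresses per
vcpu any more.
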
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 5ee66edd749d..34187585c507 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,9 +36,8 @@
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
-#define LOAD_SHADOW_VCPU(reg)	GET_PACA(reg)
-#define MSR_NOIRQ		MSR_KERNEL & ~(MSR_IR | MSR_DR)
 #define FUNC(name) 		GLUE(.,name)
+#define MTMSR_EERI(reg)		mtmsrd	(reg),1
 
 	.globl	kvmppc_skip_interrupt
 kvmppc_skip_interrupt:
@@ -68,8 +67,8 @@ kvmppc_skip_Hinterrupt:
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
-#define MSR_NOIRQ		MSR_KERNEL
 #define FUNC(name)		name
+#define MTMSR_EERI(reg)		mtmsr	(reg)
 
 .macro INTERRUPT_TRAMPOLINE intno
 
@@ -170,40 +169,24 @@ kvmppc_handler_skip_ins:
 #endif
 
 /*
- * This trampoline brings us back to a real mode handler
- *
- * Input Registers:
- *
- * R5 = SRR0
- * R6 = SRR1
- * LR = real-mode IP
+ * Call kvmppc_handler_trampoline_enter in real mode
  *
+ * On entry, r4 contains the guest shadow MSR
  */
-.global kvmppc_handler_lowmem_trampoline
-kvmppc_handler_lowmem_trampoline:
-
-	mtsrr0	r5
+_GLOBAL(kvmppc_entry_trampoline)
+	mfmsr	r5
+	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
+	toreal(r7)
+
+	li	r9, MSR_RI
+	ori	r9, r9, MSR_EE
+	andc	r9, r5, r9	/* Clear EE and RI in MSR value */
+	li	r6, MSR_IR | MSR_DR
+	ori	r6, r6, MSR_EE
+	andc	r6, r5, r6	/* Clear EE, DR and IR in MSR value */
+	MTMSR_EERI(r9)		/* Clear EE and RI in MSR */
+	mtsrr0	r7		/* before we set srr0/1 */
 	mtsrr1	r6
-	blr
-kvmppc_handler_lowmem_trampoline_end:
-
-/*
- * Call a function in real mode
- *
- * Input Registers:
- *
- * R3 = function
- * R4 = MSR
- * R5 = scratch register
- *
- */
-_GLOBAL(kvmppc_rmcall)
-	LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
-	mtmsr	r5		/* Disable relocation and interrupts, so mtsrr
-				   doesn't get interrupted */
-	sync
-	mtsrr0	r3
-	mtsrr1	r4
 	RFI
 
 #if defined(CONFIG_PPC_BOOK3S_32)
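
The two andc sequences in kvmppc_entry_trampoline compute the MSR values
used on the way in: r9 keeps the current MSR minus EE and RI (applied
immediately via MTMSR_EERI), while r6 additionally drops IR and DR
(installed through srr1 for the real-mode handler; RI stays set). The same
bit arithmetic restated in C, using the architectural MSR bit values from
asm/reg.h for illustration:

	#define MSR_EE	0x8000UL	/* external interrupt enable */
	#define MSR_RI	0x0002UL	/* recoverable interrupt */
	#define MSR_IR	0x0020UL	/* instruction relocation */
	#define MSR_DR	0x0010UL	/* data relocation */

	unsigned long msr_for_mtmsr(unsigned long msr)
	{
		/* li/ori/andc on r9: clear EE and RI only */
		return msr & ~(MSR_EE | MSR_RI);
	}

	unsigned long msr_for_srr1(unsigned long msr)
	{
		/* li/ori/andc on r6: clear EE, IR and DR; RI stays set */
		return msr & ~(MSR_EE | MSR_IR | MSR_DR);
	}
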
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 678b6be31693..0676ae249b9f 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -23,6 +23,7 @@
 
 #define GET_SHADOW_VCPU(reg)				\
 	mr	reg, r13
+#define MTMSR_EERI(reg)	mtmsrd	(reg),1
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -30,6 +31,7 @@
 	tophys(reg, r2);				\
 	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
 	tophys(reg, reg)
+#define MTMSR_EERI(reg)	mtmsr	(reg)
 
 #endif
 
@@ -57,10 +59,12 @@ kvmppc_handler_trampoline_enter:
 	/* Required state:
 	 *
 	 * MSR = ~IR|DR
-	 * R13 = PACA
 	 * R1 = host R1
 	 * R2 = host R2
-	 * R10 = guest MSR
+	 * R4 = guest shadow MSR
+	 * R5 = normal host MSR
+	 * R6 = current host MSR (EE, IR, DR off)
+	 * LR = highmem guest exit code
 	 * all other volatile GPRS = free
 	 * SVCPU[CR] = guest CR
 	 * SVCPU[XER] = guest XER
@@ -71,15 +75,15 @@ kvmppc_handler_trampoline_enter:
 	/* r3 = shadow vcpu */
 	GET_SHADOW_VCPU(r3)
 
+	/* Save guest exit handler address and MSR */
+	mflr	r0
+	PPC_STL	r0, HSTATE_VMHANDLER(r3)
+	PPC_STL	r5, HSTATE_HOST_MSR(r3)
+
 	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
 	PPC_STL	r1, HSTATE_HOST_R1(r3)
 	PPC_STL	r2, HSTATE_HOST_R2(r3)
 
-	/* Move SRR0 and SRR1 into the respective regs */
-	PPC_LL	r9, SVCPU_PC(r3)
-	mtsrr0	r9
-	mtsrr1	r10
-
 	/* Activate guest mode, so faults get handled by KVM */
 	li	r11, KVM_GUEST_MODE_GUEST
 	stb	r11, HSTATE_IN_GUEST(r3)
@@ -87,17 +91,46 @@ kvmppc_handler_trampoline_enter:
 	/* Switch to guest segment. This is subarch specific. */
 	LOAD_GUEST_SEGMENTS
 
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* Some guests may need to have dcbz set to 32 byte length.
+	 *
+	 * Usually we ensure that by patching the guest's instructions
+	 * to trap on dcbz and emulate it in the hypervisor.
+	 *
+	 * If we can, we should tell the CPU to use 32 byte dcbz though,
+	 * because that's a lot faster.
+	 */
+	lbz	r0, HSTATE_RESTORE_HID5(r3)
+	cmpwi	r0, 0
+	beq	no_dcbz32_on
+
+	mfspr	r0,SPRN_HID5
+	ori	r0, r0, 0x80		/* XXX HID5_dcbz32 = 0x80 */
+	mtspr	SPRN_HID5,r0
+no_dcbz32_on:
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 	/* Enter guest */
 
-	PPC_LL	r4, SVCPU_CTR(r3)
-	PPC_LL	r5, SVCPU_LR(r3)
-	lwz	r6, SVCPU_CR(r3)
-	lwz	r7, SVCPU_XER(r3)
+	PPC_LL	r8, SVCPU_CTR(r3)
+	PPC_LL	r9, SVCPU_LR(r3)
+	lwz	r10, SVCPU_CR(r3)
+	lwz	r11, SVCPU_XER(r3)
+
+	mtctr	r8
+	mtlr	r9
+	mtcr	r10
+	mtxer	r11
 
-	mtctr	r4
-	mtlr	r5
-	mtcr	r6
-	mtxer	r7
+	/* Move SRR0 and SRR1 into the respective regs */
+	PPC_LL	r9, SVCPU_PC(r3)
+	/* First clear RI in our current MSR value */
+	li	r0, MSR_RI
+	andc	r6, r6, r0
+	MTMSR_EERI(r6)
+	mtsrr0	r9
+	mtsrr1	r4
 
 	PPC_LL	r0, SVCPU_R0(r3)
 	PPC_LL	r1, SVCPU_R1(r3)
@@ -259,6 +292,43 @@ no_ld_last_inst:
 	/* Switch back to host MMU */
 	LOAD_HOST_SEGMENTS
 
+#ifdef CONFIG_PPC_BOOK3S_64
+
+	lbz	r5, HSTATE_RESTORE_HID5(r13)
+	cmpwi	r5, 0
+	beq	no_dcbz32_off
+
+	li	r4, 0
+	mfspr	r5,SPRN_HID5
+	rldimi	r5,r4,6,56
+	mtspr	SPRN_HID5,r5
+
+no_dcbz32_off:
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+	/*
+	 * For some interrupts, we need to call the real Linux
+	 * handler, so it can do work for us. This has to happen
+	 * as if the interrupt arrived from the kernel though,
+	 * so let's fake it here where most state is restored.
+	 *
+	 * Having set up SRR0/1 with the address where we want
+	 * to continue with relocation on (potentially in module
+	 * space), we either just go straight there with rfi[d],
+	 * or we jump to an interrupt handler with bctr if there
+	 * is an interrupt to be handled first.  In the latter
+	 * case, the rfi[d] at the end of the interrupt handler
+	 * will get us back to where we want to continue.
+	 */
+
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beq	1f
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
+	beq	1f
+	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
+1:	mtctr	r12
+
 	/* Register usage at this point:
 	 *
 	 * R1 = host R1
@@ -269,13 +339,15 @@ no_ld_last_inst:
 	 *
 	 */
 
-	/* RFI into the highmem handler */
-	mfmsr	r7
-	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME	/* Enable paging */
-	mtsrr1	r7
-	/* Load highmem handler address */
+	PPC_LL	r6, HSTATE_HOST_MSR(r13)
 	PPC_LL	r8, HSTATE_VMHANDLER(r13)
+
+	/* Restore host msr -> SRR1 */
+	mtsrr1	r6
+	/* Load highmem handler address */
 	mtsrr0	r8
 
+	/* RFI into the highmem handler, or jump to interrupt handler */
+	beqctr
 	RFI
 kvmppc_handler_trampoline_exit_end:
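
One subtlety in the new exit path: the BOOK3S_INTERRUPT_* exit codes double
as the low-memory vector addresses of the corresponding Linux handlers
(0x500, 0x900, 0xf00). That is why "mtctr r12" followed by "beqctr" can
jump straight to the right handler when CR0.EQ was set by one of the
compares, and fall through to the RFI otherwise. The predicate the cmpwi
chain computes, restated in C as a sketch:

	enum {
		BOOK3S_INTERRUPT_EXTERNAL    = 0x500,	/* also the vector address */
		BOOK3S_INTERRUPT_DECREMENTER = 0x900,
		BOOK3S_INTERRUPT_PERFMON     = 0xf00,
	};

	static int needs_linux_handler(unsigned long exit_code /* r12 */)
	{
		/* the PERFMON compare has no beq: it falls straight
		 * through to the "1:" label, which loads CTR either way */
		return exit_code == BOOK3S_INTERRUPT_EXTERNAL ||
		       exit_code == BOOK3S_INTERRUPT_DECREMENTER ||
		       exit_code == BOOK3S_INTERRUPT_PERFMON;
	}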