Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rmhandlers.S')
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1191 +++++++++++++++++------------
 1 file changed, 734 insertions(+), 457 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index be4fa04a37c9..e66d4ec04d95 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,6 +33,10 @@
 #error Need to fix lppaca and SLB shadow accesses in little endian mode
 #endif
 
+/* Values in HSTATE_NAPPING(r13) */
+#define NAPPING_CEDE	1
+#define NAPPING_NOVCPU	2
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
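The two NAPPING_* values let the wakeup path distinguish why a hardware thread went to nap. A C-style rendering of the state byte, as a sketch only — NAPPING_NONE is implied by the zero stores elsewhere in this file, not named by the patch:

    /* Sketch of the HSTATE_NAPPING(r13) state byte introduced above. */
    enum hstate_napping {
            NAPPING_NONE   = 0,  /* not napping on KVM's behalf (implied) */
            NAPPING_CEDE   = 1,  /* guest vcpu executed H_CEDE */
            NAPPING_NOVCPU = 2,  /* thread entered KVM with no vcpu to run */
    };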
@@ -57,29 +61,23 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
 	RFI
 
 kvmppc_call_hv_entry:
+	ld	r4, HSTATE_KVM_VCPU(r13)
 	bl	kvmppc_hv_entry
 
 	/* Back from guest - restore host state and return to caller */
 
+BEGIN_FTR_SECTION
 	/* Restore host DABR and DABRX */
 	ld	r5,HSTATE_DABR(r13)
 	li	r6,7
 	mtspr	SPRN_DABR,r5
 	mtspr	SPRN_DABRX,r6
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
 	/* Restore SPRG3 */
 	ld	r3,PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3,r3
 
-	/*
-	 * Reload DEC. HDEC interrupts were disabled when
-	 * we reloaded the host's LPCR value.
-	 */
-	ld	r3, HSTATE_DECEXP(r13)
-	mftb	r4
-	subf	r4, r4, r3
-	mtspr	SPRN_DEC, r4
-
 	/* Reload the host's PMU registers */
 	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
 	lbz	r4, LPPACA_PMCINUSE(r3)
@@ -115,6 +113,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 23:
 
 	/*
+	 * Reload DEC. HDEC interrupts were disabled when
+	 * we reloaded the host's LPCR value.
+	 */
+	ld	r3, HSTATE_DECEXP(r13)
+	mftb	r4
+	subf	r4, r4, r3
+	mtspr	SPRN_DEC, r4
+
+	/*
 	 * For external and machine check interrupts, we need
 	 * to call the Linux handler to process the interrupt.
 	 * We do that by jumping to absolute address 0x500 for
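The reload sequence moved here computes the new decrementer value as expiry-minus-now. A minimal C sketch of the same arithmetic, with mftb()/mtspr_dec() as stand-ins for the real instructions:

    #include <stdint.h>

    static uint64_t timebase;   /* stand-in for the timebase register */
    static int64_t  dec_shadow; /* stand-in for SPRN_DEC */

    static uint64_t mftb(void) { return timebase; }
    static void mtspr_dec(int64_t v) { dec_shadow = v; }

    /* subf r4,r4,r3: DEC = expiry - now; negative means already overdue */
    static void reload_host_dec(uint64_t hstate_decexp)
    {
            mtspr_dec((int64_t)(hstate_decexp - mftb()));
    }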
@@ -153,15 +160,75 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13:	b	machine_check_fwnmi
 
+kvmppc_primary_no_guest:
+	/* We handle this much like a ceded vcpu */
+	/* set our bit in napping_threads */
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	lbz	r7, HSTATE_PTID(r13)
+	li	r0, 1
+	sld	r0, r0, r7
+	addi	r6, r5, VCORE_NAPPING_THREADS
+1:	lwarx	r3, 0, r6
+	or	r3, r3, r0
+	stwcx.	r3, 0, r6
+	bne	1b
+	/* order napping_threads update vs testing entry_exit_count */
+	isync
+	li	r12, 0
+	lwz	r7, VCORE_ENTRY_EXIT(r5)
+	cmpwi	r7, 0x100
+	bge	kvm_novcpu_exit	/* another thread already exiting */
+	li	r3, NAPPING_NOVCPU
+	stb	r3, HSTATE_NAPPING(r13)
+	li	r3, 1
+	stb	r3, HSTATE_HWTHREAD_REQ(r13)
+
+	b	kvm_do_nap
+
+kvm_novcpu_wakeup:
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	li	r0, 0
+	stb	r0, HSTATE_NAPPING(r13)
+	stb	r0, HSTATE_HWTHREAD_REQ(r13)
+
+	/* check the wake reason */
+	bl	kvmppc_check_wake_reason
+
+	/* see if any other thread is already exiting */
+	lwz	r0, VCORE_ENTRY_EXIT(r5)
+	cmpwi	r0, 0x100
+	bge	kvm_novcpu_exit
+
+	/* clear our bit in napping_threads */
+	lbz	r7, HSTATE_PTID(r13)
+	li	r0, 1
+	sld	r0, r0, r7
+	addi	r6, r5, VCORE_NAPPING_THREADS
+4:	lwarx	r7, 0, r6
+	andc	r7, r7, r0
+	stwcx.	r7, 0, r6
+	bne	4b
+
+	/* See if the wake reason means we need to exit */
+	cmpdi	r3, 0
+	bge	kvm_novcpu_exit
+
+	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
+	ld	r4, HSTATE_KVM_VCPU(r13)
+	cmpdi	r4, 0
+	bne	kvmppc_got_guest
+
+kvm_novcpu_exit:
+	b	hdec_soon
+
 /*
- * We come in here when wakened from nap mode on a secondary hw thread.
+ * We come in here when wakened from nap mode.
  * Relocation is off and most register values are lost.
  * r13 points to the PACA.
  */
 	.globl	kvm_start_guest
 kvm_start_guest:
-	ld	r1,PACAEMERGSP(r13)
-	subi	r1,r1,STACK_FRAME_OVERHEAD
 	ld	r2,PACATOC(r13)
 
 	li	r0,KVM_HWTHREAD_IN_KVM
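The lwarx/stwcx. loop in kvmppc_primary_no_guest atomically sets this thread's bit in vcore->napping_threads, and the isync after it orders the update against the later read of entry_exit_count. In C the same pattern collapses to a single atomic fetch-or, sketched here assuming a 32-bit mask indexed by PTID:

    #include <stdatomic.h>
    #include <stdint.h>

    /* 1: lwarx / or / stwcx. / bne 1b is one atomic read-modify-write in C */
    static void set_napping_bit(_Atomic uint32_t *napping_threads, unsigned ptid)
    {
            atomic_fetch_or_explicit(napping_threads, UINT32_C(1) << ptid,
                                     memory_order_seq_cst);
    }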
@@ -173,8 +240,13 @@ kvm_start_guest:
 
 	/* were we napping due to cede? */
 	lbz	r0,HSTATE_NAPPING(r13)
-	cmpwi	r0,0
-	bne	kvm_end_cede
+	cmpwi	r0,NAPPING_CEDE
+	beq	kvm_end_cede
+	cmpwi	r0,NAPPING_NOVCPU
+	beq	kvm_novcpu_wakeup
+
+	ld	r1,PACAEMERGSP(r13)
+	subi	r1,r1,STACK_FRAME_OVERHEAD
 
 	/*
 	 * We weren't napping due to cede, so this must be a secondary
@@ -184,40 +256,22 @@ kvm_start_guest:
 	 */
 
 	/* Check the wake reason in SRR1 to see why we got here */
-	mfspr	r3,SPRN_SRR1
-	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
-	cmpwi	r3,4			/* was it an external interrupt? */
-	bne	27f			/* if not */
-	ld	r5,HSTATE_XICS_PHYS(r13)
-	li	r7,XICS_XIRR		/* if it was an external interrupt, */
-	lwzcix	r8,r5,r7		/* get and ack the interrupt */
-	sync
-	clrldi.	r9,r8,40		/* get interrupt source ID. */
-	beq	28f			/* none there? */
-	cmpwi	r9,XICS_IPI		/* was it an IPI? */
-	bne	29f
-	li	r0,0xff
-	li	r6,XICS_MFRR
-	stbcix	r0,r5,r6		/* clear IPI */
-	stwcix	r8,r5,r7		/* EOI the interrupt */
-	sync				/* order loading of vcpu after that */
+	bl	kvmppc_check_wake_reason
+	cmpdi	r3, 0
+	bge	kvm_no_guest
 
 	/* get vcpu pointer, NULL if we have no vcpu to run */
 	ld	r4,HSTATE_KVM_VCPU(r13)
 	cmpdi	r4,0
 	/* if we have no vcpu to run, go back to sleep */
 	beq	kvm_no_guest
-	b	30f
 
-27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
-	b	kvm_no_guest
-28:	/* SRR1 said external but ICP said nope?? */
-	b	kvm_no_guest
-29:	/* External non-IPI interrupt to offline secondary thread? help?? */
-	stw	r8,HSTATE_SAVED_XIRR(r13)
-	b	kvm_no_guest
+	/* Set HSTATE_DSCR(r13) to something sensible */
+	LOAD_REG_ADDR(r6, dscr_default)
+	ld	r6, 0(r6)
+	std	r6, HSTATE_DSCR(r13)
 
-30:	bl	kvmppc_hv_entry
+	bl	kvmppc_hv_entry
 
 	/* Back from the guest, go back to nap */
 	/* Clear our vcpu pointer so we don't come back in early */
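With the open-coded XICS poking gone, the secondary-thread wakeup path reduces to the control flow sketched below; kvmppc_check_wake_reason (added later in this patch) returns negative for a guest IPI and non-negative when the host must intervene. Names other than kvmppc_check_wake_reason and kvmppc_hv_entry are illustrative stand-ins:

    struct kvm_vcpu;                            /* opaque here */
    extern long kvmppc_check_wake_reason(void); /* added by this patch */
    extern void kvmppc_hv_entry(struct kvm_vcpu *);
    extern void kvm_no_guest(void);             /* back to nap */

    static void secondary_thread_wakeup(struct kvm_vcpu *vcpu)
    {
            if (kvmppc_check_wake_reason() >= 0 || !vcpu) {
                    kvm_no_guest();       /* nothing for us: nap again */
                    return;
            }
            kvmppc_hv_entry(vcpu);        /* run the guest vcpu */
    }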
@@ -229,18 +283,6 @@ kvm_start_guest:
 	 * visible we could be given another vcpu.
 	 */
 	lwsync
-	/* Clear any pending IPI - we're an offline thread */
-	ld	r5, HSTATE_XICS_PHYS(r13)
-	li	r7, XICS_XIRR
-	lwzcix	r3, r5, r7	/* ack any pending interrupt */
-	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
-	beq	37f
-	sync
-	li	r0, 0xff
-	li	r6, XICS_MFRR
-	stbcix	r0, r5, r6	/* clear the IPI */
-	stwcix	r3, r5, r7	/* EOI it */
-37:	sync
 
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
@@ -253,6 +295,7 @@ kvm_start_guest:
 kvm_no_guest:
 	li	r0, KVM_HWTHREAD_IN_NAP
 	stb	r0, HSTATE_HWTHREAD_STATE(r13)
+kvm_do_nap:
 	li	r3, LPCR_PECE0
 	mfspr	r4, SPRN_LPCR
 	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -277,7 +320,7 @@ kvmppc_hv_entry:
 
 	/* Required state:
 	 *
-	 * R4 = vcpu pointer
+	 * R4 = vcpu pointer (or NULL)
 	 * MSR = ~IR|DR
 	 * R13 = PACA
 	 * R1 = host R1
@@ -287,122 +330,12 @@ kvmppc_hv_entry:
 	std	r0, PPC_LR_STKOFF(r1)
 	stdu	r1, -112(r1)
 
-	/* Set partition DABR */
-	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
-	li	r5,3
-	ld	r6,VCPU_DABR(r4)
-	mtspr	SPRN_DABRX,r5
-	mtspr	SPRN_DABR,r6
-BEGIN_FTR_SECTION
-	isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/* Load guest PMU registers */
-	/* R4 is live here (vcpu pointer) */
-	li	r3, 1
-	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
-	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
-	isync
-	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
-	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
-	lwz	r6, VCPU_PMC + 8(r4)
-	lwz	r7, VCPU_PMC + 12(r4)
-	lwz	r8, VCPU_PMC + 16(r4)
-	lwz	r9, VCPU_PMC + 20(r4)
-BEGIN_FTR_SECTION
-	lwz	r10, VCPU_PMC + 24(r4)
-	lwz	r11, VCPU_PMC + 28(r4)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	mtspr	SPRN_PMC1, r3
-	mtspr	SPRN_PMC2, r5
-	mtspr	SPRN_PMC3, r6
-	mtspr	SPRN_PMC4, r7
-	mtspr	SPRN_PMC5, r8
-	mtspr	SPRN_PMC6, r9
-BEGIN_FTR_SECTION
-	mtspr	SPRN_PMC7, r10
-	mtspr	SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	ld	r3, VCPU_MMCR(r4)
-	ld	r5, VCPU_MMCR + 8(r4)
-	ld	r6, VCPU_MMCR + 16(r4)
-	ld	r7, VCPU_SIAR(r4)
-	ld	r8, VCPU_SDAR(r4)
-	mtspr	SPRN_MMCR1, r5
-	mtspr	SPRN_MMCRA, r6
-	mtspr	SPRN_SIAR, r7
-	mtspr	SPRN_SDAR, r8
-	mtspr	SPRN_MMCR0, r3
-	isync
-
-	/* Load up FP, VMX and VSX registers */
-	bl	kvmppc_load_fp
-
-	ld	r14, VCPU_GPR(R14)(r4)
-	ld	r15, VCPU_GPR(R15)(r4)
-	ld	r16, VCPU_GPR(R16)(r4)
-	ld	r17, VCPU_GPR(R17)(r4)
-	ld	r18, VCPU_GPR(R18)(r4)
-	ld	r19, VCPU_GPR(R19)(r4)
-	ld	r20, VCPU_GPR(R20)(r4)
-	ld	r21, VCPU_GPR(R21)(r4)
-	ld	r22, VCPU_GPR(R22)(r4)
-	ld	r23, VCPU_GPR(R23)(r4)
-	ld	r24, VCPU_GPR(R24)(r4)
-	ld	r25, VCPU_GPR(R25)(r4)
-	ld	r26, VCPU_GPR(R26)(r4)
-	ld	r27, VCPU_GPR(R27)(r4)
-	ld	r28, VCPU_GPR(R28)(r4)
-	ld	r29, VCPU_GPR(R29)(r4)
-	ld	r30, VCPU_GPR(R30)(r4)
-	ld	r31, VCPU_GPR(R31)(r4)
-
-BEGIN_FTR_SECTION
-	/* Switch DSCR to guest value */
-	ld	r5, VCPU_DSCR(r4)
-	mtspr	SPRN_DSCR, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/*
-	 * Set the decrementer to the guest decrementer.
-	 */
-	ld	r8,VCPU_DEC_EXPIRES(r4)
-	mftb	r7
-	subf	r3,r7,r8
-	mtspr	SPRN_DEC,r3
-	stw	r3,VCPU_DEC(r4)
-
-	ld	r5, VCPU_SPRG0(r4)
-	ld	r6, VCPU_SPRG1(r4)
-	ld	r7, VCPU_SPRG2(r4)
-	ld	r8, VCPU_SPRG3(r4)
-	mtspr	SPRN_SPRG0, r5
-	mtspr	SPRN_SPRG1, r6
-	mtspr	SPRN_SPRG2, r7
-	mtspr	SPRN_SPRG3, r8
-
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
 
-	/* Load up DAR and DSISR */
-	ld	r5, VCPU_DAR(r4)
-	lwz	r6, VCPU_DSISR(r4)
-	mtspr	SPRN_DAR, r5
-	mtspr	SPRN_DSISR, r6
-
 	li	r6, KVM_GUEST_MODE_HOST_HV
 	stb	r6, HSTATE_IN_GUEST(r13)
 
-BEGIN_FTR_SECTION
-	/* Restore AMR and UAMOR, set AMOR to all 1s */
-	ld	r5,VCPU_AMR(r4)
-	ld	r6,VCPU_UAMOR(r4)
-	li	r7,-1
-	mtspr	SPRN_AMR,r5
-	mtspr	SPRN_UAMOR,r6
-	mtspr	SPRN_AMOR,r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
 	/* Clear out SLB */
 	li	r6,0
 	slbmte	r6,r6
@@ -428,8 +361,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	bne	21b
 
 	/* Primary thread switches to guest partition. */
-	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
-	lwz	r6,VCPU_PTID(r4)
+	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
+	lbz	r6,HSTATE_PTID(r13)
 	cmpwi	r6,0
 	bne	20f
 	ld	r6,KVM_SDR1(r9)
@@ -457,7 +390,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	andc	r7,r7,r0
 	stdcx.	r7,0,r6
 	bne	23b
-	li	r6,128			/* and flush the TLB */
+	/* Flush the TLB of any entries for this LPID */
+	/* use arch 2.07S as a proxy for POWER8 */
+BEGIN_FTR_SECTION
+	li	r6,512			/* POWER8 has 512 sets */
+FTR_SECTION_ELSE
+	li	r6,128			/* POWER7 has 128 sets */
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
 	mtctr	r6
 	li	r7,0x800		/* IS field = 0b10 */
 	ptesync
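The loop body (in unchanged context below this hunk) issues one tlbiel per TLB congruence class, so the set count is the only POWER7/POWER8 difference. A C sketch, with tlbiel() standing in for the inline instruction and the 0x1000 set stride assumed from the surrounding code rather than shown in this hunk:

    /* IS = 0b10 in RB selects "invalidate this congruence class" */
    static void tlbiel(unsigned long rb) { (void)rb; /* inline asm in the kernel */ }

    static void flush_guest_tlb(int nr_sets)    /* 128 on P7, 512 on P8 */
    {
            unsigned long rb = 0x800;

            for (int set = 0; set < nr_sets; set++) {
                    tlbiel(rb);
                    rb += 0x1000;       /* next set index (assumed stride) */
            }
    }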
@@ -487,6 +426,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	beq	38f
 	mtspr	SPRN_PCR, r7
 38:
+
+BEGIN_FTR_SECTION
+	/* DPDES is shared between threads */
+	ld	r8, VCORE_DPDES(r5)
+	mtspr	SPRN_DPDES, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 	li	r0,1
 	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
 	b	10f
@@ -503,32 +449,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr	SPRN_RMOR,r8
 	isync
 
-	/* Increment yield count if they have a VPA */
-	ld	r3, VCPU_VPA(r4)
-	cmpdi	r3, 0
-	beq	25f
-	lwz	r5, LPPACA_YIELDCOUNT(r3)
-	addi	r5, r5, 1
-	stw	r5, LPPACA_YIELDCOUNT(r3)
-	li	r6, 1
-	stb	r6, VCPU_VPA_DIRTY(r4)
-25:
 	/* Check if HDEC expires soon */
 	mfspr	r3,SPRN_HDEC
-	cmpwi	r3,10
+	cmpwi	r3,512		/* 1 microsecond */
 	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
-	mr	r9,r4
 	blt	hdec_soon
-
-	/* Save purr/spurr */
-	mfspr	r5,SPRN_PURR
-	mfspr	r6,SPRN_SPURR
-	std	r5,HSTATE_PURR(r13)
-	std	r6,HSTATE_SPURR(r13)
-	ld	r7,VCPU_PURR(r4)
-	ld	r8,VCPU_SPURR(r4)
-	mtspr	SPRN_PURR,r7
-	mtspr	SPRN_SPURR,r8
 	b	31f
 
 	/*
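The bail-out threshold grows from 10 to 512 timebase ticks. The server timebase runs at 512 MHz, so 512 ticks is the one microsecond the new comment names; entering the guest with less HDEC headroom than that would just bounce straight back out. Expressed as a constant:

    #define TB_TICKS_PER_SEC  512000000UL   /* POWER7/POWER8 timebase */
    #define HDEC_THRESHOLD    (TB_TICKS_PER_SEC / 1000000)  /* 512 ticks = 1 us */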
@@ -539,7 +464,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 * We also have to invalidate the TLB since its
 	 * entries aren't tagged with the LPID.
 	 */
-30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
+30:	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
 
 	/* first take native_tlbie_lock */
 	.section ".toc","aw"
@@ -604,7 +530,6 @@ toc_tlbie_lock:
 	mfspr	r3,SPRN_HDEC
 	cmpwi	r3,10
 	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
-	mr	r9,r4
 	blt	hdec_soon
 
 	/* Enable HDEC interrupts */
@@ -619,9 +544,14 @@ toc_tlbie_lock:
 	mfspr	r0,SPRN_HID0
 	mfspr	r0,SPRN_HID0
 	mfspr	r0,SPRN_HID0
+31:
+	/* Do we have a guest vcpu to run? */
+	cmpdi	r4, 0
+	beq	kvmppc_primary_no_guest
+kvmppc_got_guest:
 
 	/* Load up guest SLB entries */
-31:	lwz	r5,VCPU_SLB_MAX(r4)
+	lwz	r5,VCPU_SLB_MAX(r4)
 	cmpwi	r5,0
 	beq	9f
 	mtctr	r5
@@ -632,6 +562,209 @@ toc_tlbie_lock:
 	addi	r6,r6,VCPU_SLB_SIZE
 	bdnz	1b
 9:
+	/* Increment yield count if they have a VPA */
+	ld	r3, VCPU_VPA(r4)
+	cmpdi	r3, 0
+	beq	25f
+	lwz	r5, LPPACA_YIELDCOUNT(r3)
+	addi	r5, r5, 1
+	stw	r5, LPPACA_YIELDCOUNT(r3)
+	li	r6, 1
+	stb	r6, VCPU_VPA_DIRTY(r4)
+25:
+
+BEGIN_FTR_SECTION
+	/* Save purr/spurr */
+	mfspr	r5,SPRN_PURR
+	mfspr	r6,SPRN_SPURR
+	std	r5,HSTATE_PURR(r13)
+	std	r6,HSTATE_SPURR(r13)
+	ld	r7,VCPU_PURR(r4)
+	ld	r8,VCPU_SPURR(r4)
+	mtspr	SPRN_PURR,r7
+	mtspr	SPRN_SPURR,r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+	/* Set partition DABR */
+	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+	lwz	r5,VCPU_DABRX(r4)
+	ld	r6,VCPU_DABR(r4)
+	mtspr	SPRN_DABRX,r5
+	mtspr	SPRN_DABR,r6
+ BEGIN_FTR_SECTION_NESTED(89)
+	isync
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+
+	/* Load guest PMU registers */
+	/* R4 is live here (vcpu pointer) */
+	li	r3, 1
+	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
+	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
+	isync
+	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
+	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
+	lwz	r6, VCPU_PMC + 8(r4)
+	lwz	r7, VCPU_PMC + 12(r4)
+	lwz	r8, VCPU_PMC + 16(r4)
+	lwz	r9, VCPU_PMC + 20(r4)
+BEGIN_FTR_SECTION
+	lwz	r10, VCPU_PMC + 24(r4)
+	lwz	r11, VCPU_PMC + 28(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	mtspr	SPRN_PMC1, r3
+	mtspr	SPRN_PMC2, r5
+	mtspr	SPRN_PMC3, r6
+	mtspr	SPRN_PMC4, r7
+	mtspr	SPRN_PMC5, r8
+	mtspr	SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+	mtspr	SPRN_PMC7, r10
+	mtspr	SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	ld	r3, VCPU_MMCR(r4)
+	ld	r5, VCPU_MMCR + 8(r4)
+	ld	r6, VCPU_MMCR + 16(r4)
+	ld	r7, VCPU_SIAR(r4)
+	ld	r8, VCPU_SDAR(r4)
+	mtspr	SPRN_MMCR1, r5
+	mtspr	SPRN_MMCRA, r6
+	mtspr	SPRN_SIAR, r7
+	mtspr	SPRN_SDAR, r8
+BEGIN_FTR_SECTION
+	ld	r5, VCPU_MMCR + 24(r4)
+	ld	r6, VCPU_SIER(r4)
+	lwz	r7, VCPU_PMC + 24(r4)
+	lwz	r8, VCPU_PMC + 28(r4)
+	ld	r9, VCPU_MMCR + 32(r4)
+	mtspr	SPRN_MMCR2, r5
+	mtspr	SPRN_SIER, r6
+	mtspr	SPRN_SPMC1, r7
+	mtspr	SPRN_SPMC2, r8
+	mtspr	SPRN_MMCRS, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+	mtspr	SPRN_MMCR0, r3
+	isync
+
+	/* Load up FP, VMX and VSX registers */
+	bl	kvmppc_load_fp
+
+	ld	r14, VCPU_GPR(R14)(r4)
+	ld	r15, VCPU_GPR(R15)(r4)
+	ld	r16, VCPU_GPR(R16)(r4)
+	ld	r17, VCPU_GPR(R17)(r4)
+	ld	r18, VCPU_GPR(R18)(r4)
+	ld	r19, VCPU_GPR(R19)(r4)
+	ld	r20, VCPU_GPR(R20)(r4)
+	ld	r21, VCPU_GPR(R21)(r4)
+	ld	r22, VCPU_GPR(R22)(r4)
+	ld	r23, VCPU_GPR(R23)(r4)
+	ld	r24, VCPU_GPR(R24)(r4)
+	ld	r25, VCPU_GPR(R25)(r4)
+	ld	r26, VCPU_GPR(R26)(r4)
+	ld	r27, VCPU_GPR(R27)(r4)
+	ld	r28, VCPU_GPR(R28)(r4)
+	ld	r29, VCPU_GPR(R29)(r4)
+	ld	r30, VCPU_GPR(R30)(r4)
+	ld	r31, VCPU_GPR(R31)(r4)
+
+BEGIN_FTR_SECTION
+	/* Switch DSCR to guest value */
+	ld	r5, VCPU_DSCR(r4)
+	mtspr	SPRN_DSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+	/* Skip next section on POWER7 or PPC970 */
+	b	8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	/* Load up POWER8-specific registers */
+	ld	r5, VCPU_IAMR(r4)
+	lwz	r6, VCPU_PSPB(r4)
+	ld	r7, VCPU_FSCR(r4)
+	mtspr	SPRN_IAMR, r5
+	mtspr	SPRN_PSPB, r6
+	mtspr	SPRN_FSCR, r7
+	ld	r5, VCPU_DAWR(r4)
+	ld	r6, VCPU_DAWRX(r4)
+	ld	r7, VCPU_CIABR(r4)
+	ld	r8, VCPU_TAR(r4)
+	mtspr	SPRN_DAWR, r5
+	mtspr	SPRN_DAWRX, r6
+	mtspr	SPRN_CIABR, r7
+	mtspr	SPRN_TAR, r8
+	ld	r5, VCPU_IC(r4)
+	ld	r6, VCPU_VTB(r4)
+	mtspr	SPRN_IC, r5
+	mtspr	SPRN_VTB, r6
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	ld	r5, VCPU_TFHAR(r4)
+	ld	r6, VCPU_TFIAR(r4)
+	ld	r7, VCPU_TEXASR(r4)
+	mtspr	SPRN_TFHAR, r5
+	mtspr	SPRN_TFIAR, r6
+	mtspr	SPRN_TEXASR, r7
+#endif
+	ld	r8, VCPU_EBBHR(r4)
+	mtspr	SPRN_EBBHR, r8
+	ld	r5, VCPU_EBBRR(r4)
+	ld	r6, VCPU_BESCR(r4)
+	ld	r7, VCPU_CSIGR(r4)
+	ld	r8, VCPU_TACR(r4)
+	mtspr	SPRN_EBBRR, r5
+	mtspr	SPRN_BESCR, r6
+	mtspr	SPRN_CSIGR, r7
+	mtspr	SPRN_TACR, r8
+	ld	r5, VCPU_TCSCR(r4)
+	ld	r6, VCPU_ACOP(r4)
+	lwz	r7, VCPU_GUEST_PID(r4)
+	ld	r8, VCPU_WORT(r4)
+	mtspr	SPRN_TCSCR, r5
+	mtspr	SPRN_ACOP, r6
+	mtspr	SPRN_PID, r7
+	mtspr	SPRN_WORT, r8
+8:
+
+	/*
+	 * Set the decrementer to the guest decrementer.
+	 */
+	ld	r8,VCPU_DEC_EXPIRES(r4)
+	mftb	r7
+	subf	r3,r7,r8
+	mtspr	SPRN_DEC,r3
+	stw	r3,VCPU_DEC(r4)
+
+	ld	r5, VCPU_SPRG0(r4)
+	ld	r6, VCPU_SPRG1(r4)
+	ld	r7, VCPU_SPRG2(r4)
+	ld	r8, VCPU_SPRG3(r4)
+	mtspr	SPRN_SPRG0, r5
+	mtspr	SPRN_SPRG1, r6
+	mtspr	SPRN_SPRG2, r7
+	mtspr	SPRN_SPRG3, r8
+
+	/* Load up DAR and DSISR */
+	ld	r5, VCPU_DAR(r4)
+	lwz	r6, VCPU_DSISR(r4)
+	mtspr	SPRN_DAR, r5
+	mtspr	SPRN_DSISR, r6
+
+BEGIN_FTR_SECTION
+	/* Restore AMR and UAMOR, set AMOR to all 1s */
+	ld	r5,VCPU_AMR(r4)
+	ld	r6,VCPU_UAMOR(r4)
+	li	r7,-1
+	mtspr	SPRN_AMR,r5
+	mtspr	SPRN_UAMOR,r6
+	mtspr	SPRN_AMOR,r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Restore state of CTRL run bit; assume 1 on entry */
 	lwz	r5,VCPU_CTRL(r4)
@@ -647,48 +780,53 @@ toc_tlbie_lock:
 	mtctr	r6
 	mtxer	r7
 
+kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
 	ld	r10, VCPU_PC(r4)
 	ld	r11, VCPU_MSR(r4)
-kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
 	ld	r6, VCPU_SRR0(r4)
 	ld	r7, VCPU_SRR1(r4)
+	mtspr	SPRN_SRR0, r6
+	mtspr	SPRN_SRR1, r7
 
+deliver_guest_interrupt:
 	/* r11 = vcpu->arch.msr & ~MSR_HV */
 	rldicl	r11, r11, 63 - MSR_HV_LG, 1
 	rotldi	r11, r11, 1 + MSR_HV_LG
 	ori	r11, r11, MSR_ME
 
 	/* Check if we can deliver an external or decrementer interrupt now */
-	ld	r0,VCPU_PENDING_EXC(r4)
-	lis	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
-	and	r0,r0,r8
-	cmpdi	cr1,r0,0
-	andi.	r0,r11,MSR_EE
-	beq	cr1,11f
+	ld	r0, VCPU_PENDING_EXC(r4)
+	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
+	cmpdi	cr1, r0, 0
+	andi.	r8, r11, MSR_EE
 BEGIN_FTR_SECTION
-	mfspr	r8,SPRN_LPCR
-	ori	r8,r8,LPCR_MER
-	mtspr	SPRN_LPCR,r8
+	mfspr	r8, SPRN_LPCR
+	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
+	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
+	mtspr	SPRN_LPCR, r8
 	isync
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	beq	5f
-	li	r0,BOOK3S_INTERRUPT_EXTERNAL
-12:	mr	r6,r10
-	mr	r10,r0
-	mr	r7,r11
-	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11,r11,63
-	b	5f
-11:	beq	5f
-	mfspr	r0,SPRN_DEC
-	cmpwi	r0,0
-	li	r0,BOOK3S_INTERRUPT_DECREMENTER
-	blt	12b
+	li	r0, BOOK3S_INTERRUPT_EXTERNAL
+	bne	cr1, 12f
+	mfspr	r0, SPRN_DEC
+	cmpwi	r0, 0
+	li	r0, BOOK3S_INTERRUPT_DECREMENTER
+	bge	5f
 
-	/* Move SRR0 and SRR1 into the respective regs */
-5:	mtspr	SPRN_SRR0, r6
-	mtspr	SPRN_SRR1, r7
+12:	mtspr	SPRN_SRR0, r10
+	mr	r10,r0
+	mtspr	SPRN_SRR1, r11
+	ld	r11, VCPU_INTR_MSR(r4)
+5:
 
+/*
+ * Required state:
+ * R4 = vcpu
+ * R10: value for HSRR0
+ * R11: value for HSRR1
+ * R13 = PACA
+ */
 fast_guest_return:
 	li	r0,0
 	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
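The rewritten delivery path isolates the pending EXTERNAL_LEVEL bit with rldicl and deposits it at the MER position of LPCR with rldimi, so MER is now cleared as well as set. A C sketch of that bit surgery; the shift behaviour mirrors the instruction operands, LPCR_MER being bit 11 as used elsewhere in the kernel:

    #include <stdint.h>

    #define LPCR_MER_SH 11      /* LPCR[MER] bit position */

    /* prio is BOOK3S_IRQPRIO_EXTERNAL_LEVEL, whatever value the headers give it */
    static uint64_t fold_pending_ext_into_lpcr(uint64_t pending_exc,
                                               uint64_t lpcr, int prio)
    {
            uint64_t ext = (pending_exc >> prio) & 1;   /* rldicl ...,63 */

            /* rldimi: clear the old MER bit, deposit the new one */
            return (lpcr & ~(UINT64_C(1) << LPCR_MER_SH)) | (ext << LPCR_MER_SH);
    }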
@@ -868,39 +1006,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	/* External interrupt, first check for host_ipi. If this is
 	 * set, we know the host wants us out so let's do it now
 	 */
-do_ext_interrupt:
 	bl	kvmppc_read_intr
 	cmpdi	r3, 0
 	bgt	ext_interrupt_to_host
 
-	/* Allright, looks like an IPI for the guest, we need to set MER */
 	/* Check if any CPU is heading out to the host, if so head out too */
 	ld	r5, HSTATE_KVM_VCORE(r13)
 	lwz	r0, VCORE_ENTRY_EXIT(r5)
 	cmpwi	r0, 0x100
 	bge	ext_interrupt_to_host
 
-	/* See if there is a pending interrupt for the guest */
-	mfspr	r8, SPRN_LPCR
-	ld	r0, VCPU_PENDING_EXC(r9)
-	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
-	rldicl.	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
-	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
-	beq	2f
-
-	/* And if the guest EE is set, we can deliver immediately, else
-	 * we return to the guest with MER set
-	 */
-	andi.	r0, r11, MSR_EE
-	beq	2f
-	mtspr	SPRN_SRR0, r10
-	mtspr	SPRN_SRR1, r11
-	li	r10, BOOK3S_INTERRUPT_EXTERNAL
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
-2:	mr	r4, r9
-	mtspr	SPRN_LPCR, r8
-	b	fast_guest_return
+	/* Return to guest after delivering any pending interrupt */
+	mr	r4, r9
+	b	deliver_guest_interrupt
 
 ext_interrupt_to_host:
 
@@ -975,13 +1093,194 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_SPURR,r4
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 
+	/* Save DEC */
+	mfspr	r5,SPRN_DEC
+	mftb	r6
+	extsw	r5,r5
+	add	r5,r5,r6
+	std	r5,VCPU_DEC_EXPIRES(r9)
+
+BEGIN_FTR_SECTION
+	b	8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	/* Save POWER8-specific registers */
+	mfspr	r5, SPRN_IAMR
+	mfspr	r6, SPRN_PSPB
+	mfspr	r7, SPRN_FSCR
+	std	r5, VCPU_IAMR(r9)
+	stw	r6, VCPU_PSPB(r9)
+	std	r7, VCPU_FSCR(r9)
+	mfspr	r5, SPRN_IC
+	mfspr	r6, SPRN_VTB
+	mfspr	r7, SPRN_TAR
+	std	r5, VCPU_IC(r9)
+	std	r6, VCPU_VTB(r9)
+	std	r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+#endif
+	mfspr	r8, SPRN_EBBHR
+	std	r8, VCPU_EBBHR(r9)
+	mfspr	r5, SPRN_EBBRR
+	mfspr	r6, SPRN_BESCR
+	mfspr	r7, SPRN_CSIGR
+	mfspr	r8, SPRN_TACR
+	std	r5, VCPU_EBBRR(r9)
+	std	r6, VCPU_BESCR(r9)
+	std	r7, VCPU_CSIGR(r9)
+	std	r8, VCPU_TACR(r9)
+	mfspr	r5, SPRN_TCSCR
+	mfspr	r6, SPRN_ACOP
+	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_WORT
+	std	r5, VCPU_TCSCR(r9)
+	std	r6, VCPU_ACOP(r9)
+	stw	r7, VCPU_GUEST_PID(r9)
+	std	r8, VCPU_WORT(r9)
+8:
+
+	/* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
+	mfspr	r5,SPRN_AMR
+	mfspr	r6,SPRN_UAMOR
+	std	r5,VCPU_AMR(r9)
+	std	r6,VCPU_UAMOR(r9)
+	li	r6,0
+	mtspr	SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+	/* Switch DSCR back to host value */
+BEGIN_FTR_SECTION
+	mfspr	r8, SPRN_DSCR
+	ld	r7, HSTATE_DSCR(r13)
+	std	r8, VCPU_DSCR(r9)
+	mtspr	SPRN_DSCR, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+	/* Save non-volatile GPRs */
+	std	r14, VCPU_GPR(R14)(r9)
+	std	r15, VCPU_GPR(R15)(r9)
+	std	r16, VCPU_GPR(R16)(r9)
+	std	r17, VCPU_GPR(R17)(r9)
+	std	r18, VCPU_GPR(R18)(r9)
+	std	r19, VCPU_GPR(R19)(r9)
+	std	r20, VCPU_GPR(R20)(r9)
+	std	r21, VCPU_GPR(R21)(r9)
+	std	r22, VCPU_GPR(R22)(r9)
+	std	r23, VCPU_GPR(R23)(r9)
+	std	r24, VCPU_GPR(R24)(r9)
+	std	r25, VCPU_GPR(R25)(r9)
+	std	r26, VCPU_GPR(R26)(r9)
+	std	r27, VCPU_GPR(R27)(r9)
+	std	r28, VCPU_GPR(R28)(r9)
+	std	r29, VCPU_GPR(R29)(r9)
+	std	r30, VCPU_GPR(R30)(r9)
+	std	r31, VCPU_GPR(R31)(r9)
+
+	/* Save SPRGs */
+	mfspr	r3, SPRN_SPRG0
+	mfspr	r4, SPRN_SPRG1
+	mfspr	r5, SPRN_SPRG2
+	mfspr	r6, SPRN_SPRG3
+	std	r3, VCPU_SPRG0(r9)
+	std	r4, VCPU_SPRG1(r9)
+	std	r5, VCPU_SPRG2(r9)
+	std	r6, VCPU_SPRG3(r9)
+
+	/* save FP state */
+	mr	r3, r9
+	bl	kvmppc_save_fp
+
+	/* Increment yield count if they have a VPA */
+	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
+	cmpdi	r8, 0
+	beq	25f
+	lwz	r3, LPPACA_YIELDCOUNT(r8)
+	addi	r3, r3, 1
+	stw	r3, LPPACA_YIELDCOUNT(r8)
+	li	r3, 1
+	stb	r3, VCPU_VPA_DIRTY(r9)
+25:
+	/* Save PMU registers if requested */
+	/* r8 and cr0.eq are live here */
+	li	r3, 1
+	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
+	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
+	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
+	mfspr	r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+	/* On P7, clear MMCRA in order to disable SDAR updates */
+	li	r7, 0
+	mtspr	SPRN_MMCRA, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+	isync
+	beq	21f			/* if no VPA, save PMU stuff anyway */
+	lbz	r7, LPPACA_PMCINUSE(r8)
+	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
+	bne	21f
+	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
+	b	22f
+21:	mfspr	r5, SPRN_MMCR1
+	mfspr	r7, SPRN_SIAR
+	mfspr	r8, SPRN_SDAR
+	std	r4, VCPU_MMCR(r9)
+	std	r5, VCPU_MMCR + 8(r9)
+	std	r6, VCPU_MMCR + 16(r9)
+	std	r7, VCPU_SIAR(r9)
+	std	r8, VCPU_SDAR(r9)
+	mfspr	r3, SPRN_PMC1
+	mfspr	r4, SPRN_PMC2
+	mfspr	r5, SPRN_PMC3
+	mfspr	r6, SPRN_PMC4
+	mfspr	r7, SPRN_PMC5
+	mfspr	r8, SPRN_PMC6
+BEGIN_FTR_SECTION
+	mfspr	r10, SPRN_PMC7
+	mfspr	r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	stw	r3, VCPU_PMC(r9)
+	stw	r4, VCPU_PMC + 4(r9)
+	stw	r5, VCPU_PMC + 8(r9)
+	stw	r6, VCPU_PMC + 12(r9)
+	stw	r7, VCPU_PMC + 16(r9)
+	stw	r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+	stw	r10, VCPU_PMC + 24(r9)
+	stw	r11, VCPU_PMC + 28(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+BEGIN_FTR_SECTION
+	mfspr	r4, SPRN_MMCR2
+	mfspr	r5, SPRN_SIER
+	mfspr	r6, SPRN_SPMC1
+	mfspr	r7, SPRN_SPMC2
+	mfspr	r8, SPRN_MMCRS
+	std	r4, VCPU_MMCR + 24(r9)
+	std	r5, VCPU_SIER(r9)
+	stw	r6, VCPU_PMC + 24(r9)
+	stw	r7, VCPU_PMC + 28(r9)
+	std	r8, VCPU_MMCR + 32(r9)
+	lis	r4, 0x8000
+	mtspr	SPRN_MMCRS, r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+22:
 	/* Clear out SLB */
 	li	r5,0
 	slbmte	r5,r5
 	slbia
 	ptesync
 
-hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
+hdec_soon:			/* r12 = trap, r13 = paca */
 BEGIN_FTR_SECTION
 	b	32f
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
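The "Save DEC" sequence added at the top of this hunk converts the running down-counter back into an absolute expiry time. The extsw matters: DEC is a 32-bit register that may already have gone negative. In C:

    #include <stdint.h>

    /* mfspr r5,SPRN_DEC; mftb r6; extsw r5,r5; add r5,r5,r6 */
    static uint64_t guest_dec_expires(uint32_t dec, uint64_t tb)
    {
            return (uint64_t)(int64_t)(int32_t)dec + tb;
    }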
@@ -1014,8 +1313,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 */
 	cmpwi	r3,0x100	/* Are we the first here? */
 	bge	43f
-	cmpwi	r3,1		/* Are any other threads in the guest? */
-	ble	43f
 	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
 	beq	40f
 	li	r0,0
@@ -1026,7 +1323,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 * doesn't wake CPUs up from nap.
 	 */
 	lwz	r3,VCORE_NAPPING_THREADS(r5)
-	lwz	r4,VCPU_PTID(r9)
+	lbz	r4,HSTATE_PTID(r13)
 	li	r0,1
 	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
@@ -1045,10 +1342,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	addi	r6,r6,PACA_SIZE
 	bne	42b
 
+secondary_too_late:
 	/* Secondary threads wait for primary to do partition switch */
-43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
-	ld	r5,HSTATE_KVM_VCORE(r13)
-	lwz	r3,VCPU_PTID(r9)
+43:	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
+	lbz	r3,HSTATE_PTID(r13)
 	cmpwi	r3,0
 	beq	15f
 	HMT_LOW
@@ -1076,6 +1374,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr	SPRN_LPID,r7
 	isync
 
+BEGIN_FTR_SECTION
+	/* DPDES is shared between threads */
+	mfspr	r7, SPRN_DPDES
+	std	r7, VCORE_DPDES(r5)
+	/* clear DPDES so we don't get guest doorbells in the host */
+	li	r8, 0
+	mtspr	SPRN_DPDES, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
 	/* Subtract timebase offset from timebase */
 	ld	r8,VCORE_TB_OFFSET(r5)
 	cmpdi	r8,0
@@ -1113,7 +1420,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 * We have to lock against concurrent tlbies, and
 	 * we have to flush the whole TLB.
 	 */
-32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
+32:	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
 
 	/* Take the guest's tlbie_lock */
 #ifdef __BIG_ENDIAN__
@@ -1203,6 +1511,56 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	add	r5,r5,r6
 	std	r5,VCPU_DEC_EXPIRES(r9)
 
+BEGIN_FTR_SECTION
+	b	8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	/* Save POWER8-specific registers */
+	mfspr	r5, SPRN_IAMR
+	mfspr	r6, SPRN_PSPB
+	mfspr	r7, SPRN_FSCR
+	std	r5, VCPU_IAMR(r9)
+	stw	r6, VCPU_PSPB(r9)
+	std	r7, VCPU_FSCR(r9)
+	mfspr	r5, SPRN_IC
+	mfspr	r6, SPRN_VTB
+	mfspr	r7, SPRN_TAR
+	std	r5, VCPU_IC(r9)
+	std	r6, VCPU_VTB(r9)
+	std	r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+#endif
+	mfspr	r8, SPRN_EBBHR
+	std	r8, VCPU_EBBHR(r9)
+	mfspr	r5, SPRN_EBBRR
+	mfspr	r6, SPRN_BESCR
+	mfspr	r7, SPRN_CSIGR
+	mfspr	r8, SPRN_TACR
+	std	r5, VCPU_EBBRR(r9)
+	std	r6, VCPU_BESCR(r9)
+	std	r7, VCPU_CSIGR(r9)
+	std	r8, VCPU_TACR(r9)
+	mfspr	r5, SPRN_TCSCR
+	mfspr	r6, SPRN_ACOP
+	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_WORT
+	std	r5, VCPU_TCSCR(r9)
+	std	r6, VCPU_ACOP(r9)
+	stw	r7, VCPU_GUEST_PID(r9)
+	std	r8, VCPU_WORT(r9)
+8:
+
 	/* Save and reset AMR and UAMOR before turning on the MMU */
 BEGIN_FTR_SECTION
 	mfspr	r5,SPRN_AMR
@@ -1217,130 +1575,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
-	/* Switch DSCR back to host value */
-BEGIN_FTR_SECTION
-	mfspr	r8, SPRN_DSCR
-	ld	r7, HSTATE_DSCR(r13)
-	std	r8, VCPU_DSCR(r9)
-	mtspr	SPRN_DSCR, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
-	/* Save non-volatile GPRs */
-	std	r14, VCPU_GPR(R14)(r9)
-	std	r15, VCPU_GPR(R15)(r9)
-	std	r16, VCPU_GPR(R16)(r9)
-	std	r17, VCPU_GPR(R17)(r9)
-	std	r18, VCPU_GPR(R18)(r9)
-	std	r19, VCPU_GPR(R19)(r9)
-	std	r20, VCPU_GPR(R20)(r9)
-	std	r21, VCPU_GPR(R21)(r9)
-	std	r22, VCPU_GPR(R22)(r9)
-	std	r23, VCPU_GPR(R23)(r9)
-	std	r24, VCPU_GPR(R24)(r9)
-	std	r25, VCPU_GPR(R25)(r9)
-	std	r26, VCPU_GPR(R26)(r9)
-	std	r27, VCPU_GPR(R27)(r9)
-	std	r28, VCPU_GPR(R28)(r9)
-	std	r29, VCPU_GPR(R29)(r9)
-	std	r30, VCPU_GPR(R30)(r9)
-	std	r31, VCPU_GPR(R31)(r9)
-
-	/* Save SPRGs */
-	mfspr	r3, SPRN_SPRG0
-	mfspr	r4, SPRN_SPRG1
-	mfspr	r5, SPRN_SPRG2
-	mfspr	r6, SPRN_SPRG3
-	std	r3, VCPU_SPRG0(r9)
-	std	r4, VCPU_SPRG1(r9)
-	std	r5, VCPU_SPRG2(r9)
-	std	r6, VCPU_SPRG3(r9)
-
-	/* save FP state */
-	mr	r3, r9
-	bl	.kvmppc_save_fp
-
-	/* Increment yield count if they have a VPA */
-	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
-	cmpdi	r8, 0
-	beq	25f
-	lwz	r3, LPPACA_YIELDCOUNT(r8)
-	addi	r3, r3, 1
-	stw	r3, LPPACA_YIELDCOUNT(r8)
-	li	r3, 1
-	stb	r3, VCPU_VPA_DIRTY(r9)
-25:
-	/* Save PMU registers if requested */
-	/* r8 and cr0.eq are live here */
-	li	r3, 1
-	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
-	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
-	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
-	mfspr	r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
-	/* On P7, clear MMCRA in order to disable SDAR updates */
-	li	r7, 0
-	mtspr	SPRN_MMCRA, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-	isync
-	beq	21f			/* if no VPA, save PMU stuff anyway */
-	lbz	r7, LPPACA_PMCINUSE(r8)
-	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
-	bne	21f
-	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
-	b	22f
-21:	mfspr	r5, SPRN_MMCR1
-	mfspr	r7, SPRN_SIAR
-	mfspr	r8, SPRN_SDAR
-	std	r4, VCPU_MMCR(r9)
-	std	r5, VCPU_MMCR + 8(r9)
-	std	r6, VCPU_MMCR + 16(r9)
-	std	r7, VCPU_SIAR(r9)
-	std	r8, VCPU_SDAR(r9)
-	mfspr	r3, SPRN_PMC1
-	mfspr	r4, SPRN_PMC2
-	mfspr	r5, SPRN_PMC3
-	mfspr	r6, SPRN_PMC4
-	mfspr	r7, SPRN_PMC5
-	mfspr	r8, SPRN_PMC6
-BEGIN_FTR_SECTION
-	mfspr	r10, SPRN_PMC7
-	mfspr	r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	stw	r3, VCPU_PMC(r9)
-	stw	r4, VCPU_PMC + 4(r9)
-	stw	r5, VCPU_PMC + 8(r9)
-	stw	r6, VCPU_PMC + 12(r9)
-	stw	r7, VCPU_PMC + 16(r9)
-	stw	r8, VCPU_PMC + 20(r9)
-BEGIN_FTR_SECTION
-	stw	r10, VCPU_PMC + 24(r9)
-	stw	r11, VCPU_PMC + 28(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-22:
 	ld	r0, 112+PPC_LR_STKOFF(r1)
 	addi	r1, r1, 112
 	mtlr	r0
 	blr
-secondary_too_late:
-	ld	r5,HSTATE_KVM_VCORE(r13)
-	HMT_LOW
-13:	lbz	r3,VCORE_IN_GUEST(r5)
-	cmpwi	r3,0
-	bne	13b
-	HMT_MEDIUM
-	li	r0, KVM_GUEST_MODE_NONE
-	stb	r0, HSTATE_IN_GUEST(r13)
-	ld	r11,PACA_SLBSHADOWPTR(r13)
-
-	.rept	SLB_NUM_BOLTED
-	ld	r5,SLBSHADOW_SAVEAREA(r11)
-	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
-	andis.	r7,r5,SLB_ESID_V@h
-	beq	1f
-	slbmte	r6,r5
-1:	addi	r11,r11,16
-	.endr
-	b	22b
 
 /*
  * Check whether an HDSI is an HPTE not found fault or something else.
@@ -1386,8 +1624,7 @@ kvmppc_hdsi:
 	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
@@ -1456,8 +1693,7 @@ kvmppc_hisi:
 1:	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
@@ -1474,7 +1710,8 @@ kvmppc_hisi:
 hcall_try_real_mode:
 	ld	r3,VCPU_GPR(R3)(r9)
 	andi.	r0,r11,MSR_PR
-	bne	guest_exit_cont
+	/* sc 1 from userspace - reflect to guest syscall */
+	bne	sc_1_fast_return
 	clrrdi	r3,r3,2
 	cmpldi	r3,hcall_real_table_end - hcall_real_table
 	bge	guest_exit_cont
@@ -1495,6 +1732,14 @@ hcall_try_real_mode:
 	ld	r11,VCPU_MSR(r4)
 	b	fast_guest_return
 
+sc_1_fast_return:
+	mtspr	SPRN_SRR0,r10
+	mtspr	SPRN_SRR1,r11
+	li	r10, BOOK3S_INTERRUPT_SYSCALL
+	ld	r11, VCPU_INTR_MSR(r9)
+	mr	r4,r9
+	b	fast_guest_return
+
 	/* We've attempted a real mode hcall, but it's punted it back
 	 * to userspace. We need to restore some clobbered volatiles
 	 * before resuming the pass-it-to-qemu path */
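The new table slots in the next hunk keep hcall_real_table indexable straight by hcall number: numbers step by 4 and each entry is a 4-byte offset, so the number doubles as the byte offset once the low two bits are cleared (the clrrdi/cmpldi in hcall_try_real_mode above). A hedged C rendering of that lookup:

    #include <stdint.h>
    #include <stddef.h>

    typedef long (*hcall_fn_t)(void);

    static hcall_fn_t lookup_real_mode_hcall(const int32_t *table,
                                             size_t table_bytes,
                                             unsigned long nr)
    {
            int32_t off;

            nr &= ~3UL;                  /* clrrdi r3,r3,2 */
            if (nr >= table_bytes)
                    return NULL;         /* punt to the slow path */
            off = table[nr / 4];         /* entries are 4-byte offsets */
            if (!off)
                    return NULL;         /* zero entry: not handled here */
            return (hcall_fn_t)((const char *)table + off);
    }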
@@ -1588,14 +1833,34 @@ hcall_real_table:
 	.long	0		/* 0x11c */
 	.long	0		/* 0x120 */
 	.long	.kvmppc_h_bulk_remove - hcall_real_table
+	.long	0		/* 0x128 */
+	.long	0		/* 0x12c */
+	.long	0		/* 0x130 */
+	.long	.kvmppc_h_set_xdabr - hcall_real_table
 hcall_real_table_end:
 
 ignore_hdec:
 	mr	r4,r9
 	b	fast_guest_return
 
+_GLOBAL(kvmppc_h_set_xdabr)
+	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
+	beq	6f
+	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
+	andc.	r0, r5, r0
+	beq	3f
+6:	li	r3, H_PARAMETER
+	blr
+
 _GLOBAL(kvmppc_h_set_dabr)
+	li	r5, DABRX_USER | DABRX_KERNEL
+3:
+BEGIN_FTR_SECTION
+	b	2f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	std	r4,VCPU_DABR(r3)
+	stw	r5, VCPU_DABRX(r3)
+	mtspr	SPRN_DABRX, r5
 	/* Work around P7 bug where DABR can get corrupted on mtspr */
 1:	mtspr	SPRN_DABR,r4
 	mfspr	r5, SPRN_DABR
@@ -1605,6 +1870,17 @@ _GLOBAL(kvmppc_h_set_dabr)
 	li	r3,0
 	blr
 
+	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
+2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
+	rlwimi	r5, r4, 1, DAWRX_WT
+	clrrdi	r4, r4, 3
+	std	r4, VCPU_DAWR(r3)
+	std	r5, VCPU_DAWRX(r3)
+	mtspr	SPRN_DAWR, r4
+	mtspr	SPRN_DAWRX, r5
+	li	r3, 0
+	blr
+
 _GLOBAL(kvmppc_h_cede)
 	ori	r11,r11,MSR_EE
 	std	r11,VCPU_MSR(r3)
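H_SET_DABR on POWER8 is emulated with the DAWR. A sketch of the translation the two rlwimi plus clrrdi above perform: rotate the DABR's read/write enable bits into the DAWRX positions, move the translation bit into DAWRX_WT, and clear the low three flag bits of the address before it becomes the DAWR value. The mask values below are placeholders; the real ones live in arch/powerpc/include/asm/reg.h:

    #include <stdint.h>

    #define DAWRX_WT  (1u << 4)   /* placeholder bit positions */
    #define DAWRX_DR  (1u << 5)
    #define DAWRX_DW  (1u << 6)

    static uint32_t rotl32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }

    static void emulate_h_set_dabr(uint64_t dabr, uint64_t dabrx,
                                   uint64_t *dawr, uint64_t *dawrx)
    {
            uint64_t x = dabrx;

            /* rlwimi r5,r4,5,DAWRX_DR|DAWRX_DW */
            x = (x & ~(uint64_t)(DAWRX_DR | DAWRX_DW)) |
                (rotl32((uint32_t)dabr, 5) & (DAWRX_DR | DAWRX_DW));
            /* rlwimi r5,r4,1,DAWRX_WT */
            x = (x & ~(uint64_t)DAWRX_WT) |
                (rotl32((uint32_t)dabr, 1) & DAWRX_WT);

            *dawr  = dabr & ~7ULL;    /* clrrdi r4,r4,3 */
            *dawrx = x;
    }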
@@ -1628,7 +1904,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	 * up to the host.
 	 */
 	ld	r5,HSTATE_KVM_VCORE(r13)
-	lwz	r6,VCPU_PTID(r3)
+	lbz	r6,HSTATE_PTID(r13)
 	lwz	r8,VCORE_ENTRY_EXIT(r5)
 	clrldi	r8,r8,56
 	li	r0,1
@@ -1643,9 +1919,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	bne	31b
 	/* order napping_threads update vs testing entry_exit_count */
 	isync
-	li	r0,1
+	li	r0,NAPPING_CEDE
 	stb	r0,HSTATE_NAPPING(r13)
-	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100
 	bge	33f		/* another thread already exiting */
@@ -1677,16 +1952,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	std	r31, VCPU_GPR(R31)(r3)
 
 	/* save FP state */
-	bl	.kvmppc_save_fp
+	bl	kvmppc_save_fp
 
 	/*
-	 * Take a nap until a decrementer or external interrupt occurs,
-	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
+	 * Take a nap until a decrementer or external or doorbell interrupt
+	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR
 	 */
 	li	r0,1
 	stb	r0,HSTATE_HWTHREAD_REQ(r13)
 	mfspr	r5,SPRN_LPCR
 	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
+BEGIN_FTR_SECTION
+	oris	r5,r5,LPCR_PECEDP@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_LPCR,r5
 	isync
 	li	r0, 0
@@ -1698,6 +1976,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	nap
 	b	.
 
+33:	mr	r4, r3
+	li	r3, 0
+	li	r12, 0
+	b	34f
+
 kvm_end_cede:
 	/* get vcpu pointer */
 	ld	r4, HSTATE_KVM_VCPU(r13)
@@ -1727,12 +2010,15 @@ kvm_end_cede:
 	ld	r29, VCPU_GPR(R29)(r4)
 	ld	r30, VCPU_GPR(R30)(r4)
 	ld	r31, VCPU_GPR(R31)(r4)
+
+	/* Check the wake reason in SRR1 to see why we got here */
+	bl	kvmppc_check_wake_reason
 
 	/* clear our bit in vcore->napping_threads */
-33:	ld	r5,HSTATE_KVM_VCORE(r13)
-	lwz	r3,VCPU_PTID(r4)
+34:	ld	r5,HSTATE_KVM_VCORE(r13)
+	lbz	r7,HSTATE_PTID(r13)
 	li	r0,1
-	sld	r0,r0,r3
+	sld	r0,r0,r7
 	addi	r6,r5,VCORE_NAPPING_THREADS
 32:	lwarx	r7,0,r6
 	andc	r7,r7,r0
@@ -1741,23 +2027,18 @@ kvm_end_cede:
 	li	r0,0
 	stb	r0,HSTATE_NAPPING(r13)
 
-	/* Check the wake reason in SRR1 to see why we got here */
-	mfspr	r3, SPRN_SRR1
-	rlwinm	r3, r3, 44-31, 0x7	/* extract wake reason field */
-	cmpwi	r3, 4			/* was it an external interrupt? */
-	li	r12, BOOK3S_INTERRUPT_EXTERNAL
+	/* See if the wake reason means we need to exit */
+	stw	r12, VCPU_TRAP(r4)
 	mr	r9, r4
-	ld	r10, VCPU_PC(r9)
-	ld	r11, VCPU_MSR(r9)
-	beq	do_ext_interrupt	/* if so */
+	cmpdi	r3, 0
+	bgt	guest_exit_cont
 
 	/* see if any other thread is already exiting */
 	lwz	r0,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r0,0x100
-	blt	kvmppc_cede_reentry	/* if not go back to guest */
+	bge	guest_exit_cont
 
-	/* some threads are exiting, so go to the guest exit path */
-	b	hcall_real_fallback
+	b	kvmppc_cede_reentry	/* if not go back to guest */
 
 	/* cede when already previously prodded case */
 kvm_cede_prodded:
@@ -1783,11 +2064,48 @@ machine_check_realmode:
 	beq	mc_cont
 	/* If not, deliver a machine check.  SRR0/1 are already set */
 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 /*
+ * Check the reason we woke from nap, and take appropriate action.
+ * Returns:
+ *	0 if nothing needs to be done
+ *	1 if something happened that needs to be handled by the host
+ *	-1 if there was a guest wakeup (IPI)
+ *
+ * Also sets r12 to the interrupt vector for any interrupt that needs
+ * to be handled now by the host (0x500 for external interrupt), or zero.
+ */
+kvmppc_check_wake_reason:
+	mfspr	r6, SPRN_SRR1
+BEGIN_FTR_SECTION
+	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
+FTR_SECTION_ELSE
+	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
+	cmpwi	r6, 8			/* was it an external interrupt? */
+	li	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beq	kvmppc_read_intr	/* if so, see what it was */
+	li	r3, 0
+	li	r12, 0
+	cmpwi	r6, 6			/* was it the decrementer? */
+	beq	0f
+BEGIN_FTR_SECTION
+	cmpwi	r6, 5			/* privileged doorbell? */
+	beq	0f
+	cmpwi	r6, 3			/* hypervisor doorbell? */
+	beq	3f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+	li	r3, 1			/* anything else, return 1 */
+0:	blr
+
+	/* hypervisor doorbell */
+3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
+	li	r3, 1
+	blr
+
+/*
  * Determine what sort of external interrupt is pending (if any).
  * Returns:
  *	0 if no interrupt is pending
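A C rendering of the kvmppc_check_wake_reason added above. The rlwinm operands say the wake-reason field sits 18 bits up in the low word of SRR1, four bits wide on POWER8 and three (mask 0xe) on POWER7; 0x500 and 0xe80 are the standard external and hypervisor-doorbell vectors. This mirrors the asm's contract, nothing more:

    #include <stdint.h>

    #define BOOK3S_INTERRUPT_EXTERNAL    0x500
    #define BOOK3S_INTERRUPT_H_DOORBELL  0xe80

    extern int kvmppc_read_intr(void);   /* returns < 0 for a guest IPI */

    static int check_wake_reason(uint64_t srr1, int is_power8, int *trap)
    {
            unsigned reason = (srr1 >> 18) & (is_power8 ? 0xf : 0xe);

            *trap = 0;
            switch (reason) {
            case 8:                          /* external interrupt */
                    *trap = BOOK3S_INTERRUPT_EXTERNAL;
                    return kvmppc_read_intr();
            case 6:                          /* decrementer */
                    return 0;
            case 5:                          /* privileged doorbell (P8 only) */
                    return is_power8 ? 0 : 1;
            case 3:                          /* hypervisor doorbell (P8 only) */
                    if (is_power8)
                            *trap = BOOK3S_INTERRUPT_H_DOORBELL;
                    return 1;
            default:
                    return 1;                /* let the host have a look */
            }
    }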
@@ -1818,7 +2136,6 @@ kvmppc_read_intr:
 	 * interrupts directly to the guest
 	 */
 	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
-	li	r3, 1
 	bne	42f
 
 	/* It's an IPI, clear the MFRR and EOI it */
@@ -1844,19 +2161,25 @@ kvmppc_read_intr:
 	 * before exit, it will be picked up by the host ICP driver
 	 */
 	stw	r0, HSTATE_SAVED_XIRR(r13)
+	li	r3, 1
 	b	1b
 
 43:	/* We raced with the host, we need to resend that IPI, bummer */
 	li	r0, IPI_PRIORITY
 	stbcix	r0, r6, r8		/* set the IPI */
 	sync
+	li	r3, 1
 	b	1b
 
 /*
  * Save away FP, VMX and VSX registers.
  * r3 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
  */
-_GLOBAL(kvmppc_save_fp)
+kvmppc_save_fp:
+	mflr	r30
+	mr	r31,r3
 	mfmsr	r5
 	ori	r8,r5,MSR_FP
 #ifdef CONFIG_ALTIVEC
@@ -1871,42 +2194,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
 	mtmsrd	r8
 	isync
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	reg = 0
-	.rept	32
-	li	r6,reg*16+VCPU_VSRS
-	STXVD2X(reg,R6,R3)
-	reg = reg + 1
-	.endr
-FTR_SECTION_ELSE
-#endif
-	reg = 0
-	.rept	32
-	stfd	reg,reg*8+VCPU_FPRS(r3)
-	reg = reg + 1
-	.endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
-	mffs	fr0
-	stfd	fr0,VCPU_FPSCR(r3)
-
+	addi	r3,r3,VCPU_FPRS
+	bl	.store_fp_state
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-	reg = 0
-	.rept	32
-	li	r6,reg*16+VCPU_VRS
-	stvx	reg,r6,r3
-	reg = reg + 1
-	.endr
-	mfvscr	vr0
-	li	r6,VCPU_VSCR
-	stvx	vr0,r6,r3
+	addi	r3,r31,VCPU_VRS
+	bl	.store_vr_state
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	mfspr	r6,SPRN_VRSAVE
 	stw	r6,VCPU_VRSAVE(r3)
+	mtlr	r30
 	mtmsrd	r5
 	isync
 	blr
@@ -1914,9 +2212,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 /*
  * Load up FP, VMX and VSX registers
  * r4 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
  */
-	.globl	kvmppc_load_fp
 kvmppc_load_fp:
+	mflr	r30
+	mr	r31,r4
 	mfmsr	r9
 	ori	r8,r9,MSR_FP
 #ifdef CONFIG_ALTIVEC
@@ -1931,42 +2232,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
 	mtmsrd	r8
 	isync
-	lfd	fr0,VCPU_FPSCR(r4)
-	MTFSF_L(fr0)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	reg = 0
-	.rept	32
-	li	r7,reg*16+VCPU_VSRS
-	LXVD2X(reg,R7,R4)
-	reg = reg + 1
-	.endr
-FTR_SECTION_ELSE
-#endif
-	reg = 0
-	.rept	32
-	lfd	reg,reg*8+VCPU_FPRS(r4)
-	reg = reg + 1
-	.endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
-
+	addi	r3,r4,VCPU_FPRS
+	bl	.load_fp_state
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-	li	r7,VCPU_VSCR
-	lvx	vr0,r7,r4
-	mtvscr	vr0
-	reg = 0
-	.rept	32
-	li	r7,reg*16+VCPU_VRS
-	lvx	reg,r7,r4
-	reg = reg + 1
-	.endr
+	addi	r3,r31,VCPU_VRS
+	bl	.load_vr_state
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	lwz	r7,VCPU_VRSAVE(r4)
 	mtspr	SPRN_VRSAVE,r7
+	mtlr	r30
+	mr	r4,r31
 	blr
 
 /*