author		Paul Mackerras <paulus@samba.org>	2011-07-23 03:42:46 -0400
committer	Avi Kivity <avi@redhat.com>	2011-09-25 12:52:30 -0400
commit		19ccb76a1938ab364a412253daec64613acbf3df
tree		42a3e3307355202fe0db48e2530bb42e43d9a035 /arch/powerpc/kvm/book3s_hv_rmhandlers.S
parent		02143947603fe90237a0423d34dd8943de229f78
KVM: PPC: Implement H_CEDE hcall for book3s_hv in real-mode code
With a KVM guest operating in SMT4 mode (i.e. 4 hardware threads per core), whenever a CPU goes idle, we have to pull all the other hardware threads in the core out of the guest, because the H_CEDE hcall is handled in the kernel. This is inefficient.

This adds code to book3s_hv_rmhandlers.S to handle the H_CEDE hcall in real mode. When a guest vcpu does an H_CEDE hcall, we now only exit to the kernel if all the other vcpus in the same core are also idle. Otherwise we mark this vcpu as napping, save state that could be lost in nap mode (mainly GPRs and FPRs), and execute the nap instruction. When the thread wakes up, because of a decrementer or external interrupt, we come back in at kvm_start_guest (from the system reset interrupt vector), find the `napping' flag set in the paca, and go to the resume path.

This has some other ramifications. First, when starting a core, we now start all the threads, both those that are immediately runnable and those that are idle. This is so that we don't have to pull all the threads out of the guest when an idle thread gets a decrementer interrupt and wants to start running. In fact the idle threads will all start with the H_CEDE hcall returning; being idle they will just do another H_CEDE immediately and go to nap mode.

This required some changes to kvmppc_run_core() and kvmppc_run_vcpu(). These functions have been restructured to make them simpler and clearer. We introduce a level of indirection in the wait queue that gets woken when external and decrementer interrupts get generated for a vcpu, so that we can have the 4 vcpus in a vcore using the same wait queue. We need this because the 4 vcpus are being handled by one thread.

Secondly, when we need to exit from the guest to the kernel, we now have to generate an IPI for any napping threads, because an HDEC interrupt doesn't wake up a napping thread.

Thirdly, we now need to be able to handle virtual external interrupts and decrementer interrupts becoming pending while a thread is napping, and deliver those interrupts to the guest when the thread wakes. This is done in kvmppc_cede_reentry, just before fast_guest_return.

Finally, since we are not using the generic kvm_vcpu_block for book3s_hv, and hence not calling kvm_arch_vcpu_runnable, we can remove the #ifdef from kvm_arch_vcpu_runnable.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
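For readers following the wait-queue change, here is a minimal C sketch of the indirection described above. It is illustrative only: the struct and field names here (local_wq, wqp, vcpu_interrupt_kick) are assumptions for exposition, not necessarily the identifiers the patch series uses.

	#include <linux/wait.h>

	struct kvmppc_vcore_sketch {
		wait_queue_head_t wq;		/* shared by all vcpus in the vcore */
	};

	struct kvm_vcpu_arch_sketch {
		wait_queue_head_t local_wq;	/* per-vcpu queue */
		wait_queue_head_t *wqp;		/* points at local_wq, or at the
						 * vcore's wq while the 4 vcpus
						 * are run by a single thread */
	};

	/* An external or decrementer interrupt for the vcpu wakes whichever
	 * queue wqp currently designates, so one waker covers the vcore. */
	static void vcpu_interrupt_kick(struct kvm_vcpu_arch_sketch *a)
	{
		wake_up_interruptible(a->wqp);
	}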
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rmhandlers.S')
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	297
1 file changed, 263 insertions(+), 34 deletions(-)
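The heart of the real-mode H_CEDE path in the diff below is the napping_threads accounting in kvmppc_h_cede. As a rough guide to the lwarx/stwcx. loop there, here is the same protocol in portable C11 atomics; this is a sketch under assumed names, not kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Atomically set our bit in the napping mask, unless that would make
	 * every thread currently in the guest a napper, in which case the
	 * cede must be sent up to the host instead. */
	static bool try_join_nap(atomic_uint *napping_threads,
				 unsigned int ptid,
				 unsigned int threads_in_guest)
	{
		unsigned int old = atomic_load(napping_threads);
		unsigned int new;

		do {
			new = old | (1u << ptid);
			/* mirrors popcntw/cmpw/bge in kvmppc_h_cede below */
			if ((unsigned int)__builtin_popcount(new) >= threads_in_guest)
				return false;	/* send H_CEDE up to the host */
		} while (!atomic_compare_exchange_weak(napping_threads, &old, new));

		return true;	/* we may nap: set HSTATE_NAPPING and nap */
	}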
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc6ade933089..f422231d9235 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -52,7 +52,7 @@ kvmppc_skip_Hinterrupt:
 	b	.
 
 /*
- * Call kvmppc_handler_trampoline_enter in real mode.
+ * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
  *
  * Input Registers:
@@ -92,6 +92,12 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
 kvm_start_guest:
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
+	ld	r2,PACATOC(r13)
+
+	/* were we napping due to cede? */
+	lbz	r0,HSTATE_NAPPING(r13)
+	cmpwi	r0,0
+	bne	kvm_end_cede
 
 	/* get vcpu pointer */
 	ld	r4, HSTATE_KVM_VCPU(r13)
@@ -279,15 +285,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	cmpwi	r0,0
 	beq	20b
 
-	/* Set LPCR.  Set the MER bit if there is a pending external irq. */
+	/* Set LPCR and RMOR. */
 10:	ld	r8,KVM_LPCR(r9)
-	ld	r0,VCPU_PENDING_EXC(r4)
-	li	r7,(1 << BOOK3S_IRQPRIO_EXTERNAL)
-	oris	r7,r7,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
-	and.	r0,r0,r7
-	beq	11f
-	ori	r8,r8,LPCR_MER
-11:	mtspr	SPRN_LPCR,r8
+	mtspr	SPRN_LPCR,r8
 	ld	r8,KVM_RMOR(r9)
 	mtspr	SPRN_RMOR,r8
 	isync
@@ -451,19 +451,50 @@ toc_tlbie_lock:
 	mtctr	r6
 	mtxer	r7
 
-	/* Move SRR0 and SRR1 into the respective regs */
+kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
 	ld	r6, VCPU_SRR0(r4)
 	ld	r7, VCPU_SRR1(r4)
-	mtspr	SPRN_SRR0, r6
-	mtspr	SPRN_SRR1, r7
-
 	ld	r10, VCPU_PC(r4)
+	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */
 
-	ld	r11, VCPU_MSR(r4)	/* r10 = vcpu->arch.msr & ~MSR_HV */
 	rldicl	r11, r11, 63 - MSR_HV_LG, 1
 	rotldi	r11, r11, 1 + MSR_HV_LG
 	ori	r11, r11, MSR_ME
 
+	/* Check if we can deliver an external or decrementer interrupt now */
+	ld	r0,VCPU_PENDING_EXC(r4)
+	li	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
+	oris	r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+	and	r0,r0,r8
+	cmpdi	cr1,r0,0
+	andi.	r0,r11,MSR_EE
+	beq	cr1,11f
+BEGIN_FTR_SECTION
+	mfspr	r8,SPRN_LPCR
+	ori	r8,r8,LPCR_MER
+	mtspr	SPRN_LPCR,r8
+	isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+	beq	5f
+	li	r0,BOOK3S_INTERRUPT_EXTERNAL
+12:	mr	r6,r10
+	mr	r10,r0
+	mr	r7,r11
+	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
+	rotldi	r11,r11,63
+	b	5f
+11:	beq	5f
+	mfspr	r0,SPRN_DEC
+	cmpwi	r0,0
+	li	r0,BOOK3S_INTERRUPT_DECREMENTER
+	blt	12b
+
+	/* Move SRR0 and SRR1 into the respective regs */
+5:	mtspr	SPRN_SRR0, r6
+	mtspr	SPRN_SRR1, r7
+	li	r0,0
+	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
+
 fast_guest_return:
 	mtspr	SPRN_HSRR0,r10
 	mtspr	SPRN_HSRR1,r11
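(Aside: in C terms, the decision kvmppc_cede_reentry makes above can be sketched as follows. All names and the boolean plumbing are illustrative; on POWER7 the LPCR_MER bit is what lets a pending external interrupt be signalled to the guest once it sets MSR_EE.)

	/* Hedged C rendering of the pending-interrupt check done just
	 * before fast_guest_return; not the actual kernel code. */
	enum reentry_action { ENTER_GUEST, DELIVER_EXTERNAL, DELIVER_DECREMENTER };

	struct reentry_in {
		int ext_pending;	/* VCPU_PENDING_EXC has an external irq bit set */
		int msr_ee;		/* guest MSR.EE (external interrupt enable) */
		int dec_negative;	/* guest decrementer has expired (DEC < 0) */
	};

	static enum reentry_action cede_reentry(const struct reentry_in *in,
						int *set_lpcr_mer)
	{
		*set_lpcr_mer = 0;
		if (in->ext_pending) {
			/* POWER7 only: raise LPCR_MER so the guest takes the
			 * interrupt as soon as it re-enables EE */
			*set_lpcr_mer = 1;
			if (in->msr_ee)
				return DELIVER_EXTERNAL;	/* inject now */
		} else if (in->msr_ee && in->dec_negative) {
			return DELIVER_DECREMENTER;
		}
		return ENTER_GUEST;
	}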
@@ -577,21 +608,20 @@ kvmppc_interrupt:
 	/* See if this is something we can handle in real mode */
 	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
 	beq	hcall_try_real_mode
-hcall_real_cont:
 
 	/* Check for mediated interrupts (could be done earlier really ...) */
 BEGIN_FTR_SECTION
 	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
 	bne+	1f
-	ld	r5,VCPU_KVM(r9)
-	ld	r5,KVM_LPCR(r5)
 	andi.	r0,r11,MSR_EE
 	beq	1f
+	mfspr	r5,SPRN_LPCR
 	andi.	r0,r5,LPCR_MER
 	bne	bounce_ext_interrupt
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
+hcall_real_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
 	/* Save DEC */
 	mfspr	r5,SPRN_DEC
 	mftb	r6
@@ -685,7 +715,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 	slbia
 	ptesync
 
-hdec_soon:
+hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
 	b	32f
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
@@ -703,6 +733,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	addi	r0,r3,0x100
 	stwcx.	r0,0,r6
 	bne	41b
+	lwsync
 
 	/*
 	 * At this point we have an interrupt that we have to pass
@@ -716,18 +747,39 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 * interrupt, since the other threads will already be on their
 	 * way here in that case.
 	 */
+	cmpwi	r3,0x100	/* Are we the first here? */
+	bge	43f
+	cmpwi	r3,1		/* Are any other threads in the guest? */
+	ble	43f
 	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
 	beq	40f
-	cmpwi	r3,0x100	/* Are we the first here? */
-	bge	40f
-	cmpwi	r3,1
-	ble	40f
 	li	r0,0
 	mtspr	SPRN_HDEC,r0
 40:
+	/*
+	 * Send an IPI to any napping threads, since an HDEC interrupt
+	 * doesn't wake CPUs up from nap.
+	 */
+	lwz	r3,VCORE_NAPPING_THREADS(r5)
+	lwz	r4,VCPU_PTID(r9)
+	li	r0,1
+	sldi	r0,r0,r4
+	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
+	beq	43f
+	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
+	subf	r6,r4,r13
+42:	andi.	r0,r3,1
+	beq	44f
+	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
+	li	r0,IPI_PRIORITY
+	li	r7,XICS_QIRR
+	stbcix	r0,r7,r8		/* trigger the IPI */
+44:	srdi.	r3,r3,1
+	addi	r6,r6,PACA_SIZE
+	bne	42b
 
 	/* Secondary threads wait for primary to do partition switch */
-	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
+43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	lwz	r3,VCPU_PTID(r9)
 	cmpwi	r3,0
@@ -1080,7 +1132,6 @@ hcall_try_real_mode:
 hcall_real_fallback:
 	li	r12,BOOK3S_INTERRUPT_SYSCALL
 	ld	r9, HSTATE_KVM_VCPU(r13)
-	ld	r11, VCPU_MSR(r9)
 
 	b	hcall_real_cont
 
@@ -1142,7 +1193,7 @@ hcall_real_table:
 	.long	0		/* 0xd4 */
 	.long	0		/* 0xd8 */
 	.long	0		/* 0xdc */
-	.long	0		/* 0xe0 */
+	.long	.kvmppc_h_cede - hcall_real_table
 	.long	0		/* 0xe4 */
 	.long	0		/* 0xe8 */
 	.long	0		/* 0xec */
@@ -1171,7 +1222,8 @@ bounce_ext_interrupt:
 	mtspr	SPRN_SRR0,r10
 	mtspr	SPRN_SRR1,r11
 	li	r10,BOOK3S_INTERRUPT_EXTERNAL
-	LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
+	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
+	rotldi	r11,r11,63
 	b	fast_guest_return
 
 _GLOBAL(kvmppc_h_set_dabr)
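(The li/rotldi pair above relies on MSR_SF being bit 63: loading (MSR_ME << 1) | 1 and rotating left by 63 — equivalently, right by one — drops the low 1 into the MSR_SF position and returns MSR_ME to its own bit. A small standalone C check of that identity, using the 64-bit MSR bit positions:)

	#include <assert.h>
	#include <stdint.h>

	#define MSR_SF (1ull << 63)	/* 64-bit mode */
	#define MSR_ME (1ull << 12)	/* machine check enable */

	int main(void)
	{
		uint64_t r11 = (MSR_ME << 1) | 1;
		/* rotldi r11,r11,63 == rotate left 63 == rotate right 1 */
		r11 = (r11 >> 1) | (r11 << 63);
		assert(r11 == (MSR_SF | MSR_ME));
		return 0;
	}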
@@ -1180,6 +1232,178 @@ _GLOBAL(kvmppc_h_set_dabr)
 	li	r3,0
 	blr
 
+_GLOBAL(kvmppc_h_cede)
+	ori	r11,r11,MSR_EE
+	std	r11,VCPU_MSR(r3)
+	li	r0,1
+	stb	r0,VCPU_CEDED(r3)
+	sync			/* order setting ceded vs. testing prodded */
+	lbz	r5,VCPU_PRODDED(r3)
+	cmpwi	r5,0
+	bne	1f
+	li	r0,0		/* set trap to 0 to say hcall is handled */
+	stw	r0,VCPU_TRAP(r3)
+	li	r0,H_SUCCESS
+	std	r0,VCPU_GPR(r3)(r3)
+BEGIN_FTR_SECTION
+	b	2f		/* just send it up to host on 970 */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
+
+	/*
+	 * Set our bit in the bitmask of napping threads unless all the
+	 * other threads are already napping, in which case we send this
+	 * up to the host.
+	 */
+	ld	r5,HSTATE_KVM_VCORE(r13)
+	lwz	r6,VCPU_PTID(r3)
+	lwz	r8,VCORE_ENTRY_EXIT(r5)
+	clrldi	r8,r8,56
+	li	r0,1
+	sld	r0,r0,r6
+	addi	r6,r5,VCORE_NAPPING_THREADS
+31:	lwarx	r4,0,r6
+	or	r4,r4,r0
+	popcntw	r7,r4
+	cmpw	r7,r8
+	bge	2f
+	stwcx.	r4,0,r6
+	bne	31b
+	li	r0,1
+	stb	r0,HSTATE_NAPPING(r13)
+	/* order napping_threads update vs testing entry_exit_count */
+	lwsync
+	mr	r4,r3
+	lwz	r7,VCORE_ENTRY_EXIT(r5)
+	cmpwi	r7,0x100
+	bge	33f		/* another thread already exiting */
+
+/*
+ * Although not specifically required by the architecture, POWER7
+ * preserves the following registers in nap mode, even if an SMT mode
+ * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
+ * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
+ */
+	/* Save non-volatile GPRs */
+	std	r14, VCPU_GPR(r14)(r3)
+	std	r15, VCPU_GPR(r15)(r3)
+	std	r16, VCPU_GPR(r16)(r3)
+	std	r17, VCPU_GPR(r17)(r3)
+	std	r18, VCPU_GPR(r18)(r3)
+	std	r19, VCPU_GPR(r19)(r3)
+	std	r20, VCPU_GPR(r20)(r3)
+	std	r21, VCPU_GPR(r21)(r3)
+	std	r22, VCPU_GPR(r22)(r3)
+	std	r23, VCPU_GPR(r23)(r3)
+	std	r24, VCPU_GPR(r24)(r3)
+	std	r25, VCPU_GPR(r25)(r3)
+	std	r26, VCPU_GPR(r26)(r3)
+	std	r27, VCPU_GPR(r27)(r3)
+	std	r28, VCPU_GPR(r28)(r3)
+	std	r29, VCPU_GPR(r29)(r3)
+	std	r30, VCPU_GPR(r30)(r3)
+	std	r31, VCPU_GPR(r31)(r3)
+
+	/* save FP state */
+	bl	.kvmppc_save_fp
+
+	/*
+	 * Take a nap until a decrementer or external interrupt occurs,
+	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
+	 */
+	li	r0,0x80
+	stb	r0,PACAPROCSTART(r13)
+	mfspr	r5,SPRN_LPCR
+	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
+	mtspr	SPRN_LPCR,r5
+	isync
+	li	r0, 0
+	std	r0, HSTATE_SCRATCH0(r13)
+	ptesync
+	ld	r0, HSTATE_SCRATCH0(r13)
+1:	cmpd	r0, r0
+	bne	1b
+	nap
+	b	.
+
+kvm_end_cede:
+	/* Woken by external or decrementer interrupt */
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r2, PACATOC(r13)
+
+	/* If we're a secondary thread and we got here by an IPI, ack it */
+	ld	r4,HSTATE_KVM_VCPU(r13)
+	lwz	r3,VCPU_PTID(r4)
+	cmpwi	r3,0
+	beq	27f
+	mfspr	r3,SPRN_SRR1
+	rlwinm	r3,r3,44-31,0x7	/* extract wake reason field */
+	cmpwi	r3,4		/* was it an external interrupt? */
+	bne	27f
+	ld	r5, HSTATE_XICS_PHYS(r13)
+	li	r0,0xff
+	li	r6,XICS_QIRR
+	li	r7,XICS_XIRR
+	lwzcix	r8,r5,r7	/* ack the interrupt */
+	sync
+	stbcix	r0,r5,r6	/* clear it */
+	stwcix	r8,r5,r7	/* EOI it */
+27:
+	/* load up FP state */
+	bl	kvmppc_load_fp
+
+	/* Load NV GPRS */
+	ld	r14, VCPU_GPR(r14)(r4)
+	ld	r15, VCPU_GPR(r15)(r4)
+	ld	r16, VCPU_GPR(r16)(r4)
+	ld	r17, VCPU_GPR(r17)(r4)
+	ld	r18, VCPU_GPR(r18)(r4)
+	ld	r19, VCPU_GPR(r19)(r4)
+	ld	r20, VCPU_GPR(r20)(r4)
+	ld	r21, VCPU_GPR(r21)(r4)
+	ld	r22, VCPU_GPR(r22)(r4)
+	ld	r23, VCPU_GPR(r23)(r4)
+	ld	r24, VCPU_GPR(r24)(r4)
+	ld	r25, VCPU_GPR(r25)(r4)
+	ld	r26, VCPU_GPR(r26)(r4)
+	ld	r27, VCPU_GPR(r27)(r4)
+	ld	r28, VCPU_GPR(r28)(r4)
+	ld	r29, VCPU_GPR(r29)(r4)
+	ld	r30, VCPU_GPR(r30)(r4)
+	ld	r31, VCPU_GPR(r31)(r4)
+
+	/* clear our bit in vcore->napping_threads */
+33:	ld	r5,HSTATE_KVM_VCORE(r13)
+	lwz	r3,VCPU_PTID(r4)
+	li	r0,1
+	sld	r0,r0,r3
+	addi	r6,r5,VCORE_NAPPING_THREADS
+32:	lwarx	r7,0,r6
+	andc	r7,r7,r0
+	stwcx.	r7,0,r6
+	bne	32b
+	li	r0,0
+	stb	r0,HSTATE_NAPPING(r13)
+
+	/* see if any other thread is already exiting */
+	lwz	r0,VCORE_ENTRY_EXIT(r5)
+	cmpwi	r0,0x100
+	blt	kvmppc_cede_reentry	/* if not go back to guest */
+
+	/* some threads are exiting, so go to the guest exit path */
+	b	hcall_real_fallback
+
+	/* cede when already previously prodded case */
+1:	li	r0,0
+	stb	r0,VCPU_PRODDED(r3)
+	sync			/* order testing prodded vs. clearing ceded */
+	stb	r0,VCPU_CEDED(r3)
+	li	r3,H_SUCCESS
+	blr
+
+	/* we've ceded but we want to give control to the host */
+2:	li	r3,H_TOO_HARD
+	blr
+
 secondary_too_late:
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	HMT_LOW
@@ -1197,14 +1421,20 @@ secondary_too_late:
 	slbmte	r6,r5
 1:	addi	r11,r11,16
 	.endr
-	b	50f
 
 secondary_nap:
-	/* Clear any pending IPI */
-50:	ld	r5, HSTATE_XICS_PHYS(r13)
+	/* Clear any pending IPI - assume we're a secondary thread */
+	ld	r5, HSTATE_XICS_PHYS(r13)
+	li	r7, XICS_XIRR
+	lwzcix	r3, r5, r7		/* ack any pending interrupt */
+	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
+	beq	37f
+	sync
 	li	r0, 0xff
 	li	r6, XICS_QIRR
-	stbcix	r0, r5, r6
+	stbcix	r0, r5, r6		/* clear the IPI */
+	stwcix	r3, r5, r7		/* EOI it */
+37:	sync
 
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
@@ -1214,13 +1444,12 @@ secondary_nap:
 	addi	r3, r3, 1
 	stwcx.	r3, 0, r4
 	bne	51b
-	isync
 
+	li	r3, LPCR_PECE0
 	mfspr	r4, SPRN_LPCR
-	li	r0, LPCR_PECE
-	andc	r4, r4, r0
-	ori	r4, r4, LPCR_PECE0	/* exit nap on interrupt */
+	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
 	mtspr	SPRN_LPCR, r4
+	isync
 	li	r0, 0
 	std	r0, HSTATE_SCRATCH0(r13)
 	ptesync
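(A closing note on this last hunk: rlwimi r4,r3,0,LPCR_PECE0|LPCR_PECE1 is an insert-under-mask, so with r3 = LPCR_PECE0 it sets PECE0, wake on external interrupt, and clears PECE1, wake on decrementer, in a single instruction, replacing the old andc/ori pair. In C, with assumed bit values:)

	/* Sketch of the LPCR update at the end of secondary_nap; the PECE
	 * bit values here are assumptions, the real ones live in asm/reg.h. */
	#define LPCR_PECE0 0x00004000UL	/* wake on external interrupt (assumed) */
	#define LPCR_PECE1 0x00002000UL	/* wake on decrementer (assumed) */

	static unsigned long lpcr_for_secondary_nap(unsigned long lpcr)
	{
		/* insert-under-mask: keep bits outside the mask, take PECE0
		 * from the source, so PECE0 is set and PECE1 is cleared */
		return (lpcr & ~(LPCR_PECE0 | LPCR_PECE1)) | LPCR_PECE0;
	}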