Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rmhandlers.S')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 230
1 file changed, 225 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 9ee223c35285..6dd33581a228 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -148,12 +148,20 @@ kvmppc_hv_entry:
 	lwz r7, VCPU_PMC + 12(r4)
 	lwz r8, VCPU_PMC + 16(r4)
 	lwz r9, VCPU_PMC + 20(r4)
+BEGIN_FTR_SECTION
+	lwz r10, VCPU_PMC + 24(r4)
+	lwz r11, VCPU_PMC + 28(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr SPRN_PMC1, r3
 	mtspr SPRN_PMC2, r5
 	mtspr SPRN_PMC3, r6
 	mtspr SPRN_PMC4, r7
 	mtspr SPRN_PMC5, r8
 	mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+	mtspr SPRN_PMC7, r10
+	mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld r3, VCPU_MMCR(r4)
 	ld r5, VCPU_MMCR + 8(r4)
 	ld r6, VCPU_MMCR + 16(r4)
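
The BEGIN_FTR_SECTION / END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) pairs added throughout this patch mark instructions that the boot-time feature fixups patch out unless the CPU reports the named feature, so the PMC7/PMC8 accesses survive only on PPC970 (ARCH 2.01), which has eight PMCs where POWER7 has six. A hedged C sketch of what the guard amounts to (load_guest_pmc78 is a hypothetical helper; it assumes vcpu->arch.pmc[] is the array behind the VCPU_PMC offsets used above):

    #include <linux/kvm_host.h>   /* struct kvm_vcpu */
    #include <asm/reg.h>          /* mtspr(), SPRN_PMC7/SPRN_PMC8 */
    #include <asm/cputable.h>     /* cpu_has_feature(), CPU_FTR_ARCH_201 */

    /* Sketch: restore the two PPC970-only performance counters. */
    static void load_guest_pmc78(struct kvm_vcpu *vcpu)
    {
            if (cpu_has_feature(CPU_FTR_ARCH_201)) {        /* PPC970 family */
                    mtspr(SPRN_PMC7, vcpu->arch.pmc[6]);
                    mtspr(SPRN_PMC8, vcpu->arch.pmc[7]);
            }
    }

The real code avoids any runtime test: when the feature bit is clear, the guarded instructions are simply patched out, which matters in this real-mode entry path.
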
@@ -165,9 +173,11 @@ kvmppc_hv_entry:
 	/* Load up FP, VMX and VSX registers */
 	bl kvmppc_load_fp
 
+BEGIN_FTR_SECTION
 	/* Switch DSCR to guest value */
 	ld r5, VCPU_DSCR(r4)
 	mtspr SPRN_DSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/*
 	 * Set the decrementer to the guest decrementer.
@@ -210,6 +220,7 @@ kvmppc_hv_entry:
 	mtspr SPRN_DABRX,r5
 	mtspr SPRN_DABR,r6
 
+BEGIN_FTR_SECTION
 	/* Restore AMR and UAMOR, set AMOR to all 1s */
 	ld r5,VCPU_AMR(r4)
 	ld r6,VCPU_UAMOR(r4)
@@ -217,6 +228,7 @@ kvmppc_hv_entry:
 	mtspr SPRN_AMR,r5
 	mtspr SPRN_UAMOR,r6
 	mtspr SPRN_AMOR,r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Clear out SLB */
 	li r6,0
@@ -224,6 +236,14 @@ kvmppc_hv_entry:
 	slbia
 	ptesync
 
+BEGIN_FTR_SECTION
+	b 30f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	/*
+	 * POWER7 host -> guest partition switch code.
+	 * We don't have to lock against concurrent tlbies,
+	 * but we do have to coordinate across hardware threads.
+	 */
 	/* Increment entry count iff exit count is zero. */
 	ld r5,HSTATE_KVM_VCORE(r13)
 	addi r9,r5,VCORE_ENTRY_EXIT
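
On PPC970 the feature section above branches straight to the 970-specific switch code at 30:, skipping the POWER7 thread-coordination that follows. That coordination hangs off vcore->entry_exit_count, which packs the number of threads that have entered the guest into its low bits and the number that have begun exiting into its 0xff00 bits, so an entry may only proceed while nothing is exiting. A user-space-flavoured sketch of the "increment entry count iff exit count is zero" step (try_enter is a made-up name; the real code does this with lwarx/stwcx. in the lines that follow this hunk):

    #include <linux/atomic.h>
    #include <linux/types.h>

    /* Sketch: atomically bump the entry count unless some thread has
     * already started exiting (non-zero 0xff00 bits). */
    static bool try_enter(atomic_t *entry_exit_count)
    {
            int old, new;

            do {
                    old = atomic_read(entry_exit_count);
                    if (old & 0xff00)       /* an exit is in progress */
                            return false;
                    new = old + 1;          /* one more thread in the guest */
            } while (atomic_cmpxchg(entry_exit_count, old, new) != old);
            return true;
    }
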
@@ -315,9 +335,94 @@ kvmppc_hv_entry:
 	ld r8,VCPU_SPURR(r4)
 	mtspr SPRN_PURR,r7
 	mtspr SPRN_SPURR,r8
+	b 31f
+
+	/*
+	 * PPC970 host -> guest partition switch code.
+	 * We have to lock against concurrent tlbies,
+	 * using native_tlbie_lock to lock against host tlbies
+	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
+	 * We also have to invalidate the TLB since its
+	 * entries aren't tagged with the LPID.
+	 */
+30:	ld r9,VCPU_KVM(r4)		/* pointer to struct kvm */
+
+	/* first take native_tlbie_lock */
+	.section ".toc","aw"
+toc_tlbie_lock:
+	.tc native_tlbie_lock[TC],native_tlbie_lock
+	.previous
+	ld r3,toc_tlbie_lock@toc(2)
+	lwz r8,PACA_LOCK_TOKEN(r13)
+24:	lwarx r0,0,r3
+	cmpwi r0,0
+	bne 24b
+	stwcx. r8,0,r3
+	bne 24b
+	isync
+
+	ld r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
+	li r0,0x18f
+	rotldi r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
+	or r0,r7,r0
+	ptesync
+	sync
+	mtspr SPRN_HID4,r0		/* switch to reserved LPID */
+	isync
+	li r0,0
+	stw r0,0(r3)			/* drop native_tlbie_lock */
+
+	/* invalidate the whole TLB */
+	li r0,256
+	mtctr r0
+	li r6,0
+25:	tlbiel r6
+	addi r6,r6,0x1000
+	bdnz 25b
+	ptesync
+
+	/* Take the guest's tlbie_lock */
+	addi r3,r9,KVM_TLBIE_LOCK
+24:	lwarx r0,0,r3
+	cmpwi r0,0
+	bne 24b
+	stwcx. r8,0,r3
+	bne 24b
+	isync
+	ld r6,KVM_SDR1(r9)
+	mtspr SPRN_SDR1,r6		/* switch to partition page table */
+
+	/* Set up HID4 with the guest's LPID etc. */
+	sync
+	mtspr SPRN_HID4,r7
+	isync
+
+	/* drop the guest's tlbie_lock */
+	li r0,0
+	stw r0,0(r3)
+
+	/* Check if HDEC expires soon */
+	mfspr r3,SPRN_HDEC
+	cmpwi r3,10
+	li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+	mr r9,r4
+	blt hdec_soon
+
+	/* Enable HDEC interrupts */
+	mfspr r0,SPRN_HID0
+	li r3,1
+	rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+	sync
+	mtspr SPRN_HID0,r0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
 
 	/* Load up guest SLB entries */
-	lwz r5,VCPU_SLB_MAX(r4)
+31:	lwz r5,VCPU_SLB_MAX(r4)
 	cmpwi r5,0
 	beq 9f
 	mtctr r5
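
The PPC970 entry path added above cannot rely on LPID-tagged TLB entries, so it takes native_tlbie_lock (shared with the host's tlbie code) while HID4 points at a reserved LPID, steps tlbiel through 256 index values to flush the whole TLB, and only then installs the guest's SDR1 and HID4 under kvm->arch.tlbie_lock. Both locks follow the same convention: the lock word is 0 when free and holds the owner's PACA lock token when held. A self-contained sketch of that acquire/release protocol, assuming a plain 32-bit lock word (token_lock/token_unlock are invented names; the real thing is the lwarx/stwcx./isync and stw sequences above, which must run in real mode):

    #include <stdbool.h>

    /* Sketch of the lock-token spinlock used for the tlbie locks. */
    static void token_lock(unsigned int *lock, unsigned int token)
    {
            unsigned int expected;

            do {
                    while (__atomic_load_n(lock, __ATOMIC_RELAXED) != 0)
                            ;       /* spin until the word reads 0 (free) */
                    expected = 0;   /* then try to claim it with our token */
            } while (!__atomic_compare_exchange_n(lock, &expected, token,
                                                  false, __ATOMIC_ACQUIRE,
                                                  __ATOMIC_RELAXED));
    }

    static void token_unlock(unsigned int *lock)
    {
            __atomic_store_n(lock, 0, __ATOMIC_RELEASE);    /* stw of 0 */
    }

In the assembly, the isync after a successful stwcx. is what provides the acquire ordering.
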
@@ -472,6 +577,7 @@ kvmppc_interrupt:
 hcall_real_cont:
 
 	/* Check for mediated interrupts (could be done earlier really ...) */
+BEGIN_FTR_SECTION
 	cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL
 	bne+ 1f
 	ld r5,VCPU_KVM(r9)
@@ -481,6 +587,7 @@ hcall_real_cont:
 	andi. r0,r5,LPCR_MER
 	bne bounce_ext_interrupt
 1:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Save DEC */
 	mfspr r5,SPRN_DEC
@@ -492,9 +599,11 @@ hcall_real_cont:
 	/* Save HEIR (HV emulation assist reg) in last_inst
 	   if this is an HEI (HV emulation interrupt, e40) */
 	li r3,-1
+BEGIN_FTR_SECTION
 	cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
 	bne 11f
 	mfspr r3,SPRN_HEIR
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 11:	stw r3,VCPU_LAST_INST(r9)
 
 	/* Save more register state */
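
HEIR (the HV emulation assist register) exists only on POWER7-class hardware and is only meaningful when the exit was an HV emulation assist interrupt, so the feature section narrows the mfspr accordingly and every other exit records -1 as last_inst. The same decision in C, as a sketch (capture_last_inst and its trap parameter are illustrative, not the file's actual interface):

    #include <linux/types.h>
    #include <asm/reg.h>         /* mfspr(), SPRN_HEIR */
    #include <asm/cputable.h>    /* cpu_has_feature(), CPU_FTR_ARCH_206 */
    #include <asm/kvm_asm.h>     /* BOOK3S_INTERRUPT_H_EMUL_ASSIST */

    /* Sketch: fetch the faulting instruction image only when HEIR is valid. */
    static u32 capture_last_inst(int trap)
    {
            u32 last_inst = (u32)-1;

            if (cpu_has_feature(CPU_FTR_ARCH_206) &&
                trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST)
                    last_inst = mfspr(SPRN_HEIR);
            return last_inst;    /* the stw at 11: stores this in vcpu->arch.last_inst */
    }
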
@@ -508,8 +617,10 @@ hcall_real_cont:
 	stw r7, VCPU_DSISR(r9)
 	std r8, VCPU_CTR(r9)
 	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
+BEGIN_FTR_SECTION
 	cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
 	beq 6f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 7:	std r6, VCPU_FAULT_DAR(r9)
 	stw r7, VCPU_FAULT_DSISR(r9)
 
@@ -543,6 +654,7 @@ hcall_real_cont:
 	/*
 	 * Save the guest PURR/SPURR
 	 */
+BEGIN_FTR_SECTION
 	mfspr r5,SPRN_PURR
 	mfspr r6,SPRN_SPURR
 	ld r7,VCPU_PURR(r9)
@@ -562,6 +674,7 @@ hcall_real_cont:
 	add r4,r4,r6
 	mtspr SPRN_PURR,r3
 	mtspr SPRN_SPURR,r4
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 
 	/* Clear out SLB */
 	li r5,0
@@ -570,6 +683,14 @@ hcall_real_cont:
 	ptesync
 
 hdec_soon:
+BEGIN_FTR_SECTION
+	b 32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	/*
+	 * POWER7 guest -> host partition switch code.
+	 * We don't have to lock against tlbies but we do
+	 * have to coordinate the hardware threads.
+	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
 	lwsync
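
The exit side mirrors the entry protocol sketched earlier: the first thing a thread does on the POWER7 path is advertise that it is leaving by bumping the exit count kept in the 0xff00 bits of vcore->entry_exit_count, which blocks any further entries. A one-step sketch (note_exit is a made-up name; the actual increment is done with lwarx/stwcx. right after the lwsync above):

    #include <linux/atomic.h>

    /* Sketch: mark this hardware thread as exiting the guest. */
    static void note_exit(atomic_t *entry_exit_count)
    {
            atomic_add(0x100, entry_exit_count);    /* exit count lives in bits 8-15 */
    }
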
@@ -640,9 +761,82 @@ hdec_soon:
 16:	ld r8,KVM_HOST_LPCR(r4)
 	mtspr SPRN_LPCR,r8
 	isync
+	b 33f
+
+	/*
+	 * PPC970 guest -> host partition switch code.
+	 * We have to lock against concurrent tlbies, and
+	 * we have to flush the whole TLB.
+	 */
+32:	ld r4,VCPU_KVM(r9)		/* pointer to struct kvm */
+
+	/* Take the guest's tlbie_lock */
+	lwz r8,PACA_LOCK_TOKEN(r13)
+	addi r3,r4,KVM_TLBIE_LOCK
+24:	lwarx r0,0,r3
+	cmpwi r0,0
+	bne 24b
+	stwcx. r8,0,r3
+	bne 24b
+	isync
+
+	ld r7,KVM_HOST_LPCR(r4)		/* use kvm->arch.host_lpcr for HID4 */
+	li r0,0x18f
+	rotldi r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
+	or r0,r7,r0
+	ptesync
+	sync
+	mtspr SPRN_HID4,r0		/* switch to reserved LPID */
+	isync
+	li r0,0
+	stw r0,0(r3)			/* drop guest tlbie_lock */
+
+	/* invalidate the whole TLB */
+	li r0,256
+	mtctr r0
+	li r6,0
+25:	tlbiel r6
+	addi r6,r6,0x1000
+	bdnz 25b
+	ptesync
+
+	/* take native_tlbie_lock */
+	ld r3,toc_tlbie_lock@toc(2)
+24:	lwarx r0,0,r3
+	cmpwi r0,0
+	bne 24b
+	stwcx. r8,0,r3
+	bne 24b
+	isync
+
+	ld r6,KVM_HOST_SDR1(r4)
+	mtspr SPRN_SDR1,r6		/* switch to host page table */
+
+	/* Set up host HID4 value */
+	sync
+	mtspr SPRN_HID4,r7
+	isync
+	li r0,0
+	stw r0,0(r3)			/* drop native_tlbie_lock */
+
+	lis r8,0x7fff			/* MAX_INT@h */
+	mtspr SPRN_HDEC,r8
+
+	/* Disable HDEC interrupts */
+	mfspr r0,SPRN_HID0
+	li r3,0
+	rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+	sync
+	mtspr SPRN_HID0,r0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
+	mfspr r0,SPRN_HID0
 
 	/* load host SLB entries */
-	ld r8,PACA_SLBSHADOWPTR(r13)
+33:	ld r8,PACA_SLBSHADOWPTR(r13)
 
 	.rept SLB_NUM_BOLTED
 	ld r5,SLBSHADOW_SAVEAREA(r8)
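
Once the host SDR1 and HID4 are back in place, the PPC970 exit path parks the hypervisor decrementer and masks HDEC interrupts again; the mtspr to HID0 is bracketed by a sync and six dependent mfspr reads, matching the HID0 update sequence the 970 manuals call for. Roughly, in C (ppc970_mask_hdec is a hypothetical helper, the include comment lists what it is assumed to provide, and the dummy reads appear only as a comment because they have no portable C equivalent):

    #include <asm/reg.h>    /* assumed: mtspr(), mfspr(), SPRN_HDEC, SPRN_HID0, HID0_HDICE_SH */

    /* Sketch: stop HDEC from firing while we are back in the host. */
    static void ppc970_mask_hdec(void)
    {
            unsigned long hid0;

            mtspr(SPRN_HDEC, 0x7fff0000);           /* lis r8,0x7fff: far in the future */
            hid0 = mfspr(SPRN_HID0);
            hid0 &= ~(1ul << HID0_HDICE_SH);        /* clear the HDEC interrupt enable */
            mtspr(SPRN_HID0, hid0);
            /* the real code adds sync; mtspr; then six mfspr reads of HID0 */
    }
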
@@ -654,12 +848,14 @@ hdec_soon:
 	.endr
 
 	/* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
 	mfspr r5,SPRN_AMR
 	mfspr r6,SPRN_UAMOR
 	std r5,VCPU_AMR(r9)
 	std r6,VCPU_UAMOR(r9)
 	li r6,0
 	mtspr SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Restore host DABR and DABRX */
 	ld r5,HSTATE_DABR(r13)
@@ -668,10 +864,12 @@ hdec_soon:
 	mtspr SPRN_DABRX,r6
 
 	/* Switch DSCR back to host value */
+BEGIN_FTR_SECTION
 	mfspr r8, SPRN_DSCR
 	ld r7, HSTATE_DSCR(r13)
 	std r8, VCPU_DSCR(r7)
 	mtspr SPRN_DSCR, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Save non-volatile GPRs */
 	std r14, VCPU_GPR(r14)(r9)
@@ -735,21 +933,31 @@ hdec_soon:
 	mfspr r6, SPRN_PMC4
 	mfspr r7, SPRN_PMC5
 	mfspr r8, SPRN_PMC6
+BEGIN_FTR_SECTION
+	mfspr r10, SPRN_PMC7
+	mfspr r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	stw r3, VCPU_PMC(r9)
 	stw r4, VCPU_PMC + 4(r9)
 	stw r5, VCPU_PMC + 8(r9)
 	stw r6, VCPU_PMC + 12(r9)
 	stw r7, VCPU_PMC + 16(r9)
 	stw r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+	stw r10, VCPU_PMC + 24(r9)
+	stw r11, VCPU_PMC + 28(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 22:
 	/* save FP state */
 	mr r3, r9
 	bl .kvmppc_save_fp
 
-	/* Secondary threads go off to take a nap */
+	/* Secondary threads go off to take a nap on POWER7 */
+BEGIN_FTR_SECTION
 	lwz r0,VCPU_PTID(r3)
 	cmpwi r0,0
 	bne secondary_nap
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/*
 	 * Reload DEC. HDEC interrupts were disabled when
@@ -771,12 +979,20 @@ hdec_soon:
 	lwz r6, HSTATE_PMC + 12(r13)
 	lwz r8, HSTATE_PMC + 16(r13)
 	lwz r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+	lwz r10, HSTATE_PMC + 24(r13)
+	lwz r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr SPRN_PMC1, r3
 	mtspr SPRN_PMC2, r4
 	mtspr SPRN_PMC3, r5
 	mtspr SPRN_PMC4, r6
 	mtspr SPRN_PMC5, r8
 	mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+	mtspr SPRN_PMC7, r10
+	mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld r3, HSTATE_MMCR(r13)
 	ld r4, HSTATE_MMCR + 8(r13)
 	ld r5, HSTATE_MMCR + 16(r13)
@@ -802,7 +1018,7 @@ hdec_soon:
 	cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
 
 	/* RFI into the highmem handler, or branch to interrupt handler */
-	mfmsr r6
+12:	mfmsr r6
 	mtctr r12
 	li r0, MSR_RI
 	andc r6, r6, r0
@@ -812,7 +1028,11 @@ hdec_soon:
 	beqctr
 	RFI
 
-11:	mtspr SPRN_HSRR0, r8
+11:
+BEGIN_FTR_SECTION
+	b 12b
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+	mtspr SPRN_HSRR0, r8
 	mtspr SPRN_HSRR1, r7
 	ba 0x500
 