diff options
-rw-r--r-- | arch/powerpc/include/asm/kvm_book3s_asm.h | 8 | ||||
-rw-r--r-- | arch/powerpc/include/asm/kvm_ppc.h | 29 | ||||
-rw-r--r-- | arch/powerpc/kernel/asm-offsets.c | 2 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_hv.c | 26 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_rmhandlers.S | 102 | ||||
-rw-r--r-- | arch/powerpc/kvm/book3s_xics.c | 2 | ||||
-rw-r--r-- | arch/powerpc/sysdev/xics/icp-native.c | 8 |
7 files changed, 158 insertions, 19 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index cdc3d2717cc6..9039d3c97eec 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h | |||
@@ -20,6 +20,11 @@ | |||
20 | #ifndef __ASM_KVM_BOOK3S_ASM_H__ | 20 | #ifndef __ASM_KVM_BOOK3S_ASM_H__ |
21 | #define __ASM_KVM_BOOK3S_ASM_H__ | 21 | #define __ASM_KVM_BOOK3S_ASM_H__ |
22 | 22 | ||
23 | /* XICS ICP register offsets */ | ||
24 | #define XICS_XIRR 4 | ||
25 | #define XICS_MFRR 0xc | ||
26 | #define XICS_IPI 2 /* interrupt source # for IPIs */ | ||
27 | |||
23 | #ifdef __ASSEMBLY__ | 28 | #ifdef __ASSEMBLY__ |
24 | 29 | ||
25 | #ifdef CONFIG_KVM_BOOK3S_HANDLER | 30 | #ifdef CONFIG_KVM_BOOK3S_HANDLER |
@@ -81,10 +86,11 @@ struct kvmppc_host_state { | |||
81 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 86 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
82 | u8 hwthread_req; | 87 | u8 hwthread_req; |
83 | u8 hwthread_state; | 88 | u8 hwthread_state; |
84 | 89 | u8 host_ipi; | |
85 | struct kvm_vcpu *kvm_vcpu; | 90 | struct kvm_vcpu *kvm_vcpu; |
86 | struct kvmppc_vcore *kvm_vcore; | 91 | struct kvmppc_vcore *kvm_vcore; |
87 | unsigned long xics_phys; | 92 | unsigned long xics_phys; |
93 | u32 saved_xirr; | ||
88 | u64 dabr; | 94 | u64 dabr; |
89 | u64 host_mmcr[3]; | 95 | u64 host_mmcr[3]; |
90 | u32 host_pmc[8]; | 96 | u32 host_pmc[8]; |
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 6582eed321ba..1589fd8bf063 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h | |||
@@ -264,6 +264,21 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) | |||
264 | paca[cpu].kvm_hstate.xics_phys = addr; | 264 | paca[cpu].kvm_hstate.xics_phys = addr; |
265 | } | 265 | } |
266 | 266 | ||
267 | static inline u32 kvmppc_get_xics_latch(void) | ||
268 | { | ||
269 | u32 xirr = get_paca()->kvm_hstate.saved_xirr; | ||
270 | |||
271 | get_paca()->kvm_hstate.saved_xirr = 0; | ||
272 | |||
273 | return xirr; | ||
274 | } | ||
275 | |||
276 | static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi) | ||
277 | { | ||
278 | paca[cpu].kvm_hstate.host_ipi = host_ipi; | ||
279 | } | ||
280 | |||
281 | extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu); | ||
267 | extern void kvm_linear_init(void); | 282 | extern void kvm_linear_init(void); |
268 | 283 | ||
269 | #else | 284 | #else |
@@ -273,6 +288,18 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) | |||
273 | static inline void kvm_linear_init(void) | 288 | static inline void kvm_linear_init(void) |
274 | {} | 289 | {} |
275 | 290 | ||
291 | static inline u32 kvmppc_get_xics_latch(void) | ||
292 | { | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi) | ||
297 | {} | ||
298 | |||
299 | static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) | ||
300 | { | ||
301 | kvm_vcpu_kick(vcpu); | ||
302 | } | ||
276 | #endif | 303 | #endif |
277 | 304 | ||
278 | #ifdef CONFIG_KVM_XICS | 305 | #ifdef CONFIG_KVM_XICS |
@@ -393,4 +420,6 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb) | |||
393 | return ea; | 420 | return ea; |
394 | } | 421 | } |
395 | 422 | ||
423 | extern void xics_wake_cpu(int cpu); | ||
424 | |||
396 | #endif /* __POWERPC_KVM_PPC_H__ */ | 425 | #endif /* __POWERPC_KVM_PPC_H__ */ |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index dbfd5498f440..a791229329cf 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -574,6 +574,8 @@ int main(void) | |||
574 | HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); | 574 | HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); |
575 | HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); | 575 | HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); |
576 | HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); | 576 | HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); |
577 | HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr); | ||
578 | HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); | ||
577 | HSTATE_FIELD(HSTATE_MMCR, host_mmcr); | 579 | HSTATE_FIELD(HSTATE_MMCR, host_mmcr); |
578 | HSTATE_FIELD(HSTATE_PMC, host_pmc); | 580 | HSTATE_FIELD(HSTATE_PMC, host_pmc); |
579 | HSTATE_FIELD(HSTATE_PURR, host_purr); | 581 | HSTATE_FIELD(HSTATE_PURR, host_purr); |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 82ba00f68b07..16191915e8d0 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -66,6 +66,31 @@ | |||
66 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); | 66 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu); |
67 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); | 67 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); |
68 | 68 | ||
69 | void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) | ||
70 | { | ||
71 | int me; | ||
72 | int cpu = vcpu->cpu; | ||
73 | wait_queue_head_t *wqp; | ||
74 | |||
75 | wqp = kvm_arch_vcpu_wq(vcpu); | ||
76 | if (waitqueue_active(wqp)) { | ||
77 | wake_up_interruptible(wqp); | ||
78 | ++vcpu->stat.halt_wakeup; | ||
79 | } | ||
80 | |||
81 | me = get_cpu(); | ||
82 | |||
83 | /* CPU points to the first thread of the core */ | ||
84 | if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) { | ||
85 | int real_cpu = cpu + vcpu->arch.ptid; | ||
86 | if (paca[real_cpu].kvm_hstate.xics_phys) | ||
87 | xics_wake_cpu(real_cpu); | ||
88 | else if (cpu_online(cpu)) | ||
89 | smp_send_reschedule(cpu); | ||
90 | } | ||
91 | put_cpu(); | ||
92 | } | ||
93 | |||
69 | /* | 94 | /* |
70 | * We use the vcpu_load/put functions to measure stolen time. | 95 | * We use the vcpu_load/put functions to measure stolen time. |
71 | * Stolen time is counted as time when either the vcpu is able to | 96 | * Stolen time is counted as time when either the vcpu is able to |
@@ -985,7 +1010,6 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu) | |||
985 | } | 1010 | } |
986 | 1011 | ||
987 | extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | 1012 | extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); |
988 | extern void xics_wake_cpu(int cpu); | ||
989 | 1013 | ||
990 | static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, | 1014 | static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, |
991 | struct kvm_vcpu *vcpu) | 1015 | struct kvm_vcpu *vcpu) |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 0f23bb851711..56f8927b0ddf 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -79,10 +79,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline) | |||
79 | * * | 79 | * * |
80 | *****************************************************************************/ | 80 | *****************************************************************************/ |
81 | 81 | ||
82 | #define XICS_XIRR 4 | ||
83 | #define XICS_QIRR 0xc | ||
84 | #define XICS_IPI 2 /* interrupt source # for IPIs */ | ||
85 | |||
86 | /* | 82 | /* |
87 | * We come in here when wakened from nap mode on a secondary hw thread. | 83 | * We come in here when wakened from nap mode on a secondary hw thread. |
88 | * Relocation is off and most register values are lost. | 84 | * Relocation is off and most register values are lost. |
@@ -122,7 +118,7 @@ kvm_start_guest: | |||
122 | beq 27f | 118 | beq 27f |
123 | 25: ld r5,HSTATE_XICS_PHYS(r13) | 119 | 25: ld r5,HSTATE_XICS_PHYS(r13) |
124 | li r0,0xff | 120 | li r0,0xff |
125 | li r6,XICS_QIRR | 121 | li r6,XICS_MFRR |
126 | li r7,XICS_XIRR | 122 | li r7,XICS_XIRR |
127 | lwzcix r8,r5,r7 /* get and ack the interrupt */ | 123 | lwzcix r8,r5,r7 /* get and ack the interrupt */ |
128 | sync | 124 | sync |
@@ -678,17 +674,91 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
678 | cmpwi r12,BOOK3S_INTERRUPT_SYSCALL | 674 | cmpwi r12,BOOK3S_INTERRUPT_SYSCALL |
679 | beq hcall_try_real_mode | 675 | beq hcall_try_real_mode |
680 | 676 | ||
681 | /* Check for mediated interrupts (could be done earlier really ...) */ | 677 | /* Only handle external interrupts here on arch 206 and later */ |
682 | BEGIN_FTR_SECTION | 678 | BEGIN_FTR_SECTION |
683 | cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL | 679 | b ext_interrupt_to_host |
684 | bne+ 1f | 680 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) |
685 | andi. r0,r11,MSR_EE | 681 | |
686 | beq 1f | 682 | /* External interrupt ? */ |
687 | mfspr r5,SPRN_LPCR | 683 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL |
688 | andi. r0,r5,LPCR_MER | 684 | bne+ ext_interrupt_to_host |
685 | |||
686 | /* External interrupt, first check for host_ipi. If this is | ||
687 | * set, we know the host wants us out so let's do it now | ||
688 | */ | ||
689 | lbz r0, HSTATE_HOST_IPI(r13) | ||
690 | cmpwi r0, 0 | ||
691 | bne ext_interrupt_to_host | ||
692 | |||
693 | /* Now read the interrupt from the ICP */ | ||
694 | ld r5, HSTATE_XICS_PHYS(r13) | ||
695 | li r7, XICS_XIRR | ||
696 | cmpdi r5, 0 | ||
697 | beq- ext_interrupt_to_host | ||
698 | lwzcix r3, r5, r7 | ||
699 | rlwinm. r0, r3, 0, 0xffffff | ||
700 | sync | ||
701 | bne 1f | ||
702 | |||
703 | /* Nothing pending in the ICP, check for mediated interrupts | ||
704 | * and bounce it to the guest | ||
705 | */ | ||
706 | andi. r0, r11, MSR_EE | ||
707 | beq ext_interrupt_to_host /* shouldn't happen ?? */ | ||
708 | mfspr r5, SPRN_LPCR | ||
709 | andi. r0, r5, LPCR_MER | ||
689 | bne bounce_ext_interrupt | 710 | bne bounce_ext_interrupt |
690 | 1: | 711 | b ext_interrupt_to_host /* shouldn't happen ?? */ |
691 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | 712 | |
713 | 1: /* We found something in the ICP... | ||
714 | * | ||
715 | * If it's not an IPI, stash it in the PACA and return to | ||
716 | * the host, we don't (yet) handle directing real external | ||
717 | * interrupts directly to the guest | ||
718 | */ | ||
719 | cmpwi r0, XICS_IPI | ||
720 | bne ext_stash_for_host | ||
721 | |||
722 | /* It's an IPI, clear the MFRR and EOI it */ | ||
723 | li r0, 0xff | ||
724 | li r6, XICS_MFRR | ||
725 | stbcix r0, r5, r6 /* clear the IPI */ | ||
726 | stwcix r3, r5, r7 /* EOI it */ | ||
727 | sync | ||
728 | |||
729 | /* We need to re-check host IPI now in case it got set in the | ||
730 | * meantime. If it's clear, we bounce the interrupt to the | ||
731 | * guest | ||
732 | */ | ||
733 | lbz r0, HSTATE_HOST_IPI(r13) | ||
734 | cmpwi r0, 0 | ||
735 | bne- 1f | ||
736 | |||
737 | /* All right, looks like an IPI for the guest, we need to set MER */ | ||
738 | mfspr r8,SPRN_LPCR | ||
739 | ori r8,r8,LPCR_MER | ||
740 | mtspr SPRN_LPCR,r8 | ||
741 | |||
742 | /* And if the guest EE is set, we can deliver immediately, else | ||
743 | * we return to the guest with MER set | ||
744 | */ | ||
745 | andi. r0, r11, MSR_EE | ||
746 | bne bounce_ext_interrupt | ||
747 | mr r4, r9 | ||
748 | b fast_guest_return | ||
749 | |||
750 | /* We raced with the host, we need to resend that IPI, bummer */ | ||
751 | 1: li r0, IPI_PRIORITY | ||
752 | stbcix r0, r5, r6 /* set the IPI */ | ||
753 | sync | ||
754 | b ext_interrupt_to_host | ||
755 | |||
756 | ext_stash_for_host: | ||
757 | /* It's not an IPI and it's for the host, stash it in the PACA | ||
758 | * before exit, it will be picked up by the host ICP driver | ||
759 | */ | ||
760 | stw r3, HSTATE_SAVED_XIRR(r13) | ||
761 | ext_interrupt_to_host: | ||
692 | 762 | ||
693 | guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ | 763 | guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ |
694 | /* Save DEC */ | 764 | /* Save DEC */ |
@@ -831,7 +901,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
831 | beq 44f | 901 | beq 44f |
832 | ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */ | 902 | ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */ |
833 | li r0,IPI_PRIORITY | 903 | li r0,IPI_PRIORITY |
834 | li r7,XICS_QIRR | 904 | li r7,XICS_MFRR |
835 | stbcix r0,r7,r8 /* trigger the IPI */ | 905 | stbcix r0,r7,r8 /* trigger the IPI */ |
836 | 44: srdi. r3,r3,1 | 906 | 44: srdi. r3,r3,1 |
837 | addi r6,r6,PACA_SIZE | 907 | addi r6,r6,PACA_SIZE |
@@ -1630,7 +1700,7 @@ secondary_nap: | |||
1630 | beq 37f | 1700 | beq 37f |
1631 | sync | 1701 | sync |
1632 | li r0, 0xff | 1702 | li r0, 0xff |
1633 | li r6, XICS_QIRR | 1703 | li r6, XICS_MFRR |
1634 | stbcix r0, r5, r6 /* clear the IPI */ | 1704 | stbcix r0, r5, r6 /* clear the IPI */ |
1635 | stwcix r3, r5, r7 /* EOI it */ | 1705 | stwcix r3, r5, r7 /* EOI it */ |
1636 | 37: sync | 1706 | 37: sync |
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index 53af848116f2..1417e65b6bbd 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c | |||
@@ -227,7 +227,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp, | |||
227 | kvmppc_book3s_queue_irqprio(icp->vcpu, | 227 | kvmppc_book3s_queue_irqprio(icp->vcpu, |
228 | BOOK3S_INTERRUPT_EXTERNAL_LEVEL); | 228 | BOOK3S_INTERRUPT_EXTERNAL_LEVEL); |
229 | if (!change_self) | 229 | if (!change_self) |
230 | kvm_vcpu_kick(icp->vcpu); | 230 | kvmppc_fast_vcpu_kick(icp->vcpu); |
231 | } | 231 | } |
232 | bail: | 232 | bail: |
233 | return success; | 233 | return success; |
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c index 48861d3fcd07..20b328bb494d 100644 --- a/arch/powerpc/sysdev/xics/icp-native.c +++ b/arch/powerpc/sysdev/xics/icp-native.c | |||
@@ -51,6 +51,12 @@ static struct icp_ipl __iomem *icp_native_regs[NR_CPUS]; | |||
51 | static inline unsigned int icp_native_get_xirr(void) | 51 | static inline unsigned int icp_native_get_xirr(void) |
52 | { | 52 | { |
53 | int cpu = smp_processor_id(); | 53 | int cpu = smp_processor_id(); |
54 | unsigned int xirr; | ||
55 | |||
56 | /* Handled an interrupt latched by KVM */ | ||
57 | xirr = kvmppc_get_xics_latch(); | ||
58 | if (xirr) | ||
59 | return xirr; | ||
54 | 60 | ||
55 | return in_be32(&icp_native_regs[cpu]->xirr.word); | 61 | return in_be32(&icp_native_regs[cpu]->xirr.word); |
56 | } | 62 | } |
@@ -138,6 +144,7 @@ static unsigned int icp_native_get_irq(void) | |||
138 | 144 | ||
139 | static void icp_native_cause_ipi(int cpu, unsigned long data) | 145 | static void icp_native_cause_ipi(int cpu, unsigned long data) |
140 | { | 146 | { |
147 | kvmppc_set_host_ipi(cpu, 1); | ||
141 | icp_native_set_qirr(cpu, IPI_PRIORITY); | 148 | icp_native_set_qirr(cpu, IPI_PRIORITY); |
142 | } | 149 | } |
143 | 150 | ||
@@ -151,6 +158,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id) | |||
151 | { | 158 | { |
152 | int cpu = smp_processor_id(); | 159 | int cpu = smp_processor_id(); |
153 | 160 | ||
161 | kvmppc_set_host_ipi(cpu, 0); | ||
154 | icp_native_set_qirr(cpu, 0xff); | 162 | icp_native_set_qirr(cpu, 0xff); |
155 | 163 | ||
156 | return smp_ipi_demux(); | 164 | return smp_ipi_demux(); |