author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2013-04-17 16:30:50 -0400
committer  Alexander Graf <agraf@suse.de>                      2013-04-26 14:27:31 -0400
commit     54695c3088a74e25474db8eb6b490b45d1aeb0ca (patch)
tree       4ecaa9c41857cf4b380f54017830e2aeb8dc56fe
parent     bc5ad3f3701116e7db57268e6f89010ec714697e (diff)
KVM: PPC: Book3S HV: Speed up wakeups of CPUs on HV KVM
Currently, we wake up a CPU by sending a host IPI with
smp_send_reschedule() to thread 0 of that core, which will take all
threads out of the guest, and cause them to re-evaluate their
interrupt status on the way back in.

This adds a mechanism to differentiate real host IPIs from IPIs sent
by KVM for guest threads to poke each other, in order to target the
guest threads precisely when possible and avoid that global switch of
the core to host state. We then use this new facility in the
in-kernel XICS code.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h  |   8
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h         |  29
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c          |   2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c               |  26
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S    | 102
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c             |   2
-rw-r--r--  arch/powerpc/sysdev/xics/icp-native.c      |   8
7 files changed, 158 insertions(+), 19 deletions(-)
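In outline, the IPI protocol after this patch looks like the condensed C sketch below. This is an editorial restatement drawn from the hunks that follow, not extra code in the patch: the declarations come from the diff, the kvmppc_fast_vcpu_kick() sketch omits the wait-queue wakeup, CPU-validity checks and get_cpu()/put_cpu() pinning that the real function performs, and the usual kernel context (paca[], the ICP register accessors) is assumed.

/* Host side: a genuine host IPI is flagged in the target's PACA before the
 * ICP is poked, so the real-mode external-interrupt handler knows it must
 * bring that thread all the way back to host state. */
static void icp_native_cause_ipi(int cpu, unsigned long data)
{
	kvmppc_set_host_ipi(cpu, 1);		/* paca[cpu].kvm_hstate.host_ipi = 1 */
	icp_native_set_qirr(cpu, IPI_PRIORITY);
}

/* KVM side: poking a guest thread targets the exact hardware thread via
 * xics_wake_cpu() and leaves host_ipi clear, so the IPI can be handled in
 * real mode without switching the whole core to host state. */
void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int cpu = vcpu->cpu;			/* first thread of the core */
	int real_cpu = cpu + vcpu->arch.ptid;	/* thread actually running this vcpu */

	if (paca[real_cpu].kvm_hstate.xics_phys)
		xics_wake_cpu(real_cpu);	/* targeted XICS IPI, host_ipi stays 0 */
	else if (cpu_online(cpu))
		smp_send_reschedule(cpu);	/* fall back to the old host IPI */
}

/* Host ICP driver: if the real-mode code already read and stashed an XIRR
 * that was meant for the host, consume that latched value instead of
 * reading the hardware again. */
static inline unsigned int icp_native_get_xirr(void)
{
	unsigned int xirr = kvmppc_get_xics_latch();	/* returns and clears saved_xirr */

	return xirr ? xirr : in_be32(&icp_native_regs[smp_processor_id()]->xirr.word);
}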
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index cdc3d2717cc6..9039d3c97eec 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -20,6 +20,11 @@
 #ifndef __ASM_KVM_BOOK3S_ASM_H__
 #define __ASM_KVM_BOOK3S_ASM_H__
 
+/* XICS ICP register offsets */
+#define XICS_XIRR		4
+#define XICS_MFRR		0xc
+#define XICS_IPI		2	/* interrupt source # for IPIs */
+
 #ifdef __ASSEMBLY__
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
@@ -81,10 +86,11 @@ struct kvmppc_host_state {
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	u8 hwthread_req;
 	u8 hwthread_state;
-
+	u8 host_ipi;
 	struct kvm_vcpu *kvm_vcpu;
 	struct kvmppc_vcore *kvm_vcore;
 	unsigned long xics_phys;
+	u32 saved_xirr;
 	u64 dabr;
 	u64 host_mmcr[3];
 	u32 host_pmc[8];
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 6582eed321ba..1589fd8bf063 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -264,6 +264,21 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 	paca[cpu].kvm_hstate.xics_phys = addr;
 }
 
+static inline u32 kvmppc_get_xics_latch(void)
+{
+	u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+
+	get_paca()->kvm_hstate.saved_xirr = 0;
+
+	return xirr;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{
+	paca[cpu].kvm_hstate.host_ipi = host_ipi;
+}
+
+extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
 extern void kvm_linear_init(void);
 
 #else
@@ -273,6 +288,18 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 static inline void kvm_linear_init(void)
 {}
 
+static inline u32 kvmppc_get_xics_latch(void)
+{
+	return 0;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	kvm_vcpu_kick(vcpu);
+}
 #endif
 
 #ifdef CONFIG_KVM_XICS
@@ -393,4 +420,6 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
 	return ea;
 }
 
+extern void xics_wake_cpu(int cpu);
+
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index dbfd5498f440..a791229329cf 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -574,6 +574,8 @@ int main(void)
 	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
 	HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
 	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
+	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
 	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
 	HSTATE_FIELD(HSTATE_PMC, host_pmc);
 	HSTATE_FIELD(HSTATE_PURR, host_purr);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 82ba00f68b07..16191915e8d0 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -66,6 +66,31 @@
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
+void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	int me;
+	int cpu = vcpu->cpu;
+	wait_queue_head_t *wqp;
+
+	wqp = kvm_arch_vcpu_wq(vcpu);
+	if (waitqueue_active(wqp)) {
+		wake_up_interruptible(wqp);
+		++vcpu->stat.halt_wakeup;
+	}
+
+	me = get_cpu();
+
+	/* CPU points to the first thread of the core */
+	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+		int real_cpu = cpu + vcpu->arch.ptid;
+		if (paca[real_cpu].kvm_hstate.xics_phys)
+			xics_wake_cpu(real_cpu);
+		else if (cpu_online(cpu))
+			smp_send_reschedule(cpu);
+	}
+	put_cpu();
+}
+
 /*
  * We use the vcpu_load/put functions to measure stolen time.
  * Stolen time is counted as time when either the vcpu is able to
@@ -985,7 +1010,6 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
 }
 
 extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern void xics_wake_cpu(int cpu);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 				   struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 0f23bb851711..56f8927b0ddf 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -79,10 +79,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
  *                                                                           *
  *****************************************************************************/
 
-#define XICS_XIRR		4
-#define XICS_QIRR		0xc
-#define XICS_IPI		2	/* interrupt source # for IPIs */
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -122,7 +118,7 @@ kvm_start_guest:
 	beq	27f
 25:	ld	r5,HSTATE_XICS_PHYS(r13)
 	li	r0,0xff
-	li	r6,XICS_QIRR
+	li	r6,XICS_MFRR
 	li	r7,XICS_XIRR
 	lwzcix	r8,r5,r7		/* get and ack the interrupt */
 	sync
@@ -678,17 +674,91 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
 	beq	hcall_try_real_mode
 
-	/* Check for mediated interrupts (could be done earlier really ...) */
+	/* Only handle external interrupts here on arch 206 and later */
 BEGIN_FTR_SECTION
-	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
-	bne+	1f
-	andi.	r0,r11,MSR_EE
-	beq	1f
-	mfspr	r5,SPRN_LPCR
-	andi.	r0,r5,LPCR_MER
+	b	ext_interrupt_to_host
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
+
+	/* External interrupt ? */
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	bne+	ext_interrupt_to_host
+
+	/* External interrupt, first check for host_ipi. If this is
+	 * set, we know the host wants us out so let's do it now
+	 */
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	cmpwi	r0, 0
+	bne	ext_interrupt_to_host
+
+	/* Now read the interrupt from the ICP */
+	ld	r5, HSTATE_XICS_PHYS(r13)
+	li	r7, XICS_XIRR
+	cmpdi	r5, 0
+	beq-	ext_interrupt_to_host
+	lwzcix	r3, r5, r7
+	rlwinm.	r0, r3, 0, 0xffffff
+	sync
+	bne	1f
+
+	/* Nothing pending in the ICP, check for mediated interrupts
+	 * and bounce it to the guest
+	 */
+	andi.	r0, r11, MSR_EE
+	beq	ext_interrupt_to_host /* shouldn't happen ?? */
+	mfspr	r5, SPRN_LPCR
+	andi.	r0, r5, LPCR_MER
 	bne	bounce_ext_interrupt
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+	b	ext_interrupt_to_host /* shouldn't happen ?? */
+
+1:	/* We found something in the ICP...
+	 *
+	 * If it's not an IPI, stash it in the PACA and return to
+	 * the host, we don't (yet) handle directing real external
+	 * interrupts directly to the guest
+	 */
+	cmpwi	r0, XICS_IPI
+	bne	ext_stash_for_host
+
+	/* It's an IPI, clear the MFRR and EOI it */
+	li	r0, 0xff
+	li	r6, XICS_MFRR
+	stbcix	r0, r5, r6		/* clear the IPI */
+	stwcix	r3, r5, r7		/* EOI it */
+	sync
+
+	/* We need to re-check host IPI now in case it got set in the
+	 * meantime. If it's clear, we bounce the interrupt to the
+	 * guest
+	 */
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	cmpwi	r0, 0
+	bne-	1f
+
+	/* Allright, looks like an IPI for the guest, we need to set MER */
+	mfspr	r8,SPRN_LPCR
+	ori	r8,r8,LPCR_MER
+	mtspr	SPRN_LPCR,r8
+
+	/* And if the guest EE is set, we can deliver immediately, else
+	 * we return to the guest with MER set
+	 */
+	andi.	r0, r11, MSR_EE
+	bne	bounce_ext_interrupt
+	mr	r4, r9
+	b	fast_guest_return
+
+	/* We raced with the host, we need to resend that IPI, bummer */
+1:	li	r0, IPI_PRIORITY
+	stbcix	r0, r5, r6		/* set the IPI */
+	sync
+	b	ext_interrupt_to_host
+
+ext_stash_for_host:
+	/* It's not an IPI and it's for the host, stash it in the PACA
+	 * before exit, it will be picked up by the host ICP driver
+	 */
+	stw	r3, HSTATE_SAVED_XIRR(r13)
+ext_interrupt_to_host:
 
 guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
 	/* Save DEC */
@@ -831,7 +901,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	beq	44f
 	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
 	li	r0,IPI_PRIORITY
-	li	r7,XICS_QIRR
+	li	r7,XICS_MFRR
 	stbcix	r0,r7,r8		/* trigger the IPI */
 44:	srdi.	r3,r3,1
 	addi	r6,r6,PACA_SIZE
@@ -1630,7 +1700,7 @@ secondary_nap:
 	beq	37f
 	sync
 	li	r0, 0xff
-	li	r6, XICS_QIRR
+	li	r6, XICS_MFRR
 	stbcix	r0, r5, r6		/* clear the IPI */
 	stwcix	r3, r5, r7		/* EOI it */
 37:	sync
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 53af848116f2..1417e65b6bbd 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -227,7 +227,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp,
 			kvmppc_book3s_queue_irqprio(icp->vcpu,
 					BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 		if (!change_self)
-			kvm_vcpu_kick(icp->vcpu);
+			kvmppc_fast_vcpu_kick(icp->vcpu);
 	}
  bail:
 	return success;
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 48861d3fcd07..20b328bb494d 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -51,6 +51,12 @@ static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
 static inline unsigned int icp_native_get_xirr(void)
 {
 	int cpu = smp_processor_id();
+	unsigned int xirr;
+
+	/* Handled an interrupt latched by KVM */
+	xirr = kvmppc_get_xics_latch();
+	if (xirr)
+		return xirr;
 
 	return in_be32(&icp_native_regs[cpu]->xirr.word);
 }
@@ -138,6 +144,7 @@ static unsigned int icp_native_get_irq(void)
 
 static void icp_native_cause_ipi(int cpu, unsigned long data)
 {
+	kvmppc_set_host_ipi(cpu, 1);
 	icp_native_set_qirr(cpu, IPI_PRIORITY);
 }
 
@@ -151,6 +158,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
 {
 	int cpu = smp_processor_id();
 
+	kvmppc_set_host_ipi(cpu, 0);
 	icp_native_set_qirr(cpu, 0xff);
 
 	return smp_ipi_demux();