Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
 arch/powerpc/kvm/book3s_hv.c | 51 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 34 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ea1600ff52b2..48d3c5d2ecc9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -51,6 +51,7 @@
 #include <asm/hvcall.h>
 #include <asm/switch_to.h>
 #include <asm/smp.h>
+#include <asm/dbell.h>
 #include <linux/gfp.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
@@ -84,9 +85,35 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
+static bool kvmppc_ipi_thread(int cpu)
+{
+	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		preempt_disable();
+		if (cpu_first_thread_sibling(cpu) ==
+		    cpu_first_thread_sibling(smp_processor_id())) {
+			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+			msg |= cpu_thread_in_core(cpu);
+			smp_mb();
+			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+			preempt_enable();
+			return true;
+		}
+		preempt_enable();
+	}
+
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
+	if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
+		xics_wake_cpu(cpu);
+		return true;
+	}
+#endif
+
+	return false;
+}
+
 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 {
-	int me;
 	int cpu = vcpu->cpu;
 	wait_queue_head_t *wqp;
 
@@ -96,20 +123,12 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 		++vcpu->stat.halt_wakeup;
 	}
 
-	me = get_cpu();
+	if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
+		return;
 
 	/* CPU points to the first thread of the core */
-	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
-#ifdef CONFIG_PPC_ICP_NATIVE
-		int real_cpu = cpu + vcpu->arch.ptid;
-		if (paca[real_cpu].kvm_hstate.xics_phys)
-			xics_wake_cpu(real_cpu);
-		else
-#endif
-		if (cpu_online(cpu))
-			smp_send_reschedule(cpu);
-	}
-	put_cpu();
+	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
+		smp_send_reschedule(cpu);
 }
 
 /*
@@ -1781,10 +1800,8 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
 	/* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
 	smp_wmb();
 	tpaca->kvm_hstate.kvm_vcpu = vcpu;
-#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
 	if (cpu != smp_processor_id())
-		xics_wake_cpu(cpu);
-#endif
+		kvmppc_ipi_thread(cpu);
 }
 
 static void kvmppc_wait_for_nap(void)
@@ -1933,7 +1950,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
  */
-static void kvmppc_run_core(struct kvmppc_vcore *vc)
+static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
 	struct kvm_vcpu *vcpu;
 	int i;
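
For orientation, the kick path after this patch reads roughly as sketched below. This is a reading aid, not verbatim kernel source: the explanatory comments are added here, and only identifiers that appear in the hunks above (kvmppc_ipi_thread(), vcpu->arch.ptid, xics_wake_cpu(), smp_send_reschedule()) are used.

/* Sketch of kvmppc_fast_vcpu_kick_hv() as it looks with this patch applied. */
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	/* ... wake any task sleeping on the vcpu's wait queue (unchanged) ... */

	/*
	 * cpu points to the first thread of the core the vcore runs on;
	 * adding vcpu->arch.ptid selects the exact hardware thread.
	 * kvmppc_ipi_thread() sends a msgsnd doorbell when the target is a
	 * sibling thread of the calling CPU on a POWER8 (ARCH_207S) core,
	 * and otherwise falls back to an XICS IPI via xics_wake_cpu() when
	 * paca[cpu].kvm_hstate.xics_phys is set.
	 */
	if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
		return;

	/* Last resort: a generic reschedule IPI to the core's first thread. */
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}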