summaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm
diff options
context:
space:
mode:
authorSuresh E. Warrier <warrier@linux.vnet.ibm.com>2015-12-21 17:22:51 -0500
committerPaul Mackerras <paulus@samba.org>2016-02-29 00:25:06 -0500
commite17769eb8c897101e2c6df62ec397e450b6e53b4 (patch)
treedc2a7e7ec2d299d7c121d9752603ac2db97825a0 /arch/powerpc/kvm
parent0c2a66062470cd1f6d11ae6db31059f59d3f725f (diff)
KVM: PPC: Book3S HV: Send IPI to host core to wake VCPU
This patch adds support to real-mode KVM to search for a core running in the host partition and send it an IPI message with the VCPU to be woken. This avoids having to switch to the host partition to complete an H_IPI hypercall when the VCPU which is the target of the H_IPI is not loaded (is not running in the guest). The patch also includes the support in the IPI handler running in the host to do the wakeup by calling kvmppc_xics_ipi_action for the PPC_MSG_RM_HOST_ACTION message. When a guest is being destroyed, we need to ensure that there are no pending IPIs waiting to wake up a VCPU before we free the VCPUs of the guest. This is accomplished by: - Forcing a PPC_MSG_CALL_FUNCTION IPI to be completed by all CPUs before freeing any VCPUs in kvm_arch_destroy_vm(). - Ensuring that any PPC_MSG_RM_HOST_ACTION messages are executed before any other PPC_MSG_CALL_FUNCTION messages. Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com> Acked-by: Michael Ellerman <mpe@ellerman.id.au> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c92
-rw-r--r--arch/powerpc/kvm/powerpc.c10
2 files changed, 99 insertions, 3 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 43ffbfe2a18a..e673fb9fee98 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -51,11 +51,84 @@ static void ics_rm_check_resend(struct kvmppc_xics *xics,
51 51
52/* -- ICP routines -- */ 52/* -- ICP routines -- */
53 53
/*
 * Ask host core 'hcore' to wake the given VCPU on our behalf.
 *
 * Stashes the VCPU pointer in that core's rm_data slot, queues a
 * PPC_MSG_RM_HOST_ACTION message, and raises an IPI to the core's
 * first hardware thread (hcore << threads_shift).  The _rm IPI
 * primitive suggests this is meant to be callable from real mode —
 * consistent with this file being the real-mode XICS emulation.
 *
 * NOTE(review): the rm_data store here relies on the smp_wmb() done
 * by grab_next_hostcore() after it set rm_action, so the IPI handler
 * observes rm_action before rm_data is consumed — confirm against
 * kvmppc_xics_ipi_action().
 */
54#ifdef CONFIG_SMP
55static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
56{
57 int hcpu;
58
/* First hardware thread of the target core. */
59 hcpu = hcore << threads_shift;
60 kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
61 smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
62 icp_native_cause_ipi_rm(hcpu);
63}
64#else
/* UP host: there is no other core to message; callers fall back to
 * the "too hard" path since find_available_hostcore() returns -1. */
65static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
66#endif
67
68/*
69 * We start the search from our current CPU Id in the core map
70 * and go in a circle until we get back to our ID looking for a
71 * core that is running in host context and that hasn't already
72 * been targeted for another rm_host_ops.
73 *
74 * In the future, could consider using a fairer algorithm (one
75 * that distributes the IPIs better)
76 *
77 * Returns -1, if no CPU could be found in the host
78 * Else, returns a CPU Id which has been reserved for use
79 */
80static inline int grab_next_hostcore(int start,
81 struct kvmppc_host_rm_core *rm_core, int max, int action)
82{
83 bool success;
84 int core;
85 union kvmppc_rm_state old, new;
86
/*
 * Scan cores (start, max) — note start itself is excluded — for one
 * that is currently in the host (in_host) and has no rm_action
 * pending, i.e. has not already been claimed for another request.
 */
87 for (core = start + 1; core < max; core++) {
/* Snapshot the whole rm_state word; 'new' starts as a copy and only
 * rm_action is changed, so the cmpxchg below fails if anything in
 * the state word changed concurrently. */
88 old = new = READ_ONCE(rm_core[core].rm_state);
89
90 if (!old.in_host || old.rm_action)
91 continue;
92
93 /* Try to grab this host core if not taken already. */
94 new.rm_action = action;
95
/* Atomic claim: only one real-mode caller can win this core. */
96 success = cmpxchg64(&rm_core[core].rm_state.raw,
97 old.raw, new.raw) == old.raw;
98 if (success) {
99 /*
100 * Make sure that the store to the rm_action is made
101 * visible before we return to caller (and the
102 * subsequent store to rm_data) to synchronize with
103 * the IPI handler.
104 */
105 smp_wmb();
106 return core;
107 }
108 }
109
/* No free host core found in (start, max). */
110 return -1;
111}
112
/*
 * Find and reserve a host core to run 'action' on.
 *
 * Implements the circular search in two passes of grab_next_hostcore():
 * first (my_core, cpu_nr_cores()), then — because the failed first pass
 * returns -1, which is reused as 'start', making the second pass begin
 * at core 0 — the wrap-around range [0, my_core).  Our own core is
 * deliberately never considered (we are running on it, in the guest).
 *
 * Returns the reserved core id, or -1 if no host core is available.
 */
113static inline int find_available_hostcore(int action)
114{
115 int core;
116 int my_core = smp_processor_id() >> threads_shift;
117 struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;
118
119 core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
120 if (core == -1)
/* core == -1 here, so this scans cores 0 .. my_core-1. */
121 core = grab_next_hostcore(core, rm_core, my_core, action);
122
123 return core;
124}
125
54static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, 126static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
55 struct kvm_vcpu *this_vcpu) 127 struct kvm_vcpu *this_vcpu)
56{ 128{
57 struct kvmppc_icp *this_icp = this_vcpu->arch.icp; 129 struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
58 int cpu; 130 int cpu;
131 int hcore;
59 132
60 /* Mark the target VCPU as having an interrupt pending */ 133 /* Mark the target VCPU as having an interrupt pending */
61 vcpu->stat.queue_intr++; 134 vcpu->stat.queue_intr++;
@@ -67,11 +140,22 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
67 return; 140 return;
68 } 141 }
69 142
70 /* Check if the core is loaded, if not, too hard */ 143 /*
144 * Check if the core is loaded,
145 * if not, find an available host core to post to wake the VCPU,
146 * if we can't find one, set up state to eventually return too hard.
147 */
71 cpu = vcpu->arch.thread_cpu; 148 cpu = vcpu->arch.thread_cpu;
72 if (cpu < 0 || cpu >= nr_cpu_ids) { 149 if (cpu < 0 || cpu >= nr_cpu_ids) {
73 this_icp->rm_action |= XICS_RM_KICK_VCPU; 150 hcore = -1;
74 this_icp->rm_kick_target = vcpu; 151 if (kvmppc_host_rm_ops_hv)
152 hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
153 if (hcore != -1) {
154 icp_send_hcore_msg(hcore, vcpu);
155 } else {
156 this_icp->rm_action |= XICS_RM_KICK_VCPU;
157 this_icp->rm_kick_target = vcpu;
158 }
75 return; 159 return;
76 } 160 }
77 161
@@ -655,7 +739,9 @@ void kvmppc_xics_ipi_action(void)
655 if (rm_corep->rm_data) { 739 if (rm_corep->rm_data) {
656 rm_host_ipi_action(rm_corep->rm_state.rm_action, 740 rm_host_ipi_action(rm_corep->rm_state.rm_action,
657 rm_corep->rm_data); 741 rm_corep->rm_data);
742 /* Order these stores against the real mode KVM */
658 rm_corep->rm_data = NULL; 743 rm_corep->rm_data = NULL;
744 smp_wmb();
659 rm_corep->rm_state.rm_action = 0; 745 rm_corep->rm_state.rm_action = 0;
660 } 746 }
661} 747}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 69f897da782d..9258675e2ff7 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -437,6 +437,16 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
437 unsigned int i; 437 unsigned int i;
438 struct kvm_vcpu *vcpu; 438 struct kvm_vcpu *vcpu;
439 439
440#ifdef CONFIG_KVM_XICS
441 /*
442 * We call kick_all_cpus_sync() to ensure that all
443 * CPUs have executed any pending IPIs before we
444 * continue and free VCPUs structures below.
445 */
446 if (is_kvmppc_hv_enabled(kvm))
447 kick_all_cpus_sync();
448#endif
449
440 kvm_for_each_vcpu(i, vcpu, kvm) 450 kvm_for_each_vcpu(i, vcpu, kvm)
441 kvm_arch_vcpu_free(vcpu); 451 kvm_arch_vcpu_free(vcpu);
442 452