author	Paul Mackerras <paulus@samba.org>	2012-10-14 21:16:14 -0400
committer	Alexander Graf <agraf@suse.de>	2012-10-30 05:54:54 -0400
commit	7b444c6710c6c4994e31eb19216ce055836e65c4 (patch)
tree	52dc3cea624002ca0cfc023b9336676850e6acbd
parent	512691d4907d7cf4b8d05c6f8572d1fa60ccec20 (diff)
KVM: PPC: Book3S HV: Fix some races in starting secondary threads
Subsequent patches implementing in-kernel XICS emulation will make it
possible for IPIs to arrive at secondary threads at arbitrary times.
This fixes some races in how we start the secondary threads, which if
not fixed could lead to occasional crashes of the host kernel.

This makes sure that (a) we have grabbed all the secondary threads,
and verified that they are no longer in the kernel, before we start
any thread, (b) that the secondary thread loads its vcpu pointer
after clearing the IPI that woke it up (so we don't miss a wakeup),
and (c) that the secondary thread clears its vcpu pointer before
incrementing the nap count.

It also removes unnecessary setting of the vcpu and vcore pointers in
the paca in kvmppc_core_vcpu_load.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
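[Editor's note: to make the ordering rules in (b) and (c) concrete, here is a
minimal user-space C sketch of the same protocol using C11 atomics. The names
(ipi_pending, vcpu_ptr, nap_count) and the atomics API are illustrative
assumptions only; the kernel itself uses paca fields, the XICS XIRR register,
and an lwsync barrier, as the diff below shows.]

	#include <stdatomic.h>
	#include <stddef.h>

	struct vcpu;                            /* opaque guest vCPU */

	static _Atomic(struct vcpu *) vcpu_ptr; /* set by the primary thread */
	static atomic_int ipi_pending;          /* stands in for the XICS IPI */
	static atomic_int nap_count;            /* threads known to be napping */

	/*
	 * (b) On wakeup, clear the IPI before loading the vcpu pointer, so
	 * a wakeup posted after the load cannot be acked and then lost.
	 */
	static struct vcpu *secondary_wakeup(void)
	{
		atomic_store(&ipi_pending, 0);  /* ack the interrupt first */
		return atomic_load(&vcpu_ptr);  /* then look for work */
	}

	/*
	 * (c) When going back to nap, clear the vcpu pointer before
	 * incrementing the nap count, so the primary never counts a thread
	 * as napping while it still holds a stale vcpu pointer.
	 */
	static void secondary_nap(void)
	{
		atomic_store(&vcpu_ptr, NULL);
		atomic_thread_fence(memory_order_release);  /* lwsync analogue */
		atomic_fetch_add(&nap_count, 1);
	}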
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	41
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	11
2 files changed, 32 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c5ddf048e19e..77dec0f8a030 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -64,8 +64,6 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
-	local_paca->kvm_hstate.kvm_vcpu = vcpu;
-	local_paca->kvm_hstate.kvm_vcore = vc;
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 		vc->stolen_tb += mftb() - vc->preempt_tb;
 }
@@ -880,6 +878,7 @@ static int kvmppc_grab_hwthread(int cpu)
 
 	/* Ensure the thread won't go into the kernel if it wakes */
 	tpaca->kvm_hstate.hwthread_req = 1;
+	tpaca->kvm_hstate.kvm_vcpu = NULL;
 
 	/*
 	 * If the thread is already executing in the kernel (e.g. handling
@@ -929,7 +928,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
 	smp_wmb();
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
 	if (vcpu->arch.ptid) {
-		kvmppc_grab_hwthread(cpu);
 		xics_wake_cpu(cpu);
 		++vc->n_woken;
 	}
@@ -955,7 +953,8 @@ static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
 
 /*
  * Check that we are on thread 0 and that any other threads in
- * this core are off-line.
+ * this core are off-line. Then grab the threads so they can't
+ * enter the kernel.
  */
 static int on_primary_thread(void)
 {
@@ -967,6 +966,17 @@ static int on_primary_thread(void)
 	while (++thr < threads_per_core)
 		if (cpu_online(cpu + thr))
 			return 0;
+
+	/* Grab all hw threads so they can't go into the kernel */
+	for (thr = 1; thr < threads_per_core; ++thr) {
+		if (kvmppc_grab_hwthread(cpu + thr)) {
+			/* Couldn't grab one; let the others go */
+			do {
+				kvmppc_release_hwthread(cpu + thr);
+			} while (--thr > 0);
+			return 0;
+		}
+	}
 	return 1;
 }
 
@@ -1015,16 +1025,6 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	}
 
 	/*
-	 * Make sure we are running on thread 0, and that
-	 * secondary threads are offline.
-	 */
-	if (threads_per_core > 1 && !on_primary_thread()) {
-		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
-			vcpu->arch.ret = -EBUSY;
-		goto out;
-	}
-
-	/*
 	 * Assign physical thread IDs, first to non-ceded vcpus
 	 * and then to ceded ones.
 	 */
@@ -1043,15 +1043,22 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
 		if (vcpu->arch.ceded)
 			vcpu->arch.ptid = ptid++;
 
+	/*
+	 * Make sure we are running on thread 0, and that
+	 * secondary threads are offline.
+	 */
+	if (threads_per_core > 1 && !on_primary_thread()) {
+		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+			vcpu->arch.ret = -EBUSY;
+		goto out;
+	}
+
 	vc->stolen_tb += mftb() - vc->preempt_tb;
 	vc->pcpu = smp_processor_id();
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
 		kvmppc_create_dtl_entry(vcpu, vc);
 	}
-	/* Grab any remaining hw threads so they can't go into the kernel */
-	for (i = ptid; i < threads_per_core; ++i)
-		kvmppc_grab_hwthread(vc->pcpu + i);
 
 	preempt_disable();
 	spin_unlock(&vc->lock);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 44b72feaff7d..1e90ef6191a3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -134,8 +134,11 @@ kvm_start_guest:
 
 27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
 
+	/* reload vcpu pointer after clearing the IPI */
+	ld	r4,HSTATE_KVM_VCPU(r13)
+	cmpdi	r4,0
 	/* if we have no vcpu to run, go back to sleep */
-	beq	cr1,kvm_no_guest
+	beq	kvm_no_guest
 
 	/* were we napping due to cede? */
 	lbz	r0,HSTATE_NAPPING(r13)
@@ -1587,6 +1590,10 @@ secondary_too_late:
 	.endr
 
 secondary_nap:
+	/* Clear our vcpu pointer so we don't come back in early */
+	li	r0, 0
+	std	r0, HSTATE_KVM_VCPU(r13)
+	lwsync
 	/* Clear any pending IPI - assume we're a secondary thread */
 	ld	r5, HSTATE_XICS_PHYS(r13)
 	li	r7, XICS_XIRR
@@ -1612,8 +1619,6 @@ secondary_nap:
 kvm_no_guest:
 	li	r0, KVM_HWTHREAD_IN_NAP
 	stb	r0, HSTATE_HWTHREAD_STATE(r13)
-	li	r0, 0
-	std	r0, HSTATE_KVM_VCPU(r13)
 
 	li	r3, LPCR_PECE0
 	mfspr	r4, SPRN_LPCR