author     Paul Mackerras <paulus@samba.org>  2014-01-08 05:25:20 -0500
committer  Alexander Graf <agraf@suse.de>     2014-01-27 10:00:59 -0500
commit     e0b7ec058c0eb7ba8d5d937d81de2bd16db6970e (patch)
tree       32b266bf4e3c497ab0a306731c01761b2dde11ce /arch/powerpc/kvm/book3s_hv.c
parent     eee7ff9d2cc0eaaa00496bdf4193144104c7dc63 (diff)
KVM: PPC: Book3S HV: Align physical and virtual CPU thread numbers
On a threaded processor such as POWER7, we group VCPUs into virtual cores and arrange that the VCPUs in a virtual core run on the same physical core. Currently we don't enforce any correspondence between virtual thread numbers within a virtual core and physical thread numbers. Physical threads are allocated starting at 0 on a first-come first-served basis to runnable virtual threads (VCPUs).

POWER8 implements a new "msgsndp" instruction which guest kernels can use to interrupt other threads in the same core or sub-core. Since the instruction takes the destination physical thread ID as a parameter, it becomes necessary to align the physical thread IDs with the virtual thread IDs, that is, to make sure virtual thread N within a virtual core always runs on physical thread N.

This means that it's possible that thread 0, which is where we call __kvmppc_vcore_entry, may end up running some other vcpu than the one whose task called kvmppc_run_core(), or it may end up running no vcpu at all, if for example thread 0 of the virtual core is currently executing in userspace. However, we do need thread 0 to be responsible for switching the MMU -- a previous version of this patch that had other threads switching the MMU was found to be responsible for occasional memory corruption and machine check interrupts in the guest on POWER7 machines.

To accommodate this, we no longer pass the vcpu pointer to __kvmppc_vcore_entry, but instead let the assembly code load it from the PACA. Since the assembly code will need to know the kvm pointer and the thread ID for threads which don't have a vcpu, we move the thread ID into the PACA and we add a kvm pointer to the virtual core structure.

In the case where thread 0 has no vcpu to run, it still calls into kvmppc_hv_entry in order to do the MMU switch, and then naps until either its vcpu is ready to run in the guest, or some other thread needs to exit the guest. In the latter case, thread 0 jumps to the code that switches the MMU back to the host.

This control flow means that now we switch the MMU before loading any guest vcpu state. Similarly, on guest exit we now save all the guest vcpu state before switching the MMU back to the host. This has required substantial code movement, making the diff rather large.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
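To make the thread-numbering invariant concrete, here is a minimal illustrative sketch (plain C, not kernel code); the struct and helper names are hypothetical, but the arithmetic mirrors the patch below: ptid = vcpu_id - first_vcpuid, first_vcpuid = core * threads_per_core, and the physical CPU a vcpu runs on is the core's base CPU plus its ptid.

/*
 * Illustrative sketch of the virtual/physical thread alignment.
 * "vcore_map" and these helpers are hypothetical stand-ins; only
 * the arithmetic is taken from the patch.
 */
struct vcore_map {
	int first_vcpuid;	/* vcpu_id of virtual thread 0 (core * threads_per_core) */
	int pcpu;		/* base physical CPU of the physical core */
};

/* Thread number within the core: virtual thread N == physical thread N */
static int ptid_of(const struct vcore_map *vc, int vcpu_id)
{
	return vcpu_id - vc->first_vcpuid;
}

/* Physical CPU this vcpu must run on */
static int pcpu_of(const struct vcore_map *vc, int vcpu_id)
{
	return vc->pcpu + ptid_of(vc, vcpu_id);
}

For example, with threads_per_core = 8, vcpu_id 10 belongs to virtual core 1, so first_vcpuid is 8 and ptid is 2; the vcpu must run on physical thread 2 of its core, and a guest msgsndp targeting thread 2 therefore reaches the vcpu the guest intended.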
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c  46
1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7e1813ceabc1..7da53cd215db 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -990,6 +990,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 		init_waitqueue_head(&vcore->wq);
 		vcore->preempt_tb = TB_NIL;
 		vcore->lpcr = kvm->arch.lpcr;
+		vcore->first_vcpuid = core * threads_per_core;
+		vcore->kvm = kvm;
 	}
 	kvm->arch.vcores[core] = vcore;
 	kvm->arch.online_vcores++;
@@ -1003,6 +1005,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	++vcore->num_threads;
 	spin_unlock(&vcore->lock);
 	vcpu->arch.vcore = vcore;
+	vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
 
 	vcpu->arch.cpu_type = KVM_CPU_3S_64;
 	kvmppc_sanity_check(vcpu);
@@ -1066,7 +1069,7 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
 	}
 }
 
-extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern void __kvmppc_vcore_entry(void);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 				   struct kvm_vcpu *vcpu)
@@ -1140,15 +1143,16 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
 	tpaca = &paca[cpu];
 	tpaca->kvm_hstate.kvm_vcpu = vcpu;
 	tpaca->kvm_hstate.kvm_vcore = vc;
-	tpaca->kvm_hstate.napping = 0;
+	tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
 	vcpu->cpu = vc->pcpu;
 	smp_wmb();
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
-	if (vcpu->arch.ptid) {
+	if (cpu != smp_processor_id()) {
 #ifdef CONFIG_KVM_XICS
 		xics_wake_cpu(cpu);
 #endif
-		++vc->n_woken;
+		if (vcpu->arch.ptid)
+			++vc->n_woken;
 	}
 #endif
 }
@@ -1205,10 +1209,10 @@ static int on_primary_thread(void)
  */
 static void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
+	struct kvm_vcpu *vcpu, *vnext;
 	long ret;
 	u64 now;
-	int ptid, i, need_vpa_update;
+	int i, need_vpa_update;
 	int srcu_idx;
 	struct kvm_vcpu *vcpus_to_update[threads_per_core];
 
@@ -1246,25 +1250,6 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	}
 
 	/*
-	 * Assign physical thread IDs, first to non-ceded vcpus
-	 * and then to ceded ones.
-	 */
-	ptid = 0;
-	vcpu0 = NULL;
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
-		if (!vcpu->arch.ceded) {
-			if (!ptid)
-				vcpu0 = vcpu;
-			vcpu->arch.ptid = ptid++;
-		}
-	}
-	if (!vcpu0)
-		goto out;	/* nothing to run; should never happen */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
-		if (vcpu->arch.ceded)
-			vcpu->arch.ptid = ptid++;
-
-	/*
 	 * Make sure we are running on thread 0, and that
 	 * secondary threads are offline.
 	 */
@@ -1280,15 +1265,19 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 		kvmppc_create_dtl_entry(vcpu, vc);
 	}
 
+	/* Set this explicitly in case thread 0 doesn't have a vcpu */
+	get_paca()->kvm_hstate.kvm_vcore = vc;
+	get_paca()->kvm_hstate.ptid = 0;
+
 	vc->vcore_state = VCORE_RUNNING;
 	preempt_disable();
 	spin_unlock(&vc->lock);
 
 	kvm_guest_enter();
 
-	srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);
+	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
-	__kvmppc_vcore_entry(NULL, vcpu0);
+	__kvmppc_vcore_entry();
 
 	spin_lock(&vc->lock);
 	/* disable sending of IPIs on virtual external irqs */
@@ -1303,7 +1292,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	vc->vcore_state = VCORE_EXITING;
 	spin_unlock(&vc->lock);
 
-	srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);
+	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
 	/* make sure updates to secondary vcpu structs are visible now */
 	smp_mb();
@@ -1411,7 +1400,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		if (!signal_pending(current)) {
 			if (vc->vcore_state == VCORE_RUNNING &&
 			    VCORE_EXIT_COUNT(vc) == 0) {
-				vcpu->arch.ptid = vc->n_runnable - 1;
 				kvmppc_create_dtl_entry(vcpu, vc);
 				kvmppc_start_thread(vcpu);
 			} else if (vc->vcore_state == VCORE_SLEEPING) {
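The assembly side of this flow lives in book3s_hv_rmhandlers.S and is not part of this file's diff, but the control flow the commit message describes can be paraphrased in C. The sketch below is a hypothetical rendering, not kernel code: every helper (mmu_switch_to_guest(), nap_until_vcpu_or_exit(), and so on) is an illustrative stand-in, while the kvm_hstate fields (kvm_vcpu, kvm_vcore, ptid) are the ones this patch populates.

/* Hypothetical C paraphrase of the reworked kvmppc_hv_entry flow. */
struct hstate_sketch {
	void *kvm_vcpu;		/* NULL when this thread has no vcpu to run */
	void *kvm_vcore;	/* set for every thread, vcpu or not */
	int   ptid;		/* thread number within the core */
};

extern struct hstate_sketch *this_paca(void);	/* stand-in for get_paca() */
extern void mmu_switch_to_guest(void *kvm);	/* illustrative stand-ins, */
extern void mmu_switch_to_host(void *kvm);	/* not kernel APIs         */
extern void *nap_until_vcpu_or_exit(void);	/* returns vcpu, or NULL on exit request */
extern void load_run_save_vcpu(void *vcpu);

void hv_entry_sketch(void *kvm)
{
	struct hstate_sketch *hs = this_paca();
	void *vcpu = hs->kvm_vcpu;

	/* Only thread 0 switches the MMU, and it does so before any
	 * guest vcpu state is loaded. */
	if (hs->ptid == 0)
		mmu_switch_to_guest(kvm);

	/* A thread that arrived without a vcpu naps until its vcpu is
	 * ready to run or another thread needs to exit the guest. */
	if (!vcpu)
		vcpu = nap_until_vcpu_or_exit();

	/* Load guest state, run, then save it all back, still under the
	 * guest MMU context. */
	if (vcpu)
		load_run_save_vcpu(vcpu);

	/* The MMU returns to the host only after guest state is saved. */
	if (hs->ptid == 0)
		mmu_switch_to_host(kvm);
}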