author     Michael Ellerman <mpe@ellerman.id.au>              2014-05-23 04:15:29 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-05-27 23:35:37 -0400
commit     3102f7843c75014fa15d3e6fda3b49f61bc467b4 (patch)
tree       16ab9545e46162b3c6735d13be0b8a04292b7098 /arch/powerpc/kvm/book3s_hv.c
parent     6f5e40a3001d2497a134386a173e3ec3fdf2ad0b (diff)
powerpc/kvm/book3s_hv: Use threads_per_subcore in KVM
To support split core on POWER8 we need to modify various parts of the KVM code to use threads_per_subcore instead of threads_per_core. On systems that do not support split core, threads_per_subcore == threads_per_core and these changes are a nop.

We use threads_per_subcore as the value reported by KVM_CAP_PPC_SMT. This communicates to userspace that guests can only be created with a value of threads_per_core that is less than or equal to the current threads_per_subcore. This ensures that guests can only be created with a thread configuration that we are able to run given the current split-core mode.

Although threads_per_subcore can change during the life of the system, the commit that enables that will ensure that threads_per_subcore does not change during the life of a KVM VM.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Acked-by: Alexander Graf <agraf@suse.de>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
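For reference, the capability mentioned above can be queried from userspace with the standard KVM_CHECK_EXTENSION ioctl; after this change the value returned for KVM_CAP_PPC_SMT is threads_per_subcore. The snippet below is a minimal illustrative sketch, not part of this patch, assuming a POWER host that exposes /dev/kvm.

/* Illustrative only (not from this patch): query the maximum guest SMT mode. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/*
	 * KVM_CHECK_EXTENSION returns 0 if the capability is unsupported;
	 * for KVM_CAP_PPC_SMT it returns the largest threads-per-core value
	 * a guest may be created with (threads_per_subcore after this patch).
	 */
	int smt = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
	printf("max guest threads per (sub)core: %d\n", smt);
	return 0;
}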
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 26
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index d7b74f888ad8..5e86f28c9d2f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1266,7 +1266,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	int core;
 	struct kvmppc_vcore *vcore;
 
-	core = id / threads_per_core;
+	core = id / threads_per_subcore;
 	if (core >= KVM_MAX_VCORES)
 		goto out;
 
@@ -1305,7 +1305,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 		init_waitqueue_head(&vcore->wq);
 		vcore->preempt_tb = TB_NIL;
 		vcore->lpcr = kvm->arch.lpcr;
-		vcore->first_vcpuid = core * threads_per_core;
+		vcore->first_vcpuid = core * threads_per_subcore;
 		vcore->kvm = kvm;
 	}
 	kvm->arch.vcores[core] = vcore;
@@ -1495,16 +1495,19 @@ static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
 static int on_primary_thread(void)
 {
 	int cpu = smp_processor_id();
-	int thr = cpu_thread_in_core(cpu);
+	int thr;
 
-	if (thr)
+	/* Are we on a primary subcore? */
+	if (cpu_thread_in_subcore(cpu))
 		return 0;
-	while (++thr < threads_per_core)
+
+	thr = 0;
+	while (++thr < threads_per_subcore)
 		if (cpu_online(cpu + thr))
 			return 0;
 
 	/* Grab all hw threads so they can't go into the kernel */
-	for (thr = 1; thr < threads_per_core; ++thr) {
+	for (thr = 1; thr < threads_per_subcore; ++thr) {
 		if (kvmppc_grab_hwthread(cpu + thr)) {
 			/* Couldn't grab one; let the others go */
 			do {
@@ -1563,15 +1566,18 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	}
 
 	/*
-	 * Make sure we are running on thread 0, and that
-	 * secondary threads are offline.
+	 * Make sure we are running on primary threads, and that secondary
+	 * threads are offline. Also check if the number of threads in this
+	 * guest are greater than the current system threads per guest.
 	 */
-	if (threads_per_core > 1 && !on_primary_thread()) {
+	if ((threads_per_core > 1) &&
+	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
 		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
 			vcpu->arch.ret = -EBUSY;
 		goto out;
 	}
 
+
 	vc->pcpu = smp_processor_id();
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
@@ -1599,7 +1605,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	/* wait for secondary threads to finish writing their state to memory */
 	if (vc->nap_count < vc->n_woken)
 		kvmppc_wait_for_nap(vc);
-	for (i = 0; i < threads_per_core; ++i)
+	for (i = 0; i < threads_per_subcore; ++i)
 		kvmppc_release_hwthread(vc->pcpu + i);
 	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
 	vc->vcore_state = VCORE_EXITING;