about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2015-03-27 23:21:03 -0400
committerAlexander Graf <agraf@suse.de>2015-04-21 09:21:31 -0400
commitd911f0beddc2a9248dbf375fc50a4bbf30947822 (patch)
tree60ef2eaa9cf91def5abbce3ae49181cf92d40ef2
parentb6c295df3131c6fa25f8f29625ee0609506150ad (diff)
KVM: PPC: Book3S HV: Simplify handling of VCPUs that need a VPA update
Previously, if kvmppc_run_core() was running a VCPU that needed a VPA update (i.e. one of its 3 virtual processor areas needed to be pinned in memory so the host real mode code can update it on guest entry and exit), we would drop the vcore lock and do the update there and then. Future changes will make it inconvenient to drop the lock, so instead we now remove it from the list of runnable VCPUs and wake up its VCPU task. This will have the effect that the VCPU task will exit kvmppc_run_vcpu(), go around the do loop in kvmppc_vcpu_run_hv(), and re-enter kvmppc_run_vcpu(), whereupon it will do the necessary call to kvmppc_update_vpas() and then rejoin the vcore. The one complication is that the runner VCPU (whose VCPU task is the current task) might be one of the ones that gets removed from the runnable list. In that case we just return from kvmppc_run_core() and let the code in kvmppc_run_vcpu() wake up another VCPU task to be the runner if necessary. This all means that the VCORE_STARTING state is no longer used, so we remove it. Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r--arch/powerpc/include/asm/kvm_host.h5
-rw-r--r--arch/powerpc/kvm/book3s_hv.c56
2 files changed, 32 insertions, 29 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d2068bba9059..2f339ff9b851 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -306,9 +306,8 @@ struct kvmppc_vcore {
 /* Values for vcore_state */
 #define VCORE_INACTIVE	0
 #define VCORE_SLEEPING	1
-#define VCORE_STARTING	2
-#define VCORE_RUNNING	3
-#define VCORE_EXITING	4
+#define VCORE_RUNNING	2
+#define VCORE_EXITING	3
 
 /*
  * Struct used to manage memory for a virtual processor area
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 64a02d4c737c..b38c10e00c16 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1863,6 +1863,25 @@ static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
1863 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE); 1863 mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
1864} 1864}
1865 1865
1866static void prepare_threads(struct kvmppc_vcore *vc)
1867{
1868 struct kvm_vcpu *vcpu, *vnext;
1869
1870 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
1871 arch.run_list) {
1872 if (signal_pending(vcpu->arch.run_task))
1873 vcpu->arch.ret = -EINTR;
1874 else if (vcpu->arch.vpa.update_pending ||
1875 vcpu->arch.slb_shadow.update_pending ||
1876 vcpu->arch.dtl.update_pending)
1877 vcpu->arch.ret = RESUME_GUEST;
1878 else
1879 continue;
1880 kvmppc_remove_runnable(vc, vcpu);
1881 wake_up(&vcpu->arch.cpu_run);
1882 }
1883}
1884
1866/* 1885/*
1867 * Run a set of guest threads on a physical core. 1886 * Run a set of guest threads on a physical core.
1868 * Called with vc->lock held. 1887 * Called with vc->lock held.
@@ -1872,46 +1891,31 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
1872 struct kvm_vcpu *vcpu, *vnext; 1891 struct kvm_vcpu *vcpu, *vnext;
1873 long ret; 1892 long ret;
1874 u64 now; 1893 u64 now;
1875 int i, need_vpa_update; 1894 int i;
1876 int srcu_idx; 1895 int srcu_idx;
1877 struct kvm_vcpu *vcpus_to_update[threads_per_core];
1878 1896
1879 /* don't start if any threads have a signal pending */ 1897 /*
1880 need_vpa_update = 0; 1898 * Remove from the list any threads that have a signal pending
1881 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { 1899 * or need a VPA update done
1882 if (signal_pending(vcpu->arch.run_task)) 1900 */
1883 return; 1901 prepare_threads(vc);
1884 if (vcpu->arch.vpa.update_pending || 1902
1885 vcpu->arch.slb_shadow.update_pending || 1903 /* if the runner is no longer runnable, let the caller pick a new one */
1886 vcpu->arch.dtl.update_pending) 1904 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
1887 vcpus_to_update[need_vpa_update++] = vcpu; 1905 return;
1888 }
1889 1906
1890 /* 1907 /*
1891 * Initialize *vc, in particular vc->vcore_state, so we can 1908 * Initialize *vc.
1892 * drop the vcore lock if necessary.
1893 */ 1909 */
1894 vc->n_woken = 0; 1910 vc->n_woken = 0;
1895 vc->nap_count = 0; 1911 vc->nap_count = 0;
1896 vc->entry_exit_count = 0; 1912 vc->entry_exit_count = 0;
1897 vc->preempt_tb = TB_NIL; 1913 vc->preempt_tb = TB_NIL;
1898 vc->vcore_state = VCORE_STARTING;
1899 vc->in_guest = 0; 1914 vc->in_guest = 0;
1900 vc->napping_threads = 0; 1915 vc->napping_threads = 0;
1901 vc->conferring_threads = 0; 1916 vc->conferring_threads = 0;
1902 1917
1903 /* 1918 /*
1904 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
1905 * which can't be called with any spinlocks held.
1906 */
1907 if (need_vpa_update) {
1908 spin_unlock(&vc->lock);
1909 for (i = 0; i < need_vpa_update; ++i)
1910 kvmppc_update_vpas(vcpus_to_update[i]);
1911 spin_lock(&vc->lock);
1912 }
1913
1914 /*
1915 * Make sure we are running on primary threads, and that secondary 1919 * Make sure we are running on primary threads, and that secondary
1916 * threads are offline. Also check if the number of threads in this 1920 * threads are offline. Also check if the number of threads in this
1917 * guest are greater than the current system threads per guest. 1921 * guest are greater than the current system threads per guest.