author	Paul Mackerras <paulus@samba.org>	2015-03-27 23:21:05 -0400
committer	Alexander Graf <agraf@suse.de>	2015-04-21 09:21:32 -0400
commit	25fedfca94cfbf2461314c6c34ef58e74a31b025 (patch)
tree	f97e12748bccd2e1f9da93f98a4727542d6b132c /arch
parent	1f09c3ed86287d40fef90611cbbee055313f52cf (diff)
KVM: PPC: Book3S HV: Move vcore preemption point up into kvmppc_run_vcpu
Rather than calling cond_resched() in kvmppc_run_core() before doing the
post-processing for the vcpus that we have just run (that is, calling
kvmppc_handle_exit_hv(), kvmppc_set_timer(), etc.), we now do that
post-processing before calling cond_resched(), and that post-processing
is moved out into its own function, post_guest_process().

The reschedule point is now in kvmppc_run_vcpu() and we define a new
vcore state, VCORE_PREEMPT, to indicate that the vcore's runner task is
runnable but not running.  (Doing the reschedule with the vcore in
VCORE_INACTIVE state would be bad because there are potentially other
vcpus waiting for the runner in kvmppc_wait_for_exec() which then
wouldn't get woken up.)

Also, we make use of the handy cond_resched_lock() function, which
unlocks and relocks vc->lock for us around the reschedule.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
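For readers unfamiliar with cond_resched_lock(), here is a minimal
userspace sketch of the pattern it provides, assuming a pthreads
analogue; cond_resched_lock_sketch() and need_resched_hint are
hypothetical names for illustration, not kernel API:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

/*
 * Sketch only: what cond_resched_lock(&l) does for the caller when a
 * reschedule is due -- drop the lock, yield the CPU, retake the lock.
 * need_resched_hint stands in for the kernel's should_resched().
 */
static bool cond_resched_lock_sketch(pthread_mutex_t *l, bool need_resched_hint)
{
	if (!need_resched_hint)
		return false;
	pthread_mutex_unlock(l);
	sched_yield();			/* let something else run */
	pthread_mutex_lock(l);
	return true;
}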
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	5
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	92
2 files changed, 55 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3eecd8868b01..83c44257b005 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -304,8 +304,9 @@ struct kvmppc_vcore {
 /* Values for vcore_state */
 #define VCORE_INACTIVE	0
 #define VCORE_SLEEPING	1
-#define VCORE_RUNNING	2
-#define VCORE_EXITING	3
+#define VCORE_PREEMPT	2
+#define VCORE_RUNNING	3
+#define VCORE_EXITING	4
 
 /*
  * Struct used to manage memory for a virtual processor area
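As a reading aid, the vcore states after this patch can be annotated as
below; the comments are an interpretation of the commit message and the
surrounding code, not text from the header, and the enum is illustrative
only:

/* Illustrative annotation only -- not the kernel header. */
enum vcore_state_sketch {
	SKETCH_INACTIVE,	/* no runner currently active for the vcore */
	SKETCH_SLEEPING,	/* all runnable vcpus have ceded; runner asleep */
	SKETCH_PREEMPT,		/* new: runner task runnable but not running */
	SKETCH_RUNNING,		/* guest threads executing on the core */
	SKETCH_EXITING,		/* guest exit being processed */
};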
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index b38c10e00c16..fb4f16628a51 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1882,15 +1882,50 @@ static void prepare_threads(struct kvmppc_vcore *vc)
 	}
 }
 
+static void post_guest_process(struct kvmppc_vcore *vc)
+{
+	u64 now;
+	long ret;
+	struct kvm_vcpu *vcpu, *vnext;
+
+	now = get_tb();
+	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+				 arch.run_list) {
+		/* cancel pending dec exception if dec is positive */
+		if (now < vcpu->arch.dec_expires &&
+		    kvmppc_core_pending_dec(vcpu))
+			kvmppc_core_dequeue_dec(vcpu);
+
+		trace_kvm_guest_exit(vcpu);
+
+		ret = RESUME_GUEST;
+		if (vcpu->arch.trap)
+			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+						    vcpu->arch.run_task);
+
+		vcpu->arch.ret = ret;
+		vcpu->arch.trap = 0;
+
+		if (vcpu->arch.ceded) {
+			if (!is_kvmppc_resume_guest(ret))
+				kvmppc_end_cede(vcpu);
+			else
+				kvmppc_set_timer(vcpu);
+		}
+		if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
+			kvmppc_remove_runnable(vc, vcpu);
+			wake_up(&vcpu->arch.cpu_run);
+		}
+	}
+}
+
 /*
  * Run a set of guest threads on a physical core.
  * Called with vc->lock held.
  */
 static void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu, *vnext;
-	long ret;
-	u64 now;
+	struct kvm_vcpu *vcpu;
 	int i;
 	int srcu_idx;
 
@@ -1922,8 +1957,11 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	if ((threads_per_core > 1) &&
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 			vcpu->arch.ret = -EBUSY;
+			kvmppc_remove_runnable(vc, vcpu);
+			wake_up(&vcpu->arch.cpu_run);
+		}
 		goto out;
 	}
 
@@ -1979,44 +2017,12 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	kvm_guest_exit();
 
 	preempt_enable();
-	cond_resched();
 
 	spin_lock(&vc->lock);
-	now = get_tb();
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
-		/* cancel pending dec exception if dec is positive */
-		if (now < vcpu->arch.dec_expires &&
-		    kvmppc_core_pending_dec(vcpu))
-			kvmppc_core_dequeue_dec(vcpu);
-
-		trace_kvm_guest_exit(vcpu);
-
-		ret = RESUME_GUEST;
-		if (vcpu->arch.trap)
-			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
-						    vcpu->arch.run_task);
-
-		vcpu->arch.ret = ret;
-		vcpu->arch.trap = 0;
-
-		if (vcpu->arch.ceded) {
-			if (!is_kvmppc_resume_guest(ret))
-				kvmppc_end_cede(vcpu);
-			else
-				kvmppc_set_timer(vcpu);
-		}
-	}
+	post_guest_process(vc);
 
  out:
 	vc->vcore_state = VCORE_INACTIVE;
-	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
-				 arch.run_list) {
-		if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
-			kvmppc_remove_runnable(vc, vcpu);
-			wake_up(&vcpu->arch.cpu_run);
-		}
-	}
-
 	trace_kvmppc_run_core(vc, 1);
 }
 
@@ -2138,7 +2144,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		}
 		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 			break;
-		vc->runner = vcpu;
 		n_ceded = 0;
 		list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
 			if (!v->arch.pending_exceptions)
@@ -2146,10 +2151,17 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			else
 				v->arch.ceded = 0;
 		}
-		if (n_ceded == vc->n_runnable)
+		vc->runner = vcpu;
+		if (n_ceded == vc->n_runnable) {
 			kvmppc_vcore_blocked(vc);
-		else
+		} else if (should_resched()) {
+			vc->vcore_state = VCORE_PREEMPT;
+			/* Let something else run */
+			cond_resched_lock(&vc->lock);
+			vc->vcore_state = VCORE_INACTIVE;
+		} else {
 			kvmppc_run_core(vc);
+		}
 		vc->runner = NULL;
 	}
 
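Putting the pieces together, here is a hedged, self-contained userspace
analogue of the new decision order in kvmppc_run_vcpu(); every name in
it (struct core, runner_iteration, the stubs) is hypothetical, and the
kernel helpers are reduced to no-ops:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

enum core_state { CORE_INACTIVE, CORE_PREEMPT, CORE_RUNNING };

struct core {				/* stand-in for struct kvmppc_vcore */
	pthread_mutex_t lock;
	enum core_state state;
	int n_ceded, n_runnable;
};

static void core_blocked(struct core *c) { (void)c; }	/* kvmppc_vcore_blocked() stub */
static void core_run(struct core *c) { (void)c; }	/* kvmppc_run_core() stub */

/*
 * One pass of the runner loop: sleep if every runnable vcpu has ceded,
 * otherwise yield at the new preemption point if a reschedule is due,
 * otherwise enter the guest.  Called with c->lock held, like the
 * kernel's loop is called with vc->lock held.
 */
static void runner_iteration(struct core *c, bool resched_due)
{
	if (c->n_ceded == c->n_runnable) {
		core_blocked(c);
	} else if (resched_due) {
		c->state = CORE_PREEMPT;	/* runnable, not running */
		pthread_mutex_unlock(&c->lock);
		sched_yield();			/* let something else run */
		pthread_mutex_lock(&c->lock);
		c->state = CORE_INACTIVE;
	} else {
		core_run(c);
	}
}

Marking the core CORE_PREEMPT (rather than leaving it CORE_INACTIVE)
across the yield mirrors the commit's reasoning: threads that observe
the inactive state expect to be woken by the runner, so the runner must
not look inactive while it is merely preempted.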