aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 14a19b17674e..6530a27052f3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2011,6 +2011,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2011} 2011}
2012 2012
2013/*
2014 * wait_task_context_switch - wait for a thread to complete at least one
2015 * context switch.
 * @p: the task to wait for.
2016 *
2017 * @p must not be current.
 *
 * Busy-waits (polling with cpu_relax()) until @p is observed not
 * running on any runqueue, or until its context-switch counters show
 * that at least one full switch has completed since they were sampled
 * below.  NOTE(review): this spins with no sleep/timeout — callers
 * must tolerate a potentially long busy wait if @p stays on-CPU.
2018 */
2019void wait_task_context_switch(struct task_struct *p)
2020{
2021	unsigned long nvcsw, nivcsw, flags;
2022	int running;
2023	struct rq *rq;
2024
	/*
	 * Snapshot the switch counters before polling.  nvcsw/nivcsw are
	 * presumably the voluntary/involuntary context-switch counts --
	 * NOTE(review): read here (and compared below) without holding
	 * the runqueue lock; this relies on the counters for @p only
	 * being advanced by the scheduler.  Confirm against the nvcsw
	 * update sites.
	 */
2025	nvcsw = p->nvcsw;
2026	nivcsw = p->nivcsw;
2027	for (;;) {
2028		/*
2029		 * The runqueue is assigned before the actual context
2030		 * switch. We need to take the runqueue lock.
2031		 *
2032		 * We could check initially without the lock but it is
2033		 * very likely that we need to take the lock in every
2034		 * iteration.
2035		 */
2036		rq = task_rq_lock(p, &flags);
2037		running = task_running(rq, p);
2038		task_rq_unlock(rq, &flags);
2039
		/* Not on a CPU at all: it has fully switched out -- done. */
2040		if (likely(!running))
2041			break;
2042		/*
2043		 * The switch count is incremented before the actual
2044		 * context switch. We thus wait for two switches to be
2045		 * sure at least one completed.
2046		 */
2047		if ((p->nvcsw - nvcsw) > 1)
2048			break;
2049		if ((p->nivcsw - nivcsw) > 1)
2050			break;
2051
		/* Still on-CPU and not enough switches yet: spin politely. */
2052		cpu_relax();
2053	}
2054}
2055
2056/*
2014 * wait_task_inactive - wait for a thread to unschedule. 2057 * wait_task_inactive - wait for a thread to unschedule.
2015 * 2058 *
2016 * If @match_state is nonzero, it's the @p->state value just checked and 2059 * If @match_state is nonzero, it's the @p->state value just checked and