Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  43
1 files changed, 43 insertions, 0 deletions
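
The patch below adds wait_task_context_switch(), which waits until a task has either stopped running or completed at least one context switch. As a rough illustration of the intended calling pattern (not part of this commit), a tracer-style caller might look like the sketch below; my_tracer_sync() is a hypothetical helper, and the wait_task_context_switch() declaration is assumed to be visible via <linux/sched.h>.

/*
 * Illustrative sketch only, not taken from this commit: make sure
 * @child is no longer executing on its CPU with the state it had
 * when we last observed it.  my_tracer_sync() is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/sched.h>

static void my_tracer_sync(struct task_struct *child)
{
	/* The new helper must not be called on the current task. */
	if (WARN_ON(child == current))
		return;

	/*
	 * Returns once @child is not running, or has completed at
	 * least one voluntary or involuntary context switch.
	 */
	wait_task_context_switch(child);
}
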
diff --git a/kernel/sched.c b/kernel/sched.c
index b902e587a3a0..36322e8682c7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2015,6 +2015,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 }
 
 /*
+ * wait_task_context_switch -	wait for a thread to complete at least one
+ *				context switch.
+ *
+ * @p must not be current.
+ */
+void wait_task_context_switch(struct task_struct *p)
+{
+	unsigned long nvcsw, nivcsw, flags;
+	int running;
+	struct rq *rq;
+
+	nvcsw	= p->nvcsw;
+	nivcsw	= p->nivcsw;
+	for (;;) {
+		/*
+		 * The runqueue is assigned before the actual context
+		 * switch. We need to take the runqueue lock.
+		 *
+		 * We could check initially without the lock but it is
+		 * very likely that we need to take the lock in every
+		 * iteration.
+		 */
+		rq = task_rq_lock(p, &flags);
+		running = task_running(rq, p);
+		task_rq_unlock(rq, &flags);
+
+		if (likely(!running))
+			break;
+		/*
+		 * The switch count is incremented before the actual
+		 * context switch. We thus wait for two switches to be
+		 * sure at least one completed.
+		 */
+		if ((p->nvcsw - nvcsw) > 1)
+			break;
+		if ((p->nivcsw - nivcsw) > 1)
+			break;
+
+		cpu_relax();
+	}
+}
+
+/*
  * wait_task_inactive - wait for a thread to unschedule.
  *
  * If @match_state is nonzero, it's the @p->state value just checked and