 include/linux/sched.h |  2 ++
 kernel/sched.c        | 43 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b94f3541f67b..a5b9a83065fa 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1993,8 +1993,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
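
To show the intended calling pattern, here is a hedged sketch (not part of
this patch; the caller and the cleanup step are made up for illustration):
a hypothetical user that must not tear down per-task state until @p has
demonstrably been through a full context switch.

#include <linux/sched.h>

/*
 * Illustration only: a hypothetical caller that frees per-task state
 * which the scheduler may still be using until @p switches once.
 */
static void example_release_task_state(struct task_struct *p)
{
	WARN_ON(p == current);	/* the helper must not wait on itself */

	/*
	 * Returns only after p has completed at least one context
	 * switch, so a switch that was in flight when we were called
	 * has finished by now.
	 */
	wait_task_context_switch(p);

	/* example_free_state(p);	-- hypothetical cleanup step */
}
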
diff --git a/kernel/sched.c b/kernel/sched.c
index 6cc1fd5d5072..f91bc8141dc3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2003,6 +2003,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 }
 
 /*
+ * wait_task_context_switch -	wait for a thread to complete at least one
+ *				context switch.
+ *
+ * @p must not be current.
+ */
+void wait_task_context_switch(struct task_struct *p)
+{
+	unsigned long nvcsw, nivcsw, flags;
+	int running;
+	struct rq *rq;
+
+	nvcsw	= p->nvcsw;
+	nivcsw	= p->nivcsw;
+	for (;;) {
+		/*
+		 * The runqueue is assigned before the actual context
+		 * switch. We need to take the runqueue lock.
+		 *
+		 * We could check initially without the lock but it is
+		 * very likely that we need to take the lock in every
+		 * iteration.
+		 */
+		rq = task_rq_lock(p, &flags);
+		running = task_running(rq, p);
+		task_rq_unlock(rq, &flags);
+
+		if (likely(!running))
+			break;
+		/*
+		 * The switch count is incremented before the actual
+		 * context switch. We thus wait for two switches to be
+		 * sure at least one completed.
+		 */
+		if ((p->nvcsw - nvcsw) > 1)
+			break;
+		if ((p->nivcsw - nivcsw) > 1)
+			break;
+
+		cpu_relax();
+	}
+}
+
+/*
  * wait_task_inactive - wait for a thread to unschedule.
  *
  * If @match_state is nonzero, it's the @p->state value just checked and
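
For reference, the nvcsw/nivcsw counters polled above are the task's
voluntary and involuntary context-switch counts, and the kernel exports the
same values to userspace in /proc/<pid>/status. A minimal standalone
userspace sketch (not part of this patch) that reads one of them:

#include <stdio.h>

/* Read this process's voluntary context-switch count from procfs. */
static long voluntary_switches(void)
{
	char line[128];
	long val = -1;
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "voluntary_ctxt_switches: %ld", &val) == 1)
			break;
	fclose(f);
	return val;
}

int main(void)
{
	printf("voluntary context switches: %ld\n", voluntary_switches());
	return 0;
}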