| author | Markus Metzger <markus.t.metzger@intel.com> | 2009-04-03 10:43:34 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-04-07 07:36:12 -0400 |
| commit | a26b89f05d194413c7238e0bea071054f6b5d3c8 (patch) | |
| tree | 8a2efbabce49ed747fe9b54e282e04b82d1b9982 /kernel/sched.c | |
| parent | cac94f979326212831c0ea44ed9ea1622b4f4e93 (diff) | |
sched, hw-branch-tracer: add wait_task_context_switch() function to sched.h
Add a function to wait until some other task has been
switched out at least once.
This differs subtly from wait_task_inactive(): the latter
will only wait until the task has left the CPU.
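A minimal caller sketch, assuming a tracer-style user that has just torn down per-task state (the helper name and the detach scenario are illustrative, not part of the patch):

```c
#include <linux/sched.h>

/*
 * Illustrative caller only (not from this patch): after disarming
 * per-task trace state, make sure @child has been switched out at
 * least once before treating the old state as no longer in use.
 */
static void example_tracer_detach(struct task_struct *child)
{
	/* @child must not be current, per the kernel-doc below. */
	wait_task_context_switch(child);

	/* @child has now left the CPU or completed a full context switch. */
}
```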
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Cc: markus.t.metzger@gmail.com
Cc: roland@redhat.com
Cc: eranian@googlemail.com
Cc: oleg@redhat.com
Cc: juan.villacis@intel.com
Cc: ak@linux.jf.intel.com
LKML-Reference: <20090403144549.794157000@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 43
1 files changed, 43 insertions, 0 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 6cc1fd5d5072..f91bc8141dc3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2003,6 +2003,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 }
 
 /*
+ * wait_task_context_switch -	wait for a thread to complete at least one
+ *				context switch.
+ *
+ * @p must not be current.
+ */
+void wait_task_context_switch(struct task_struct *p)
+{
+	unsigned long nvcsw, nivcsw, flags;
+	int running;
+	struct rq *rq;
+
+	nvcsw	= p->nvcsw;
+	nivcsw	= p->nivcsw;
+	for (;;) {
+		/*
+		 * The runqueue is assigned before the actual context
+		 * switch. We need to take the runqueue lock.
+		 *
+		 * We could check initially without the lock but it is
+		 * very likely that we need to take the lock in every
+		 * iteration.
+		 */
+		rq = task_rq_lock(p, &flags);
+		running = task_running(rq, p);
+		task_rq_unlock(rq, &flags);
+
+		if (likely(!running))
+			break;
+		/*
+		 * The switch count is incremented before the actual
+		 * context switch. We thus wait for two switches to be
+		 * sure at least one completed.
+		 */
+		if ((p->nvcsw - nvcsw) > 1)
+			break;
+		if ((p->nivcsw - nivcsw) > 1)
+			break;
+
+		cpu_relax();
+	}
+}
+
+/*
  * wait_task_inactive - wait for a thread to unschedule.
  *
  * If @match_state is nonzero, it's the @p->state value just checked and
```
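To make the distinction drawn in the commit message concrete, here is a side-by-side sketch of the two primitives (illustrative call site only; passing 0 for @match_state is an assumption based on the kernel-doc above, meaning no particular ->state is required):

```c
#include <linux/sched.h>

static void example_compare_waits(struct task_struct *p)
{
	/*
	 * wait_task_inactive(): waits for @p to unschedule, i.e. until
	 * it has left the CPU.
	 */
	wait_task_inactive(p, 0);

	/*
	 * wait_task_context_switch(): returns only once @p is not
	 * running or has completed at least one full context switch
	 * since entry.
	 */
	wait_task_context_switch(p);
}
```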