author		Ingo Molnar <mingo@elte.hu>	2009-05-07 05:18:34 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-07 07:36:22 -0400
commit		0ad5d703c6c0fcd385d956555460df95dff7eb7e (patch)
tree		4b777100f9be4fe90ca4bd043b9f98df672b5b3b /kernel/sched.c
parent		44347d947f628060b92449702071bfe1d31dfb75 (diff)
parent		1cb81b143fa8f0e4629f10690862e2e52ca792ff (diff)
Merge branch 'tracing/hw-branch-tracing' into tracing/core
Merge reason: this topic is ready for upstream now. It passed
Oleg's review and Andrew had no further mm/*
objections/observations either.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	43
1 files changed, 43 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 14a19b17674e..6530a27052f3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2011,6 +2011,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 }
 
 /*
+ * wait_task_context_switch -	wait for a thread to complete at least one
+ *				context switch.
+ *
+ * @p must not be current.
+ */
+void wait_task_context_switch(struct task_struct *p)
+{
+	unsigned long nvcsw, nivcsw, flags;
+	int running;
+	struct rq *rq;
+
+	nvcsw	= p->nvcsw;
+	nivcsw	= p->nivcsw;
+	for (;;) {
+		/*
+		 * The runqueue is assigned before the actual context
+		 * switch. We need to take the runqueue lock.
+		 *
+		 * We could check initially without the lock but it is
+		 * very likely that we need to take the lock in every
+		 * iteration.
+		 */
+		rq = task_rq_lock(p, &flags);
+		running = task_running(rq, p);
+		task_rq_unlock(rq, &flags);
+
+		if (likely(!running))
+			break;
+		/*
+		 * The switch count is incremented before the actual
+		 * context switch. We thus wait for two switches to be
+		 * sure at least one completed.
+		 */
+		if ((p->nvcsw - nvcsw) > 1)
+			break;
+		if ((p->nivcsw - nivcsw) > 1)
+			break;
+
+		cpu_relax();
+	}
+}
+
+/*
  * wait_task_inactive - wait for a thread to unschedule.
  *
  * If @match_state is nonzero, it's the @p->state value just checked and
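Usage note (not part of the commit): a caller would typically change some per-task state and then use wait_task_context_switch() to make sure the target has either stopped running or completed at least one full context switch since that change. The counters are sampled before the loop, and an increment of more than one is required because nvcsw/nivcsw are bumped before the switch actually completes. The sketch below is illustrative only; example_sync_after_update() and update_per_task_state() are hypothetical names, not functions from this tree.

/*
 * Illustrative sketch only (not from this commit): update some per-task
 * state, then wait until @child is either off the CPU or has completed
 * at least one context switch since the update was made visible.
 * update_per_task_state() is a hypothetical helper for this example.
 */
static void example_sync_after_update(struct task_struct *child)
{
	update_per_task_state(child);	/* hypothetical per-task change */

	/* wait_task_context_switch() requires @p != current */
	if (child != current)
		wait_task_context_switch(child);
}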