diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-10 22:53:40 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-10 22:53:40 -0400 |
commit | 862366118026a358882eefc70238dbcc3db37aac (patch) | |
tree | 4eb62bc10327a5afac064a95a091ea05ecd2acc1 /kernel/sched.c | |
parent | 57eee9ae7bbcfb692dc96c739a5184adb6349733 (diff) | |
parent | 511b01bdf64ad8a38414096eab283c7784aebfc4 (diff) |
Merge branch 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (244 commits)
Revert "x86, bts: reenable ptrace branch trace support"
tracing: do not translate event helper macros in print format
ftrace/documentation: fix typo in function grapher name
tracing/events: convert block trace points to TRACE_EVENT(), fix !CONFIG_BLOCK
tracing: add protection around module events unload
tracing: add trace_seq_vprint interface
tracing: fix the block trace points print size
tracing/events: convert block trace points to TRACE_EVENT()
ring-buffer: fix ret in rb_add_time_stamp
ring-buffer: pass in lockdep class key for reader_lock
tracing: add annotation to what type of stack trace is recorded
tracing: fix multiple use of __print_flags and __print_symbolic
tracing/events: fix output format of user stack
tracing/events: fix output format of kernel stack
tracing/trace_stack: fix the number of entries in the header
ring-buffer: discard timestamps that are at the start of the buffer
ring-buffer: try to discard unneeded timestamps
ring-buffer: fix bug in ring_buffer_discard_commit
ftrace: do not profile functions when disabled
tracing: make trace pipe recognize latency format flag
...
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 55 |
1 file changed, 47 insertions, 8 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 076e403b9c88..14c447ae5d53 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -72,13 +72,15 @@ | |||
72 | #include <linux/debugfs.h> | 72 | #include <linux/debugfs.h> |
73 | #include <linux/ctype.h> | 73 | #include <linux/ctype.h> |
74 | #include <linux/ftrace.h> | 74 | #include <linux/ftrace.h> |
75 | #include <trace/sched.h> | ||
76 | 75 | ||
77 | #include <asm/tlb.h> | 76 | #include <asm/tlb.h> |
78 | #include <asm/irq_regs.h> | 77 | #include <asm/irq_regs.h> |
79 | 78 | ||
80 | #include "sched_cpupri.h" | 79 | #include "sched_cpupri.h" |
81 | 80 | ||
81 | #define CREATE_TRACE_POINTS | ||
82 | #include <trace/events/sched.h> | ||
83 | |||
82 | /* | 84 | /* |
83 | * Convert user-nice values [ -20 ... 0 ... 19 ] | 85 | * Convert user-nice values [ -20 ... 0 ... 19 ] |
84 | * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], | 86 | * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], |
@@ -118,12 +120,6 @@ | |||
118 | */ | 120 | */ |
119 | #define RUNTIME_INF ((u64)~0ULL) | 121 | #define RUNTIME_INF ((u64)~0ULL) |
120 | 122 | ||
121 | DEFINE_TRACE(sched_wait_task); | ||
122 | DEFINE_TRACE(sched_wakeup); | ||
123 | DEFINE_TRACE(sched_wakeup_new); | ||
124 | DEFINE_TRACE(sched_switch); | ||
125 | DEFINE_TRACE(sched_migrate_task); | ||
126 | |||
127 | #ifdef CONFIG_SMP | 123 | #ifdef CONFIG_SMP |
128 | 124 | ||
129 | static void double_rq_lock(struct rq *rq1, struct rq *rq2); | 125 | static void double_rq_lock(struct rq *rq1, struct rq *rq2); |
@@ -1964,7 +1960,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
1964 | 1960 | ||
1965 | clock_offset = old_rq->clock - new_rq->clock; | 1961 | clock_offset = old_rq->clock - new_rq->clock; |
1966 | 1962 | ||
1967 | trace_sched_migrate_task(p, task_cpu(p), new_cpu); | 1963 | trace_sched_migrate_task(p, new_cpu); |
1968 | 1964 | ||
1969 | #ifdef CONFIG_SCHEDSTATS | 1965 | #ifdef CONFIG_SCHEDSTATS |
1970 | if (p->se.wait_start) | 1966 | if (p->se.wait_start) |
@@ -2021,6 +2017,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) | |||
2021 | } | 2017 | } |
2022 | 2018 | ||
2023 | /* | 2019 | /* |
2020 | * wait_task_context_switch - wait for a thread to complete at least one | ||
2021 | * context switch. | ||
2022 | * | ||
2023 | * @p must not be current. | ||
2024 | */ | ||
2025 | void wait_task_context_switch(struct task_struct *p) | ||
2026 | { | ||
2027 | unsigned long nvcsw, nivcsw, flags; | ||
2028 | int running; | ||
2029 | struct rq *rq; | ||
2030 | |||
2031 | nvcsw = p->nvcsw; | ||
2032 | nivcsw = p->nivcsw; | ||
2033 | for (;;) { | ||
2034 | /* | ||
2035 | * The runqueue is assigned before the actual context | ||
2036 | * switch. We need to take the runqueue lock. | ||
2037 | * | ||
2038 | * We could check initially without the lock but it is | ||
2039 | * very likely that we need to take the lock in every | ||
2040 | * iteration. | ||
2041 | */ | ||
2042 | rq = task_rq_lock(p, &flags); | ||
2043 | running = task_running(rq, p); | ||
2044 | task_rq_unlock(rq, &flags); | ||
2045 | |||
2046 | if (likely(!running)) | ||
2047 | break; | ||
2048 | /* | ||
2049 | * The switch count is incremented before the actual | ||
2050 | * context switch. We thus wait for two switches to be | ||
2051 | * sure at least one completed. | ||
2052 | */ | ||
2053 | if ((p->nvcsw - nvcsw) > 1) | ||
2054 | break; | ||
2055 | if ((p->nivcsw - nivcsw) > 1) | ||
2056 | break; | ||
2057 | |||
2058 | cpu_relax(); | ||
2059 | } | ||
2060 | } | ||
2061 | |||
2062 | /* | ||
2024 | * wait_task_inactive - wait for a thread to unschedule. | 2063 | * wait_task_inactive - wait for a thread to unschedule. |
2025 | * | 2064 | * |
2026 | * If @match_state is nonzero, it's the @p->state value just checked and | 2065 | * If @match_state is nonzero, it's the @p->state value just checked and |