author:    Linus Torvalds <torvalds@linux-foundation.org>  2008-12-28 15:21:10 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org>  2008-12-28 15:21:10 -0500
commit:    b0f4b285d7ed174804658539129a834270f4829a (patch)
tree:      be7f8dca58075aba2c6a137fcfd4d44c5c333efc /kernel/sched.c
parent:    be9c5ae4eeec2e85527e95647348b8ea4eb25128 (diff)
parent:    5250d329e38cdf7580faeb9c53c17d3588d7d19c (diff)
Merge branch 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (241 commits)
sched, trace: update trace_sched_wakeup()
tracing/ftrace: don't trace on early stage of a secondary cpu boot, v3
Revert "x86: disable X86_PTRACE_BTS"
ring-buffer: prevent false positive warning
ring-buffer: fix dangling commit race
ftrace: enable format arguments checking
x86, bts: memory accounting
x86, bts: add fork and exit handling
ftrace: introduce tracing_reset_online_cpus() helper
tracing: fix warnings in kernel/trace/trace_sched_switch.c
tracing: fix warning in kernel/trace/trace.c
tracing/ring-buffer: remove unused ring_buffer size
trace: fix task state printout
ftrace: add not to regex on filtering functions
trace: better use of stack_trace_enabled for boot up code
trace: add a way to enable or disable the stack tracer
x86: entry_64 - introduce FTRACE_ frame macro v2
tracing/ftrace: add the printk-msg-only option
tracing/ftrace: use preempt_enable_no_resched_notrace in ring_buffer_time_stamp()
x86, bts: correctly report invalid bts records
...
Fixed up trivial conflict in scripts/recordmcount.pl due to SH bits
being already partly merged by the SH merge.
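
The kernel/sched.c side of this merge is all tracing hooks: the scheduler tracepoints gain explicit DEFINE_TRACE() definition sites, trace_sched_wakeup() and trace_sched_wakeup_new() grow a `success` argument, and the migration tracepoint moves into set_task_cpu(). As a hedged sketch (the exact spelling lives in include/trace/sched.h of this period, using the then-current TPPROTO/TPARGS macros), the header-side declaration the wakeup callers now match looks like:

    DECLARE_TRACE(sched_wakeup,
            TPPROTO(struct rq *rq, struct task_struct *p, int success),
            TPARGS(rq, p, success));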
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 33cf4a1cbcd1..3798b954e6e8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -118,6 +118,12 @@
  */
 #define RUNTIME_INF     ((u64)~0ULL)
 
+DEFINE_TRACE(sched_wait_task);
+DEFINE_TRACE(sched_wakeup);
+DEFINE_TRACE(sched_wakeup_new);
+DEFINE_TRACE(sched_switch);
+DEFINE_TRACE(sched_migrate_task);
+
 #ifdef CONFIG_SMP
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
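
This block is the definition half of the tracepoint API reworked in this merge window: DECLARE_TRACE() in a shared header emits the inline trace_<name>() stub plus the register/unregister hooks, and exactly one .c file anchors each tracepoint with DEFINE_TRACE(). A minimal sketch of the pairing, assuming the 2.6.29-era macros:

    /* include/trace/sched.h: declaration visible to all users */
    DECLARE_TRACE(sched_migrate_task,
            TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
            TPARGS(p, orig_cpu, dest_cpu));

    /* kernel/sched.c: the single definition, as in the hunk above */
    DEFINE_TRACE(sched_migrate_task);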
@@ -1847,6 +1853,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
        clock_offset = old_rq->clock - new_rq->clock;
 
+       trace_sched_migrate_task(p, task_cpu(p), new_cpu);
+
 #ifdef CONFIG_SCHEDSTATS
        if (p->se.wait_start)
                p->se.wait_start -= clock_offset;
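
Calling the tracepoint from set_task_cpu() means every migration fires it, not only the explicit sched_migrate_task() path that previously hosted the call (see the removal a few hunks below). A hedged usage sketch, not part of the commit, of a module attaching a probe through the register_trace_sched_migrate_task() hook that DECLARE_TRACE() generates; the probe runs in scheduler context, possibly under runqueue locks, so it only bumps a counter:

    #include <linux/module.h>
    #include <trace/sched.h>

    static unsigned long nr_migrations;     /* racy ++ is fine for a sketch */

    static void probe_migrate(struct task_struct *p, int orig_cpu, int dest_cpu)
    {
            nr_migrations++;
    }

    static int __init probe_init(void)
    {
            return register_trace_sched_migrate_task(probe_migrate);
    }

    static void __exit probe_exit(void)
    {
            unregister_trace_sched_migrate_task(probe_migrate);
            printk(KERN_INFO "saw %lu migrations\n", nr_migrations);
    }

    module_init(probe_init);
    module_exit(probe_exit);
    MODULE_LICENSE("GPL");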
@@ -2318,7 +2326,7 @@ out_activate:
        success = 1;
 
 out_running:
-       trace_sched_wakeup(rq, p);
+       trace_sched_wakeup(rq, p, success);
        check_preempt_curr(rq, p, sync);
 
        p->state = TASK_RUNNING;
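
The new third argument forwards try_to_wake_up()'s `success` flag, so a probe can distinguish a wakeup that actually changed the task's state from a call that found it already runnable; wake_up_new_task() in the next hunk passes a literal 1, since a freshly forked task is always genuinely woken.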
@@ -2451,7 +2459,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                p->sched_class->task_new(rq, p);
                inc_nr_running(rq);
        }
-       trace_sched_wakeup_new(rq, p);
+       trace_sched_wakeup_new(rq, p, 1);
        check_preempt_curr(rq, p, 0);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_wake_up)
@@ -2864,7 +2872,6 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
            || unlikely(!cpu_active(dest_cpu)))
                goto out;
 
-       trace_sched_migrate_task(rq, p, dest_cpu);
        /* force the process onto the specified CPU */
        if (migrate_task(p, dest_cpu, &req)) {
                /* Need to wait for migration thread (might exit: take ref). */
@@ -5912,6 +5919,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         * The idle tasks have their own, simple scheduling class:
         */
        idle->sched_class = &idle_sched_class;
+       ftrace_graph_init_task(idle);
 }
 
 /*
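
The added ftrace_graph_init_task() call initializes the per-task state (the return-address stack) that the function graph tracer keeps for each thread. Idle tasks are set up here rather than through the common fork path; presumably the point of this hunk, consistent with the "don't trace on early stage of a secondary cpu boot" entry in the shortlog above, is that graph tracing an idle CPU would otherwise operate on uninitialized state.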