author     Ingo Molnar <mingo@elte.hu>           2008-05-12 15:20:54 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2008-05-23 15:13:32 -0400
commit     4d9493c90f8e6e1b164aede3814010a290161abb
tree       ff92e089d2da9fb5a40511d81f8e57a7e06fdf80
parent     d05cdb25d80f06f77aa6bddb53cd1390d4d91a0b
ftrace: remove add-hoc code
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  kernel/sched.c                      | 47
-rw-r--r--  kernel/sched_fair.c                 |  3
-rw-r--r--  kernel/trace/trace_sched_switch.c   | 10
3 files changed, 2 insertions, 58 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 1ec3fb2efee6..ad95cca4e42e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2412,53 +2412,6 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_CONTEXT_SWITCH_TRACER
-
-void ftrace_task(struct task_struct *p, void *__tr, void *__data)
-{
-#if 0
-	/*
-	 * trace timeline tree
-	 */
-	__trace_special(__tr, __data,
-			p->pid, p->se.vruntime, p->se.sum_exec_runtime);
-#else
-	/*
-	 * trace balance metrics
-	 */
-	__trace_special(__tr, __data,
-			p->pid, p->se.avg_overlap, 0);
-#endif
-}
-
-void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
-{
-	struct task_struct *p;
-	struct sched_entity *se;
-	struct rb_node *curr;
-	struct rq *rq = __rq;
-
-	if (rq->cfs.curr) {
-		p = task_of(rq->cfs.curr);
-		ftrace_task(p, __tr, __data);
-	}
-	if (rq->cfs.next) {
-		p = task_of(rq->cfs.next);
-		ftrace_task(p, __tr, __data);
-	}
-
-	for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
-		se = rb_entry(curr, struct sched_entity, run_node);
-		if (!entity_is_task(se))
-			continue;
-
-		p = task_of(se);
-		ftrace_task(p, __tr, __data);
-	}
-}
-
-#endif
-
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
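The removed walker leans on two kernel idioms: recovering a task_struct from its embedded sched_entity via task_of() (a container_of() wrapper), and an in-order rbtree walk with rb_next(). A minimal, self-contained userspace sketch of the container_of() idiom follows; all names below are illustrative stand-ins, not the kernel's actual types.

#include <stddef.h>
#include <stdio.h>

/*
 * Userspace sketch of the container_of() idiom behind task_of():
 * given a pointer to a member embedded in a larger struct, step
 * back to the enclosing struct. Illustrative names only.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_entity_demo { unsigned long vruntime; };

struct task_demo {
	int pid;
	struct sched_entity_demo se;	/* embedded, like task_struct::se */
};

static struct task_demo *task_of_demo(struct sched_entity_demo *se)
{
	return container_of(se, struct task_demo, se);
}

int main(void)
{
	struct task_demo t = { .pid = 42 };

	/* Recover the enclosing task from a pointer to its entity. */
	printf("pid=%d\n", task_of_demo(&t.se)->pid);
	return 0;
}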
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index dc1856f10795..e24ecd39c4b8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1061,8 +1061,6 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	if (!(this_sd->flags & SD_WAKE_AFFINE))
 		return 0;
 
-	ftrace_special(__LINE__, curr->se.avg_overlap, sync);
-	ftrace_special(__LINE__, p->se.avg_overlap, -1);
 	/*
 	 * If the currently running task will sleep within
 	 * a reasonable amount of time then attract this newly
@@ -1240,7 +1238,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (unlikely(se == pse))
 		return;
 
-	ftrace_special(__LINE__, p->pid, se->last_wakeup);
 	cfs_rq_of(pse)->next = pse;
 
 	/*
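The dropped ftrace_special(__LINE__, ...) calls follow a common debug pattern: tag ad-hoc values with their source line so events from different probe sites stay distinguishable in one shared trace. A minimal userspace analogue is sketched below; debug_special() and its output format are invented for illustration, not the tracer's API.

#include <stdio.h>

/*
 * Stand-in for the removed ftrace_special(__LINE__, a, b) pattern:
 * each event carries its call site, so two probes in one function
 * can be told apart when reading the log. Illustrative only.
 */
#define debug_special(arg1, arg2) \
	fprintf(stderr, "special %s:%d: %ld %ld\n", \
		__FILE__, __LINE__, (long)(arg1), (long)(arg2))

int main(void)
{
	long avg_overlap = 123;

	debug_special(avg_overlap, 1);	/* first probe site */
	debug_special(avg_overlap, -1);	/* second probe site */
	return 0;
}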
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index bddf676914ed..5671db0e1827 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -36,11 +36,8 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_switch_trace(tr, data, prev, next, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -65,11 +62,8 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1)) {
+	if (likely(disabled == 1))
 		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
-		if (trace_flags & TRACE_ITER_SCHED_TREE)
-			ftrace_all_fair_tasks(__rq, tr, data);
-	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
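Both hooks keep the per-CPU disabled counter as a reentrancy guard: an event is recorded only when atomic_inc_return() observes the counter going 0 to 1, so a nested or recursive entry on the same CPU records nothing. A self-contained C11 sketch of the same guard follows; the names and output are illustrative, not the tracer's code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long disabled;

static void record_event(const char *what)
{
	/* atomic_inc_return() analogue: fetch-add returns the old
	 * value, so old + 1 is the post-increment count. */
	long count = atomic_fetch_add(&disabled, 1) + 1;

	/* Only the sole active entry (count == 1) records; a nested
	 * entry sees count > 1 and falls through silently. */
	if (count == 1)
		printf("trace: %s\n", what);

	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	record_event("sched_switch");
	record_event("sched_wakeup");
	return 0;
}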