author    Ingo Molnar <mingo@elte.hu>           2008-05-12 15:20:52 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2008-05-23 15:04:44 -0400
commit    4e65551905fb0300ae7e667cbaa41ee2e3f29a13
tree      0e642c311e31043eecf86c218128c40e1ddac782 /kernel
parent    4c1f4d4f0175129934d5dbc19a39296430937a05
ftrace: sched tracer, trace full rbtree
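
The sched tracer now records the state of the whole CFS runqueue, not just
the two tasks involved in a switch or wakeup: a new ftrace_all_fair_tasks()
helper in sched.c walks the rbtree of runnable entities and emits one
TRACE_SPECIAL entry (pid, vruntime, sum_exec_runtime) per task via
__trace_special(). Because struct rq is private to sched.c, the runqueue
pointer is threaded through ftrace_ctx_switch() and ftrace_wake_up_task()
as an opaque void *. The trace_flags word and the trace_iterator_flags enum
move from trace.c into trace.h so they are visible outside trace.c, the
open-coded reader wakeups are consolidated into a new trace_wake_up()
helper, and TRACE_SPECIAL arguments are printed in decimal instead of hex.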
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/sched.c                    | 35
 -rw-r--r--  kernel/trace/trace.c              | 55
 -rw-r--r--  kernel/trace/trace.h              | 14
 -rw-r--r--  kernel/trace/trace_sched_switch.c | 24
 4 files changed, 85 insertions(+), 43 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 53ab1174664f..b9208a0e33a0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2394,6 +2394,35 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+
+void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
+{
+	struct sched_entity *se;
+	struct task_struct *p;
+	struct rb_node *curr;
+	struct rq *rq = __rq;
+
+	curr = first_fair(&rq->cfs);
+	if (!curr)
+		return;
+
+	while (curr) {
+		se = rb_entry(curr, struct sched_entity, run_node);
+		if (!entity_is_task(se))
+			continue;
+
+		p = task_of(se);
+
+		__trace_special(__tr, __data,
+				p->pid, p->se.vruntime, p->se.sum_exec_runtime);
+
+		curr = rb_next(curr);
+	}
+}
+
+#endif
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
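
One caveat in the walker added above: when CONFIG_FAIR_GROUP_SCHED is
enabled, entity_is_task() can be false for a group scheduling entity, and
the continue skips the curr = rb_next(curr) step, so the loop would spin on
that node (with group scheduling off, entity_is_task() evaluates to a
constant true in this era, so the branch is dead). A sketch of a variant
that always advances the iterator, illustrative only and not part of this
commit:

void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
{
	struct rq *rq = __rq;
	struct rb_node *curr;

	for (curr = first_fair(&rq->cfs); curr; curr = rb_next(curr)) {
		struct sched_entity *se;
		struct task_struct *p;

		se = rb_entry(curr, struct sched_entity, run_node);
		/* skip group entities, but keep walking the tree */
		if (!entity_is_task(se))
			continue;

		p = task_of(se);
		__trace_special(__tr, __data,
				p->pid, p->se.vruntime, p->se.sum_exec_runtime);
	}
}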
@@ -2468,7 +2497,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 out_activate:
 #endif /* CONFIG_SMP */
-	ftrace_wake_up_task(p, rq->curr);
+	ftrace_wake_up_task(rq, p, rq->curr);
 	schedstat_inc(p, se.nr_wakeups);
 	if (sync)
 		schedstat_inc(p, se.nr_wakeups_sync);
@@ -2613,7 +2642,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_new(rq, p);
 		inc_nr_running(rq);
 	}
-	ftrace_wake_up_task(p, rq->curr);
+	ftrace_wake_up_task(rq, p, rq->curr);
 	check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
@@ -2786,7 +2815,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	ftrace_ctx_switch(prev, next);
+	ftrace_ctx_switch(rq, prev, next);
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
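
Note on the hook signatures: the runqueue argument crosses into the tracer
as a void * because struct rq is defined locally in sched.c and is not
visible to kernel/trace/. Only sched.c casts it back (struct rq *rq = __rq
above); the tracer merely forwards the cookie into ftrace_all_fair_tasks().
The matching declarations presumably live in the ftrace header; the exact
location is not shown in this diff, so the following is an assumption:

/* assumed declarations (header not part of this diff) */
void ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		       struct task_struct *next);
void ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
			 struct task_struct *curr);
void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);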
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0e4b7119e263..65173b14b914 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -66,7 +66,18 @@ static struct tracer *current_trace __read_mostly;
 static int max_tracer_type_len;
 
 static DEFINE_MUTEX(trace_types_lock);
-static DECLARE_WAIT_QUEUE_HEAD (trace_wait);
+static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
+
+unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
+
+/*
+ * FIXME: where should this be called?
+ */
+void trace_wake_up(void)
+{
+	if (!(trace_flags & TRACE_ITER_BLOCK))
+		wake_up(&trace_wait);
+}
 
 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
 
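
trace_wake_up() consolidates the reader wakeup that was previously
open-coded after every buffer write; the hunks below delete that pattern
from trace_function(), __trace_special(), tracing_sched_switch_trace() and
tracing_sched_wakeup_trace(). The FIXME is honest: this patch removes the
inline wakeups without adding a trace_wake_up() call site, so blocked
readers are no longer poked on each entry. The intended pattern would
presumably look like this (a sketch under that assumption;
record_entry_sketch is a hypothetical name):

static void record_entry_sketch(struct trace_array *tr,
				struct trace_array_cpu *data)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	/* ... reserve and fill a struct trace_entry ... */
	spin_unlock_irqrestore(&data->lock, irq_flags);

	trace_wake_up();	/* no-op when TRACE_ITER_BLOCK is set */
}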
@@ -103,18 +114,6 @@ enum trace_flag_type {
 	TRACE_FLAG_SOFTIRQ = 0x08,
 };
 
-enum trace_iterator_flags {
-	TRACE_ITER_PRINT_PARENT = 0x01,
-	TRACE_ITER_SYM_OFFSET = 0x02,
-	TRACE_ITER_SYM_ADDR = 0x04,
-	TRACE_ITER_VERBOSE = 0x08,
-	TRACE_ITER_RAW = 0x10,
-	TRACE_ITER_HEX = 0x20,
-	TRACE_ITER_BIN = 0x40,
-	TRACE_ITER_BLOCK = 0x80,
-	TRACE_ITER_STACKTRACE = 0x100,
-};
-
 #define TRACE_ITER_SYM_MASK \
 	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
 
@@ -132,8 +131,6 @@ static const char *trace_options[] = {
 	NULL
 };
 
-static unsigned trace_flags = TRACE_ITER_PRINT_PARENT;
-
 static DEFINE_SPINLOCK(ftrace_max_lock);
 
 /*
@@ -660,9 +657,6 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	entry->fn.ip = ip;
 	entry->fn.parent_ip = parent_ip;
 	spin_unlock_irqrestore(&data->lock, irq_flags);
-
-	if (!(trace_flags & TRACE_ITER_BLOCK))
-		wake_up(&trace_wait);
 }
 
 void
@@ -673,10 +667,14 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 	trace_function(tr, data, ip, parent_ip, flags);
 }
 
+#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+
 void
-trace_special(struct trace_array *tr, struct trace_array_cpu *data,
-	      unsigned long arg1, unsigned long arg2, unsigned long arg3)
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
+	struct trace_array_cpu *data = __data;
+	struct trace_array *tr = __tr;
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
@@ -688,11 +686,10 @@ trace_special(struct trace_array *tr, struct trace_array_cpu *data,
 	entry->special.arg2 = arg2;
 	entry->special.arg3 = arg3;
 	spin_unlock_irqrestore(&data->lock, irq_flags);
-
-	if (!(trace_flags & TRACE_ITER_BLOCK))
-		wake_up(&trace_wait);
 }
 
+#endif
+
 void __trace_stack(struct trace_array *tr,
 		   struct trace_array_cpu *data,
 		   unsigned long flags,
@@ -739,9 +736,6 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->ctx.next_prio = next->prio;
 	__trace_stack(tr, data, flags, 4);
 	spin_unlock_irqrestore(&data->lock, irq_flags);
-
-	if (!(trace_flags & TRACE_ITER_BLOCK))
-		wake_up(&trace_wait);
 }
 
 void
@@ -765,9 +759,6 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->ctx.next_prio = wakee->prio;
 	__trace_stack(tr, data, flags, 5);
 	spin_unlock_irqrestore(&data->lock, irq_flags);
-
-	if (!(trace_flags & TRACE_ITER_BLOCK))
-		wake_up(&trace_wait);
 }
 
 #ifdef CONFIG_FTRACE
@@ -1258,7 +1249,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 			 comm);
 		break;
 	case TRACE_SPECIAL:
-		trace_seq_printf(s, " %lx %lx %lx\n",
+		trace_seq_printf(s, " %ld %ld %ld\n",
 				 entry->special.arg1,
 				 entry->special.arg2,
 				 entry->special.arg3);
@@ -1344,7 +1335,7 @@ static int print_trace_fmt(struct trace_iterator *iter)
 			return 0;
 		break;
 	case TRACE_SPECIAL:
-		ret = trace_seq_printf(s, " %lx %lx %lx\n",
+		ret = trace_seq_printf(s, " %ld %ld %ld\n",
 				       entry->special.arg1,
 				       entry->special.arg2,
 				       entry->special.arg3);
@@ -1409,7 +1400,7 @@ static int print_raw_fmt(struct trace_iterator *iter)
 		break;
 	case TRACE_SPECIAL:
 	case TRACE_STACK:
-		ret = trace_seq_printf(s, " %lx %lx %lx\n",
+		ret = trace_seq_printf(s, " %ld %ld %ld\n",
 				       entry->special.arg1,
 				       entry->special.arg2,
 				       entry->special.arg3);
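
The three format changes above switch TRACE_SPECIAL output from hex to
signed decimal: with the sched tracer feeding pid, vruntime and
sum_exec_runtime into the three args, decimal values are directly
comparable across entries, whereas hex mainly suits address-like
arguments. Purely as illustration of the difference:

trace_seq_printf(s, " %lx\n", p->se.vruntime);	/* hex: fits pointers      */
trace_seq_printf(s, " %ld\n", p->se.vruntime);	/* decimal: readable times */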
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 387bdcf45e28..75e237475674 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -274,4 +274,18 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 
 extern void *head_page(struct trace_array_cpu *data);
 
+extern unsigned long trace_flags;
+
+enum trace_iterator_flags {
+	TRACE_ITER_PRINT_PARENT = 0x01,
+	TRACE_ITER_SYM_OFFSET = 0x02,
+	TRACE_ITER_SYM_ADDR = 0x04,
+	TRACE_ITER_VERBOSE = 0x08,
+	TRACE_ITER_RAW = 0x10,
+	TRACE_ITER_HEX = 0x20,
+	TRACE_ITER_BIN = 0x40,
+	TRACE_ITER_BLOCK = 0x80,
+	TRACE_ITER_STACKTRACE = 0x100,
+};
+
 #endif /* _LINUX_KERNEL_TRACE_H */
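
With trace_flags exported and the flag enum in the shared header, option
tests no longer have to live in trace.c. For example, any tracer that
includes trace.h could now do something like the following
(printing_symbols is a hypothetical helper, shown only to illustrate the
newly visible symbols):

#include "trace.h"

static int printing_symbols(void)
{
	/* trace_flags is extern now, so this compiles outside trace.c */
	return trace_flags & (TRACE_ITER_SYM_OFFSET | TRACE_ITER_SYM_ADDR);
}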
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 8b1cf1a3aee0..12658b3f2b28 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -18,7 +18,7 @@ static struct trace_array *ctx_trace;
 static int __read_mostly tracer_enabled;
 
 static void
-ctx_switch_func(struct task_struct *prev, struct task_struct *next)
+ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 {
 	struct trace_array *tr = ctx_trace;
 	struct trace_array_cpu *data;
@@ -34,14 +34,17 @@ ctx_switch_func(struct task_struct *prev, struct task_struct *next)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(disabled == 1)) {
 		tracing_sched_switch_trace(tr, data, prev, next, flags);
+		ftrace_all_fair_tasks(__rq, tr, data);
+	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-static void wakeup_func(struct task_struct *wakee, struct task_struct *curr)
+static void
+wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 {
 	struct trace_array *tr = ctx_trace;
 	struct trace_array_cpu *data;
@@ -57,14 +60,18 @@ static void wakeup_func(struct task_struct *wakee, struct task_struct *curr)
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(disabled == 1)) {
 		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+		ftrace_all_fair_tasks(__rq, tr, data);
+	}
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
+void
+ftrace_ctx_switch(void *__rq, struct task_struct *prev,
+		  struct task_struct *next)
 {
 	tracing_record_cmdline(prev);
 
@@ -72,7 +79,7 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
 	 * If tracer_switch_func only points to the local
 	 * switch func, it still needs the ptr passed to it.
 	 */
-	ctx_switch_func(prev, next);
+	ctx_switch_func(__rq, prev, next);
 
 	/*
 	 * Chain to the wakeup tracer (this is a NOP if disabled):
@@ -81,11 +88,12 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
 }
 
 void
-ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
+ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
+		    struct task_struct *curr)
 {
 	tracing_record_cmdline(curr);
 
-	wakeup_func(wakee, curr);
+	wakeup_func(__rq, wakee, curr);
 
 	/*
 	 * Chain to the wakeup tracer (this is a NOP if disabled):
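
Both hooks above grow braces because the new ftrace_all_fair_tasks() call
must stay inside the disabled == 1 guard. That counter is a per-cpu
recursion gate: atomic_inc_return() marks entry, only the outermost caller
on the CPU records anything, and any tracing triggered from inside the
rbtree walk sees disabled > 1 and falls through to the decrement. The gate
in isolation (a sketch; traced_hook is a hypothetical name):

static void traced_hook(struct trace_array_cpu *data)
{
	long disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		/* outermost entry on this CPU: safe to record a trace */
	}

	atomic_dec(&data->disabled);	/* always rebalance the counter */
}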