diff options
author | Steven Rostedt <rostedt@goodmis.org> | 2008-05-12 15:21:00 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-23 15:52:01 -0400 |
commit | 4fcdae83cebda24b519a89d3dd976081fff1ca80 (patch) | |
tree | 6ad0c14f709af1fe2352ce591d3c84d76c7646a0 | |
parent | ab46428c6969d50ecf6f6e97b7a84abba6274368 (diff) |
ftrace: comment code
This is the first installment of adding documentation to ftrace.
Expect many more patches of this kind in the near future.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | kernel/trace/trace.c | 135 | ||||
-rw-r--r-- | kernel/trace/trace.h | 7 |
2 files changed, 141 insertions, 1 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 5da391c5fb0d..a102b11eacf2 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -64,26 +64,79 @@ cycle_t ftrace_now(int cpu) | |||
64 | return cpu_clock(cpu); | 64 | return cpu_clock(cpu); |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | ||
68 | * The global_trace is the descriptor that holds the tracing | ||
69 | * buffers for the live tracing. For each CPU, it contains | ||
70 | * a linked list of pages that will store trace entries. The | ||
71 | * page descriptor of the pages in the memory is used to hold | ||
72 | * the linked list by linking the lru item in the page descriptor | ||
73 | * to each of the pages in the buffer per CPU. | ||
74 | * | ||
75 | * For each active CPU there is a data field that holds the | ||
76 | * pages for the buffer for that CPU. Each CPU has the same number | ||
77 | * of pages allocated for its buffer. | ||
78 | */ | ||
67 | static struct trace_array global_trace; | 79 | static struct trace_array global_trace; |
68 | 80 | ||
69 | static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); | 81 | static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); |
70 | 82 | ||
83 | /* | ||
84 | * The max_tr is used to snapshot the global_trace when a maximum | ||
85 | * latency is reached. Some tracers will use this to store a maximum | ||
86 | * trace while it continues examining live traces. | ||
87 | * | ||
88 | * The buffers for the max_tr are set up the same as the global_trace. | ||
89 | * When a snapshot is taken, the linked list of the max_tr is swapped | ||
90 | * with the linked list of the global_trace and the buffers are reset for | ||
91 | * the global_trace so the tracing can continue. | ||
92 | */ | ||
71 | static struct trace_array max_tr; | 93 | static struct trace_array max_tr; |
72 | 94 | ||
73 | static DEFINE_PER_CPU(struct trace_array_cpu, max_data); | 95 | static DEFINE_PER_CPU(struct trace_array_cpu, max_data); |
74 | 96 | ||
97 | /* tracer_enabled is used to toggle activation of a tracer */ | ||
75 | static int tracer_enabled = 1; | 98 | static int tracer_enabled = 1; |
99 | |||
100 | /* | ||
101 | * trace_nr_entries is the number of entries that is allocated | ||
102 | * for a buffer. Note, the number of entries is always rounded | ||
103 | * to ENTRIES_PER_PAGE. | ||
104 | */ | ||
76 | static unsigned long trace_nr_entries = 65536UL; | 105 | static unsigned long trace_nr_entries = 65536UL; |
77 | 106 | ||
107 | /* trace_types holds a linked list of available tracers. */ | ||
78 | static struct tracer *trace_types __read_mostly; | 108 | static struct tracer *trace_types __read_mostly; |
109 | |||
110 | /* current_trace points to the tracer that is currently active */ | ||
79 | static struct tracer *current_trace __read_mostly; | 111 | static struct tracer *current_trace __read_mostly; |
112 | |||
113 | /* | ||
114 | * max_tracer_type_len is used to simplify the allocating of | ||
115 | * buffers to read userspace tracer names. We keep track of | ||
116 | * the longest tracer name registered. | ||
117 | */ | ||
80 | static int max_tracer_type_len; | 118 | static int max_tracer_type_len; |
81 | 119 | ||
120 | /* | ||
121 | * trace_types_lock is used to protect the trace_types list. | ||
122 | * This lock is also used to keep user access serialized. | ||
123 | * Accesses from userspace will grab this lock while userspace | ||
124 | * activities happen inside the kernel. | ||
125 | */ | ||
82 | static DEFINE_MUTEX(trace_types_lock); | 126 | static DEFINE_MUTEX(trace_types_lock); |
127 | |||
128 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | ||
83 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 129 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); |
84 | 130 | ||
131 | /* trace_flags holds iter_ctrl options */ | ||
85 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; | 132 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; |
86 | 133 | ||
134 | /** | ||
135 | * trace_wake_up - wake up tasks waiting for trace input | ||
136 | * | ||
137 | * Simply wakes up any task that is blocked on the trace_wait | ||
138 | * queue. This is used with trace_poll for tasks polling the trace. | ||
139 | */ | ||
87 | void trace_wake_up(void) | 140 | void trace_wake_up(void) |
88 | { | 141 | { |
89 | /* | 142 | /* |
@@ -117,6 +170,14 @@ unsigned long nsecs_to_usecs(unsigned long nsecs) | |||
117 | return nsecs / 1000; | 170 | return nsecs / 1000; |
118 | } | 171 | } |
119 | 172 | ||
173 | /* | ||
174 | * trace_flag_type is an enumeration that holds different | ||
175 | * states when a trace occurs. These are: | ||
176 | * IRQS_OFF - interrupts were disabled | ||
177 | * NEED_RESCHED - reschedule is requested | ||
178 | * HARDIRQ - inside an interrupt handler | ||
179 | * SOFTIRQ - inside a softirq handler | ||
180 | */ | ||
120 | enum trace_flag_type { | 181 | enum trace_flag_type { |
121 | TRACE_FLAG_IRQS_OFF = 0x01, | 182 | TRACE_FLAG_IRQS_OFF = 0x01, |
122 | TRACE_FLAG_NEED_RESCHED = 0x02, | 183 | TRACE_FLAG_NEED_RESCHED = 0x02, |
@@ -124,10 +185,14 @@ enum trace_flag_type { | |||
124 | TRACE_FLAG_SOFTIRQ = 0x08, | 185 | TRACE_FLAG_SOFTIRQ = 0x08, |
125 | }; | 186 | }; |
126 | 187 | ||
188 | /* | ||
189 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | ||
190 | * control the output of kernel symbols. | ||
191 | */ | ||
127 | #define TRACE_ITER_SYM_MASK \ | 192 | #define TRACE_ITER_SYM_MASK \ |
128 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | 193 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) |
129 | 194 | ||
130 | /* These must match the bit positions above */ | 195 | /* These must match the bit positions in trace_iterator_flags */
131 | static const char *trace_options[] = { | 196 | static const char *trace_options[] = { |
132 | "print-parent", | 197 | "print-parent", |
133 | "sym-offset", | 198 | "sym-offset", |
@@ -142,6 +207,15 @@ static const char *trace_options[] = { | |||
142 | NULL | 207 | NULL |
143 | }; | 208 | }; |
144 | 209 | ||
210 | /* | ||
211 | * ftrace_max_lock is used to protect the swapping of buffers | ||
212 | * when taking a max snapshot. The buffers themselves are | ||
213 | * protected by per_cpu spinlocks. But the action of the swap | ||
214 | * needs its own lock. | ||
215 | * | ||
216 | * This is defined as a raw_spinlock_t in order to help | ||
217 | * with performance when lockdep debugging is enabled. | ||
218 | */ | ||
145 | static raw_spinlock_t ftrace_max_lock = | 219 | static raw_spinlock_t ftrace_max_lock = |
146 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 220 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; |
147 | 221 | ||
@@ -172,6 +246,13 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
172 | tracing_record_cmdline(current); | 246 | tracing_record_cmdline(current); |
173 | } | 247 | } |
174 | 248 | ||
249 | /** | ||
250 | * check_pages - integrity check of trace buffers | ||
251 | * | ||
252 | * As a safety measure we check to make sure the data pages have not | ||
253 | * been corrupted. TODO: configure to disable this because it adds | ||
254 | * a bit of overhead. | ||
255 | */ | ||
175 | void check_pages(struct trace_array_cpu *data) | 256 | void check_pages(struct trace_array_cpu *data) |
176 | { | 257 | { |
177 | struct page *page, *tmp; | 258 | struct page *page, *tmp; |
@@ -185,6 +266,13 @@ void check_pages(struct trace_array_cpu *data) | |||
185 | } | 266 | } |
186 | } | 267 | } |
187 | 268 | ||
269 | /** | ||
270 | * head_page - page address of the first page in per_cpu buffer. | ||
271 | * | ||
272 | * head_page returns the page address of the first page in | ||
273 | * a per_cpu buffer. This also performs various consistency | ||
274 | * checks to make sure the buffer has not been corrupted. | ||
275 | */ | ||
188 | void *head_page(struct trace_array_cpu *data) | 276 | void *head_page(struct trace_array_cpu *data) |
189 | { | 277 | { |
190 | struct page *page; | 278 | struct page *page; |
@@ -199,6 +287,17 @@ void *head_page(struct trace_array_cpu *data) | |||
199 | return page_address(page); | 287 | return page_address(page); |
200 | } | 288 | } |
201 | 289 | ||
290 | /** | ||
291 | * trace_seq_printf - sequence printing of trace information | ||
292 | * @s: trace sequence descriptor | ||
293 | * @fmt: printf format string | ||
294 | * | ||
295 | * The tracer may use either sequence operations or its own | ||
296 | * copy to user routines. To simplify formatting of a trace | ||
297 | * trace_seq_printf is used to store strings into a special | ||
298 | * buffer (@s). Then the output may be either used by | ||
299 | * the sequencer or pulled into another buffer. | ||
300 | */ | ||
202 | int | 301 | int |
203 | trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | 302 | trace_seq_printf(struct trace_seq *s, const char *fmt, ...) |
204 | { | 303 | { |
@@ -222,6 +321,16 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | |||
222 | return len; | 321 | return len; |
223 | } | 322 | } |
224 | 323 | ||
324 | /** | ||
325 | * trace_seq_puts - trace sequence printing of simple string | ||
326 | * @s: trace sequence descriptor | ||
327 | * @str: simple string to record | ||
328 | * | ||
329 | * The tracer may use either the sequence operations or its own | ||
330 | * copy to user routines. This function records a simple string | ||
331 | * into a special buffer (@s) for later retrieval by a sequencer | ||
332 | * or other mechanism. | ||
333 | */ | ||
225 | static int | 334 | static int |
226 | trace_seq_puts(struct trace_seq *s, const char *str) | 335 | trace_seq_puts(struct trace_seq *s, const char *str) |
227 | { | 336 | { |
@@ -304,6 +413,13 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s) | |||
304 | trace_seq_reset(s); | 413 | trace_seq_reset(s); |
305 | } | 414 | } |
306 | 415 | ||
416 | /* | ||
417 | * flip the trace buffers between two trace descriptors. | ||
418 | * This usually is the buffers between the global_trace and | ||
419 | * the max_tr to record a snapshot of a current trace. | ||
420 | * | ||
421 | * The ftrace_max_lock must be held. | ||
422 | */ | ||
307 | static void | 423 | static void |
308 | flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2) | 424 | flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2) |
309 | { | 425 | { |
@@ -325,6 +441,15 @@ flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2) | |||
325 | check_pages(tr2); | 441 | check_pages(tr2); |
326 | } | 442 | } |
327 | 443 | ||
444 | /** | ||
445 | * update_max_tr - snapshot all trace buffers from global_trace to max_tr | ||
446 | * @tr: tracer | ||
447 | * @tsk: the task with the latency | ||
448 | * @cpu: The cpu that initiated the trace. | ||
449 | * | ||
450 | * Flip the buffers between the @tr and the max_tr and record information | ||
451 | * about which task was the cause of this latency. | ||
452 | */ | ||
328 | void | 453 | void |
329 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 454 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
330 | { | 455 | { |
@@ -349,6 +474,8 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
349 | * @tr - tracer | 474 | * @tr - tracer |
350 | * @tsk - task with the latency | 475 | * @tsk - task with the latency |
351 | * @cpu - the cpu of the buffer to copy. | 476 | * @cpu - the cpu of the buffer to copy. |
477 | * | ||
478 | * Flip the trace of a single CPU buffer between the @tr and the max_tr. | ||
352 | */ | 479 | */ |
353 | void | 480 | void |
354 | update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | 481 | update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) |
@@ -368,6 +495,12 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
368 | __raw_spin_unlock(&ftrace_max_lock); | 495 | __raw_spin_unlock(&ftrace_max_lock); |
369 | } | 496 | } |
370 | 497 | ||
498 | /** | ||
499 | * register_tracer - register a tracer with the ftrace system. | ||
500 | * @type - the plugin for the tracer | ||
501 | * | ||
502 | * Register a new plugin tracer. | ||
503 | */ | ||
371 | int register_tracer(struct tracer *type) | 504 | int register_tracer(struct tracer *type) |
372 | { | 505 | { |
373 | struct tracer *t; | 506 | struct tracer *t; |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index b0ca7473671b..21c29ee13e53 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -294,6 +294,13 @@ extern long ns2usecs(cycle_t nsec); | |||
294 | 294 | ||
295 | extern unsigned long trace_flags; | 295 | extern unsigned long trace_flags; |
296 | 296 | ||
297 | /* | ||
298 | * trace_iterator_flags is an enumeration that defines bit | ||
299 | * positions into trace_flags that controls the output. | ||
300 | * | ||
301 | * NOTE: These bits must match the trace_options array in | ||
302 | * trace.c. | ||
303 | */ | ||
297 | enum trace_iterator_flags { | 304 | enum trace_iterator_flags { |
298 | TRACE_ITER_PRINT_PARENT = 0x01, | 305 | TRACE_ITER_PRINT_PARENT = 0x01, |
299 | TRACE_ITER_SYM_OFFSET = 0x02, | 306 | TRACE_ITER_SYM_OFFSET = 0x02, |