Diffstat (limited to 'kernel/trace/trace.h')
 kernel/trace/trace.h | 208 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 161 insertions(+), 47 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4d3d381bfd95..d80ca0d464d9 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,6 +9,8 @@
 #include <linux/mmiotrace.h>
 #include <linux/ftrace.h>
 #include <trace/boot.h>
+#include <trace/kmemtrace.h>
+#include <trace/power.h>
 
 enum trace_type {
 	__TRACE_FIRST_TYPE = 0,
@@ -16,7 +18,6 @@ enum trace_type {
 	TRACE_FN,
 	TRACE_CTX,
 	TRACE_WAKE,
-	TRACE_CONT,
 	TRACE_STACK,
 	TRACE_PRINT,
 	TRACE_SPECIAL,
@@ -29,9 +30,14 @@ enum trace_type {
 	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
 	TRACE_HW_BRANCHES,
+	TRACE_SYSCALL_ENTER,
+	TRACE_SYSCALL_EXIT,
+	TRACE_KMEM_ALLOC,
+	TRACE_KMEM_FREE,
 	TRACE_POWER,
+	TRACE_BLK,
 
-	__TRACE_LAST_TYPE
+	__TRACE_LAST_TYPE,
 };
 
 /*
@@ -42,7 +48,6 @@ enum trace_type {
  */
 struct trace_entry {
 	unsigned char type;
-	unsigned char cpu;
 	unsigned char flags;
 	unsigned char preempt_count;
 	int pid;
@@ -60,13 +65,13 @@ struct ftrace_entry {
 
 /* Function call entry */
 struct ftrace_graph_ent_entry {
 	struct trace_entry ent;
 	struct ftrace_graph_ent graph_ent;
 };
 
 /* Function return entry */
 struct ftrace_graph_ret_entry {
 	struct trace_entry ent;
 	struct ftrace_graph_ret ret;
 };
 extern struct tracer boot_tracer;
@@ -112,13 +117,14 @@ struct userstack_entry {
 };
 
 /*
- * ftrace_printk entry:
+ * trace_printk entry:
  */
 struct print_entry {
 	struct trace_entry ent;
 	unsigned long ip;
 	int depth;
-	char buf[];
+	const char *fmt;
+	u32 buf[];
 };
 
 #define TRACE_OLD_SIZE 88
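
The print_entry change above is the core of the binary trace_printk: instead of rendering a formatted string at trace time, the record now stores a pointer to the format (which lives in the kernel image, so a bare pointer is safe) plus the arguments packed into the trailing u32 array, and formatting is deferred to read time. A sketch of a writer for this layout, assuming the vbin_printf() binary-printf helper and the trace_current_buffer_*() declarations added further down in this patch; the function name and the on-stack scratch buffer are illustrative only, not the actual trace.c implementation:

static void sketch_vprintk(unsigned long ip, int depth,
			   const char *fmt, va_list args)
{
	/* illustrative only: real code would use a protected per-CPU buffer */
	u32 scratch[TRACE_BUF_SIZE / sizeof(u32)];
	struct ring_buffer_event *event;
	struct print_entry *entry;
	int len, size;

	/* pack the varargs into 32-bit words, guided by fmt */
	len = vbin_printf(scratch, ARRAY_SIZE(scratch), fmt, args);
	if (len < 0)
		return;

	size = sizeof(*entry) + sizeof(u32) * len;
	event = trace_current_buffer_lock_reserve(TRACE_PRINT, size,
						  0, preempt_count());
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->depth = depth;
	entry->fmt = fmt;	/* resolved again when the buffer is read */
	memcpy(entry->buf, scratch, sizeof(u32) * len);
	trace_current_buffer_unlock_commit(event, 0, preempt_count());
}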
@@ -170,15 +176,45 @@ struct trace_power {
 	struct power_trace state_data;
 };
 
+struct kmemtrace_alloc_entry {
+	struct trace_entry ent;
+	enum kmemtrace_type_id type_id;
+	unsigned long call_site;
+	const void *ptr;
+	size_t bytes_req;
+	size_t bytes_alloc;
+	gfp_t gfp_flags;
+	int node;
+};
+
+struct kmemtrace_free_entry {
+	struct trace_entry ent;
+	enum kmemtrace_type_id type_id;
+	unsigned long call_site;
+	const void *ptr;
+};
+
+struct syscall_trace_enter {
+	struct trace_entry ent;
+	int nr;
+	unsigned long args[];
+};
+
+struct syscall_trace_exit {
+	struct trace_entry ent;
+	int nr;
+	unsigned long ret;
+};
+
+
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
  *  IRQS_OFF	- interrupts were disabled
  *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
  *  NEED_RESCED	- reschedule is requested
  *  HARDIRQ	- inside an interrupt handler
  *  SOFTIRQ	- inside a softirq handler
- *  CONT	- multiple entries hold the trace item
  */
 enum trace_flag_type {
 	TRACE_FLAG_IRQS_OFF = 0x01,
@@ -186,7 +222,6 @@ enum trace_flag_type {
 	TRACE_FLAG_NEED_RESCHED = 0x04,
 	TRACE_FLAG_HARDIRQ = 0x08,
 	TRACE_FLAG_SOFTIRQ = 0x10,
-	TRACE_FLAG_CONT = 0x20,
 };
 
 #define TRACE_BUF_SIZE 1024
@@ -198,6 +233,7 @@ enum trace_flag_type {
  */
 struct trace_array_cpu {
 	atomic_t disabled;
+	void *buffer_page;	/* ring buffer spare */
 
 	/* these fields get copied into max-trace: */
 	unsigned long trace_idx;
@@ -262,7 +298,6 @@ extern void __ftrace_bad_type(void);
 	do {							\
 		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
 		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
-		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
@@ -279,7 +314,15 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 			  TRACE_GRAPH_RET);		\
 		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
 		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
+		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
+			  TRACE_KMEM_ALLOC);	\
+		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
+			  TRACE_KMEM_FREE);	\
+		IF_ASSIGN(var, ent, struct syscall_trace_enter,		\
+			  TRACE_SYSCALL_ENTER);	\
+		IF_ASSIGN(var, ent, struct syscall_trace_exit,		\
+			  TRACE_SYSCALL_EXIT);	\
 		__ftrace_bad_type();				\
 	} while (0)
 
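trace_assign_type() is how output code downcasts a generic trace_entry to its concrete record type. Each IF_ASSIGN() arm matches on the C type of the destination pointer at compile time, so dead arms are discarded; if the destination type is missing from the list, the call to the intentionally undefined __ftrace_bad_type() survives and the build fails at link time. A typical consumer, sketched after the pattern of the existing print handlers; the function name is made up, and trace_seq_printf() is the helper whose extern moves out of this header in a later hunk of this patch:

static enum print_line_t
sketch_kmem_alloc_print(struct trace_iterator *iter)
{
	struct kmemtrace_alloc_entry *field;

	/* compile-time checked downcast; WARNs if ent->type disagrees */
	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "alloc %zu/%zu from %pF\n",
			      field->bytes_req, field->bytes_alloc,
			      (void *)field->call_site))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}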
@@ -287,7 +330,8 @@ extern void __ftrace_bad_type(void);
 enum print_line_t {
 	TRACE_TYPE_PARTIAL_LINE = 0,	/* Retry after flushing the seq */
 	TRACE_TYPE_HANDLED = 1,
-	TRACE_TYPE_UNHANDLED = 2	/* Relay to other output functions */
+	TRACE_TYPE_UNHANDLED = 2,	/* Relay to other output functions */
+	TRACE_TYPE_NO_CONSUME = 3	/* Handled but ask to not consume */
 };
 
 
@@ -297,8 +341,8 @@ enum print_line_t {
  * flags value in struct tracer_flags.
  */
 struct tracer_opt {
 	const char *name; /* Will appear on the trace_options file */
 	u32 bit; /* Mask assigned in val field in tracer_flags */
 };
 
 /*
@@ -307,28 +351,51 @@ struct tracer_opt {
  */
 struct tracer_flags {
 	u32 val;
 	struct tracer_opt *opts;
 };
 
 /* Makes more easy to define a tracer opt */
 #define TRACER_OPT(s, b) .name = #s, .bit = b
 
-/*
- * A specific tracer, represented by methods that operate on a trace array:
+
+/**
+ * struct tracer - a specific tracer and its callbacks to interact with debugfs
+ * @name: the name chosen to select it on the available_tracers file
+ * @init: called when one switches to this tracer (echo name > current_tracer)
+ * @reset: called when one switches to another tracer
+ * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
+ * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @open: called when the trace file is opened
+ * @pipe_open: called when the trace_pipe file is opened
+ * @wait_pipe: override how the user waits for traces on trace_pipe
+ * @close: called when the trace file is released
+ * @read: override the default read callback on trace_pipe
+ * @splice_read: override the default splice_read callback on trace_pipe
+ * @selftest: selftest to run on boot (see trace_selftest.c)
+ * @print_headers: override the first lines that describe your columns
+ * @print_line: callback that prints a trace
+ * @set_flag: signals one of your private flags changed (trace_options file)
+ * @flags: your private flags
  */
 struct tracer {
 	const char *name;
-	/* Your tracer should raise a warning if init fails */
 	int (*init)(struct trace_array *tr);
 	void (*reset)(struct trace_array *tr);
 	void (*start)(struct trace_array *tr);
 	void (*stop)(struct trace_array *tr);
 	void (*open)(struct trace_iterator *iter);
 	void (*pipe_open)(struct trace_iterator *iter);
+	void (*wait_pipe)(struct trace_iterator *iter);
 	void (*close)(struct trace_iterator *iter);
 	ssize_t (*read)(struct trace_iterator *iter,
 			struct file *filp, char __user *ubuf,
 			size_t cnt, loff_t *ppos);
+	ssize_t (*splice_read)(struct trace_iterator *iter,
+			       struct file *filp,
+			       loff_t *ppos,
+			       struct pipe_inode_info *pipe,
+			       size_t len,
+			       unsigned int flags);
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	int (*selftest)(struct tracer *trace,
 			struct trace_array *tr);
@@ -339,7 +406,8 @@ struct tracer {
 	int (*set_flag)(u32 old_flags, u32 bit, int set);
 	struct tracer *next;
 	int print_max;
 	struct tracer_flags *flags;
+	struct tracer_stat *stats;
 };
 
 struct trace_seq {
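The new kernel-doc spells out the tracer contract: @init and @reset run when the user switches tracers, @start/@stop follow tracing_enabled, and the pipe callbacks are optional overrides with working defaults. A minimal skeleton under those rules; the tracer itself is invented for illustration, register_tracer() is the registration entry point declared in the unchanged part of this header, and default_wait_pipe() is added by this very patch:

static int sketch_tracer_init(struct trace_array *tr)
{
	/* a tracer should raise a warning itself if init fails */
	return 0;
}

static void sketch_tracer_reset(struct trace_array *tr)
{
}

static struct tracer sketch_tracer __read_mostly = {
	.name		= "sketch",
	.init		= sketch_tracer_init,
	.reset		= sketch_tracer_reset,
	.wait_pipe	= default_wait_pipe,
};

static __init int init_sketch_tracer(void)
{
	return register_tracer(&sketch_tracer);
}
device_initcall(init_sketch_tracer);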
@@ -348,6 +416,16 @@ struct trace_seq {
 	unsigned int readpos;
 };
 
+static inline void
+trace_seq_init(struct trace_seq *s)
+{
+	s->len = 0;
+	s->readpos = 0;
+}
+
+
+#define TRACE_PIPE_ALL_CPU	-1
+
 /*
  * Trace iterator - used by printout routines who present trace
  * results to users and which routines might sleep, etc:
@@ -356,6 +434,8 @@ struct trace_iterator {
 	struct trace_array *tr;
 	struct tracer *trace;
 	void *private;
+	int cpu_file;
+	struct mutex mutex;
 	struct ring_buffer_iter *buffer_iter[NR_CPUS];
 
 	/* The below is zeroed out in pipe_read */
@@ -371,6 +451,7 @@ struct trace_iterator {
 	cpumask_var_t started;
 };
 
+int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
@@ -379,26 +460,48 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
+struct ring_buffer_event;
+
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						    unsigned char type,
+						    unsigned long len,
+						    unsigned long flags,
+						    int pc);
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc);
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+				  unsigned long flags, int pc);
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+					unsigned long flags, int pc);
+
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 					    struct trace_array_cpu *data);
+
+struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
+					  int *ent_cpu, u64 *ent_ts);
+
 void tracing_generic_entry_update(struct trace_entry *entry,
 				  unsigned long flags,
 				  int pc);
 
+void default_wait_pipe(struct trace_iterator *iter);
+void poll_wait_pipe(struct trace_iterator *iter);
+
 void ftrace(struct trace_array *tr,
 	    struct trace_array_cpu *data,
 	    unsigned long ip,
 	    unsigned long parent_ip,
 	    unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *prev,
 				struct task_struct *next,
 				unsigned long flags, int pc);
 void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *wakee,
 				struct task_struct *cur,
 				unsigned long flags, int pc);
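The trace_buffer_lock_reserve()/trace_buffer_unlock_commit() pair added above factors the common reserve-fill-commit sequence out of the individual tracers: reserve a typed record of the right size, fill the payload in place, then publish it. A sketch in the style of the trace.c callers, reusing the existing TRACE_SPECIAL record type (the function name is illustrative):

static void sketch_trace_special(struct trace_array *tr,
				 unsigned long arg1, unsigned long arg2,
				 unsigned long arg3,
				 unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct special_entry *entry;

	/* may fail when the ring buffer is full or disabled */
	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;

	/* publish: makes the record visible to readers */
	trace_buffer_unlock_commit(tr, event, flags, pc);
}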
@@ -408,14 +511,12 @@ void trace_special(struct trace_array *tr,
 		   unsigned long arg2,
 		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
-		    struct trace_array_cpu *data,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
@@ -434,15 +535,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
 
-extern cycle_t ftrace_now(int cpu);
+void __trace_stack(struct trace_array *tr,
+		   unsigned long flags,
+		   int skip, int pc);
 
-#ifdef CONFIG_FUNCTION_TRACER
-void tracing_start_function_trace(void);
-void tracing_stop_function_trace(void);
-#else
-# define tracing_start_function_trace() do { } while (0)
-# define tracing_stop_function_trace() do { } while (0)
-#endif
+extern cycle_t ftrace_now(int cpu);
 
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 typedef void
@@ -456,10 +553,10 @@ struct tracer_switch_ops {
 	void *private;
 	struct tracer_switch_ops *next;
 };
-
-char *trace_find_cmdline(int pid);
 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 
+extern char *trace_find_cmdline(int pid);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
@@ -469,6 +566,8 @@ extern int DYN_FTRACE_TEST_NAME(void);
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
 					   struct trace_array *tr);
+extern int trace_selftest_startup_function_graph(struct tracer *trace,
+						 struct trace_array *tr);
 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
 					  struct trace_array *tr);
 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
@@ -488,15 +587,6 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
-extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
-extern void trace_seq_print_cont(struct trace_seq *s,
-				 struct trace_iterator *iter);
-
-extern int
-seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
-		 unsigned long sym_flags);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-				 size_t cnt);
 extern long ns2usecs(cycle_t nsec);
 extern int
 trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
@@ -580,7 +670,9 @@ enum trace_iterator_flags {
 	TRACE_ITER_ANNOTATE = 0x2000,
 	TRACE_ITER_USERSTACKTRACE = 0x4000,
 	TRACE_ITER_SYM_USEROBJ = 0x8000,
-	TRACE_ITER_PRINTK_MSGONLY = 0x10000
+	TRACE_ITER_PRINTK_MSGONLY = 0x10000,
+	TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
+	TRACE_ITER_LATENCY_FMT = 0x40000,
 };
 
 /*
@@ -601,12 +693,12 @@ extern struct tracer nop_trace;
  * preempt_enable (after a disable), a schedule might take place
  * causing an infinite recursion.
  *
- * To prevent this, we read the need_recshed flag before
+ * To prevent this, we read the need_resched flag before
  * disabling preemption. When we want to enable preemption we
  * check the flag, if it is set, then we call preempt_enable_no_resched.
  * Otherwise, we call preempt_enable.
 *
- * The rational for doing the above is that if need resched is set
+ * The rational for doing the above is that if need_resched is set
  * and we have yet to reschedule, we are either in an atomic location
  * (where we do not need to check for scheduling) or we are inside
  * the scheduler and do not want to resched.
@@ -627,7 +719,7 @@ static inline int ftrace_preempt_disable(void)
  *
  * This is a scheduler safe way to enable preemption and not miss
  * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we were either inside an atomic or
+ * If resched is set, then we are either inside an atomic or
  * are inside the scheduler (we would have already scheduled
  * otherwise). In this case, we do not want to call normal
  * preempt_enable, but preempt_enable_no_resched instead.
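In use, every ring-buffer write site brackets its work with this pair, threading the saved resched state from the disable to the matching enable (a sketch; the surrounding function is invented):

static void sketch_record_event(void)
{
	int resched;

	/* safe even when called from within the scheduler itself */
	resched = ftrace_preempt_disable();

	/* ... write the event into the ring buffer ... */

	/* reschedules only when it is provably safe to do so */
	ftrace_preempt_enable(resched);
}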
@@ -664,4 +756,26 @@ static inline void trace_branch_disable(void)
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
+/* trace event type bit fields, not numeric */
+enum {
+	TRACE_EVENT_TYPE_PRINTF = 1,
+	TRACE_EVENT_TYPE_RAW = 2,
+};
+
+struct ftrace_event_call {
+	char *name;
+	char *system;
+	struct dentry *dir;
+	int enabled;
+	int (*regfunc)(void);
+	void (*unregfunc)(void);
+	int id;
+	int (*raw_init)(void);
+	int (*show_format)(struct trace_seq *s);
+};
+
+void event_trace_printk(unsigned long ip, const char *fmt, ...);
+extern struct ftrace_event_call __start_ftrace_events[];
+extern struct ftrace_event_call __stop_ftrace_events[];
+
 #endif /* _LINUX_KERNEL_TRACE_H */
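
__start_ftrace_events[] and __stop_ftrace_events[] bound a linker section that the event macros populate with one ftrace_event_call per defined event, so initialization code can walk the section as a plain array. A sketch of such a walk, assuming the section is filled in elsewhere by those macros:

static __init int sketch_event_init(void)
{
	struct ftrace_event_call *call;

	/* the section markers behave like the bounds of a static array */
	for (call = __start_ftrace_events; call < __stop_ftrace_events; call++) {
		if (call->raw_init && call->raw_init())
			printk(KERN_WARNING "event %s/%s failed to initialize\n",
			       call->system, call->name);
	}
	return 0;
}
fs_initcall(sketch_event_init);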