Diffstat (limited to 'kernel/trace/trace.h')
 -rw-r--r--	kernel/trace/trace.h	107
 1 file changed, 24 insertions(+), 83 deletions(-)

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2cd96399463f..d39b3c5454a5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,10 +9,7 @@
 #include <linux/mmiotrace.h>
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
-#include <trace/boot.h>
-#include <linux/kmemtrace.h>
 #include <linux/hw_breakpoint.h>
-
 #include <linux/trace_seq.h>
 #include <linux/ftrace_event.h>
 
@@ -25,30 +22,17 @@ enum trace_type {
 	TRACE_STACK,
 	TRACE_PRINT,
 	TRACE_BPRINT,
-	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
 	TRACE_BRANCH,
-	TRACE_BOOT_CALL,
-	TRACE_BOOT_RET,
 	TRACE_GRAPH_RET,
 	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
-	TRACE_KMEM_ALLOC,
-	TRACE_KMEM_FREE,
 	TRACE_BLK,
-	TRACE_KSYM,
 
 	__TRACE_LAST_TYPE,
 };
 
-enum kmemtrace_type_id {
-	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
-	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
-	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
-};
-
-extern struct tracer boot_tracer;
 
 #undef __field
 #define __field(type, item)		type	item;
@@ -204,23 +188,15 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
 		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
-		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
 			  TRACE_MMIO_MAP);				\
-		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
-		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
 		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
 			  TRACE_GRAPH_ENT);		\
 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 			  TRACE_GRAPH_RET);		\
-		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
-			  TRACE_KMEM_ALLOC);	\
-		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
-			  TRACE_KMEM_FREE);	\
-		IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
 		__ftrace_bad_type();					\
 	} while (0)
 
@@ -298,6 +274,7 @@ struct tracer {
 	struct tracer		*next;
 	int			print_max;
 	struct tracer_flags	*flags;
+	int			use_max_tr;
 };
 
 
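The new use_max_tr field lets a tracer declare whether it needs the separate max-latency snapshot buffer, so that buffer can presumably be freed when no tracer wants it. A minimal sketch of a tracer definition opting in; the tracer name and callbacks here are illustrative, not from this patch:

	/* Sketch: latency-style tracer requesting the max snapshot buffer. */
	static struct tracer example_latency_tracer __read_mostly = {
		.name		= "example_latency",
		.init		= example_tracer_init,		/* hypothetical callbacks */
		.reset		= example_tracer_reset,
		.print_max	= 1,
		.use_max_tr	= 1,	/* keep a separate max-latency buffer */
	};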
@@ -318,7 +295,6 @@ struct dentry *trace_create_file(const char *name,
 				 const struct file_operations *fops);
 
 struct dentry *tracing_init_dentry(void);
-void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
 struct ring_buffer_event;
 
@@ -338,6 +314,14 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
+int trace_empty(struct trace_iterator *iter);
+
+void *trace_find_next_entry_inc(struct trace_iterator *iter);
+
+void trace_init_global_iter(struct trace_iterator *iter);
+
+void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
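The iterator helpers exported above follow the pattern ftrace_dump() uses to drain the ring buffers directly, without going through the debugfs trace files. A rough sketch of that consumer loop, under the assumption that these exports exist to let other code (e.g. a debugger hook) reuse it; error handling and locking are omitted:

	/* Sketch of a dump-style consumer built on the newly exported helpers. */
	static struct trace_iterator iter;

	static void example_dump(void)
	{
		int cpu;

		trace_init_global_iter(&iter);		/* point iter at the global buffers */
		for_each_tracing_cpu(cpu)
			tracing_iter_reset(&iter, cpu);	/* rewind each per-cpu buffer */

		while (!trace_empty(&iter)) {
			trace_find_next_entry_inc(&iter);	/* advance to the next event */
			print_trace_line(&iter);		/* format it into iter.seq */
			trace_printk_seq(&iter.seq);		/* emit and reset the seq */
		}
	}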
@@ -355,11 +339,6 @@ void tracing_sched_wakeup_trace(struct trace_array *tr,
 				struct task_struct *wakee,
 				struct task_struct *cur,
 				unsigned long flags, int pc);
-void trace_special(struct trace_array *tr,
-		   struct trace_array_cpu *data,
-		   unsigned long arg1,
-		   unsigned long arg2,
-		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
@@ -380,8 +359,15 @@ void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
 void unregister_tracer(struct tracer *type);
 int is_tracing_stopped(void);
+enum trace_file_type {
+	TRACE_FILE_LAT_FMT	= 1,
+	TRACE_FILE_ANNOTATE	= 2,
+};
+
+extern cpumask_var_t __read_mostly tracing_buffer_mask;
 
-extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
+#define for_each_tracing_cpu(cpu)	\
+	for_each_cpu(cpu, tracing_buffer_mask)
 
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
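Moving tracing_buffer_mask and the for_each_tracing_cpu() macro into the header lets other tracing files iterate only over CPUs that actually have trace buffers. Typical use is a per-cpu walk like the following sketch (the trace_array pointer tr is assumed from the caller's context):

	int cpu;

	/* Visit only CPUs present in tracing_buffer_mask. */
	for_each_tracing_cpu(cpu)
		atomic_inc(&tr->data[cpu]->disabled);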
@@ -452,12 +438,8 @@ extern int trace_selftest_startup_nop(struct tracer *trace,
 					      struct trace_array *tr);
 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 					       struct trace_array *tr);
-extern int trace_selftest_startup_sysprof(struct tracer *trace,
-					  struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
-extern int trace_selftest_startup_ksym(struct tracer *trace,
-				       struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
@@ -471,6 +453,8 @@ trace_array_vprintk(struct trace_array *tr,
 		    unsigned long ip, const char *fmt, va_list args);
 int trace_array_printk(struct trace_array *tr,
 		       unsigned long ip, const char *fmt, ...);
+void trace_printk_seq(struct trace_seq *s);
+enum print_line_t print_trace_line(struct trace_iterator *iter);
 
 extern unsigned long trace_flags;
 
@@ -617,6 +601,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_LATENCY_FMT		= 0x20000,
 	TRACE_ITER_SLEEP_TIME		= 0x40000,
 	TRACE_ITER_GRAPH_TIME		= 0x80000,
+	TRACE_ITER_RECORD_CMD		= 0x100000,
 };
 
 /*
@@ -628,54 +613,6 @@ enum trace_iterator_flags {
 
 extern struct tracer nop_trace;
 
-/**
- * ftrace_preempt_disable - disable preemption scheduler safe
- *
- * When tracing can happen inside the scheduler, there exists
- * cases that the tracing might happen before the need_resched
- * flag is checked. If this happens and the tracer calls
- * preempt_enable (after a disable), a schedule might take place
- * causing an infinite recursion.
- *
- * To prevent this, we read the need_resched flag before
- * disabling preemption. When we want to enable preemption we
- * check the flag, if it is set, then we call preempt_enable_no_resched.
- * Otherwise, we call preempt_enable.
- *
- * The rational for doing the above is that if need_resched is set
- * and we have yet to reschedule, we are either in an atomic location
- * (where we do not need to check for scheduling) or we are inside
- * the scheduler and do not want to resched.
- */
-static inline int ftrace_preempt_disable(void)
-{
-	int resched;
-
-	resched = need_resched();
-	preempt_disable_notrace();
-
-	return resched;
-}
-
-/**
- * ftrace_preempt_enable - enable preemption scheduler safe
- * @resched: the return value from ftrace_preempt_disable
- *
- * This is a scheduler safe way to enable preemption and not miss
- * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we are either inside an atomic or
- * are inside the scheduler (we would have already scheduled
- * otherwise). In this case, we do not want to call normal
- * preempt_enable, but preempt_enable_no_resched instead.
- */
-static inline void ftrace_preempt_enable(int resched)
-{
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
-}
-
 #ifdef CONFIG_BRANCH_TRACER
 extern int enable_branch_tracing(struct trace_array *tr);
 extern void disable_branch_tracing(void);
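With ftrace_preempt_disable()/ftrace_preempt_enable() removed, call sites presumably switch to plain preempt_disable_notrace()/preempt_enable_notrace() pairs, the old need_resched dance no longer being required. A hedged sketch of what a converted call site looks like under that assumption:

	/* Sketch: a trace hook after the removal, assuming conversion to the
	 * plain notrace helpers; the recursion concern the old helpers
	 * guarded against is handled elsewhere in this series. */
	static void example_trace_hook(void)
	{
		preempt_disable_notrace();

		/* ... record the trace event ... */

		preempt_enable_notrace();
	}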
@@ -766,6 +703,8 @@ struct filter_pred {
 	int			pop_n;
 };
 
+extern struct list_head ftrace_common_fields;
+
 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
 extern void print_event_filter(struct ftrace_event_call *call,
@@ -795,6 +734,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
 		return 0;
 }
 
+extern void trace_event_enable_cmd_record(bool enable);
+
 extern struct mutex event_mutex;
 extern struct list_head ftrace_events;
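trace_event_enable_cmd_record() pairs with the TRACE_ITER_RECORD_CMD flag added above: flipping the option presumably tells the event subsystem to start or stop saving the pid-to-comm mapping for enabled events. A sketch of how the option handler might drive it (the function name here is hypothetical):

	/* Sketch: option handler reacting to the record-cmd flag. */
	static void example_set_record_cmd(int enabled)
	{
		if (enabled)
			trace_flags |= TRACE_ITER_RECORD_CMD;
		else
			trace_flags &= ~TRACE_ITER_RECORD_CMD;

		/* (Un)register cmdline recording for active events. */
		trace_event_enable_cmd_record(enabled);
	}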
