author	Steven Rostedt <srostedt@redhat.com>	2009-04-13 11:20:49 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-04-14 12:57:58 -0400
commit	97f2025153499faa17267a0d4e18c7afaf73f39d (patch)
tree	cd3ea51a93093ea5250ef38b4b5bf6c2aa3f9ed5 /kernel/trace/trace.h
parent	9504504cbab29ecb694186b1c5b15d3579c43c51 (diff)
tracing/events: move declarations from trace directory to core include
In preparation for allowing trace events to happen in modules, we need
to move some of the local declarations in the kernel/trace directory
into include/linux.
This patch simply moves the declarations and performs no context changes.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
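
For reference, the sketch below shows roughly how the relocated declarations could look once they land in include/linux/ftrace_event.h, the header pulled in by the new #include in this patch. The struct and enum bodies are copied from the lines removed below; the header guard and the include inside the sketch are assumptions, not part of this patch.

```c
/*
 * Sketch only: a plausible include/linux/ftrace_event.h after the move.
 * The trace_entry and print_line_t definitions mirror the lines removed
 * from kernel/trace/trace.h below; the guard and include are assumed.
 */
#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/trace_seq.h>

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

#endif /* _LINUX_FTRACE_EVENT_H */
```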
Diffstat (limited to 'kernel/trace/trace.h')
-rw-r--r--	kernel/trace/trace.h	120
1 file changed, 1 insertion(+), 119 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1882846b738..6bcdf4af9b2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -13,6 +13,7 @@
 #include <trace/power.h>
 
 #include <linux/trace_seq.h>
+#include <linux/ftrace_event.h>
 
 enum trace_type {
 	__TRACE_FIRST_TYPE = 0,
@@ -44,20 +45,6 @@ enum trace_type {
 };
 
 /*
- * The trace entry - the most basic unit of tracing. This is what
- * is printed in the end as a single line in the trace output, such as:
- *
- *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
- */
-struct trace_entry {
-	unsigned char		type;
-	unsigned char		flags;
-	unsigned char		preempt_count;
-	int			pid;
-	int			tgid;
-};
-
-/*
  * Function trace entry - function address and parent function addres:
  */
 struct ftrace_entry {
@@ -265,8 +252,6 @@ struct trace_array_cpu {
 	char			comm[TASK_COMM_LEN];
 };
 
-struct trace_iterator;
-
 /*
  * The trace array - an array of per-CPU trace arrays. This is the
  * highest level data structure that individual tracers deal with.
@@ -341,15 +326,6 @@ extern void __ftrace_bad_type(void);
 		__ftrace_bad_type();					\
 	} while (0)
 
-/* Return values for print_line callback */
-enum print_line_t {
-	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
-	TRACE_TYPE_HANDLED	= 1,
-	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
-	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
-};
-
-
 /*
  * An option specific to a tracer. This is a boolean value.
  * The bit is the bit index that sets its value on the
@@ -428,31 +404,6 @@ struct tracer {
 
 #define TRACE_PIPE_ALL_CPU	-1
 
-/*
- * Trace iterator - used by printout routines who present trace
- * results to users and which routines might sleep, etc:
- */
-struct trace_iterator {
-	struct trace_array	*tr;
-	struct tracer		*trace;
-	void			*private;
-	int			cpu_file;
-	struct mutex		mutex;
-	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
-
-	/* The below is zeroed out in pipe_read */
-	struct trace_seq	seq;
-	struct trace_entry	*ent;
-	int			cpu;
-	u64			ts;
-
-	unsigned long		iter_flags;
-	loff_t			pos;
-	long			idx;
-
-	cpumask_var_t		started;
-};
-
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
 void trace_wake_up(void);
@@ -479,15 +430,6 @@ void trace_buffer_unlock_commit(struct trace_array *tr,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc);
 
-struct ring_buffer_event *
-trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
-				  unsigned long flags, int pc);
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
-					unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
-					unsigned long flags, int pc);
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event);
-
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 						struct trace_array_cpu *data);
 
@@ -510,7 +452,6 @@ void tracing_sched_switch_trace(struct trace_array *tr,
 				struct task_struct *prev,
 				struct task_struct *next,
 				unsigned long flags, int pc);
-void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
 				struct task_struct *wakee,
@@ -790,28 +731,6 @@ struct ftrace_event_field {
 	int			size;
 };
 
-struct ftrace_event_call {
-	char			*name;
-	char			*system;
-	struct dentry		*dir;
-	int			enabled;
-	int			(*regfunc)(void);
-	void			(*unregfunc)(void);
-	int			id;
-	int			(*raw_init)(void);
-	int			(*show_format)(struct trace_seq *s);
-	int			(*define_fields)(void);
-	struct list_head	fields;
-	int			n_preds;
-	struct filter_pred	**preds;
-
-#ifdef CONFIG_EVENT_PROFILE
-	atomic_t	profile_count;
-	int		(*profile_enable)(struct ftrace_event_call *);
-	void		(*profile_disable)(struct ftrace_event_call *);
-#endif
-};
-
 struct event_subsystem {
 	struct list_head	list;
 	const char		*name;
@@ -825,9 +744,6 @@ struct event_subsystem {
 	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
 	     event++)
 
-#define MAX_FILTER_PRED		8
-#define MAX_FILTER_STR_VAL	128
-
 struct filter_pred;
 
 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
@@ -845,9 +761,6 @@ struct filter_pred {
 	int			clear;
 };
 
-int trace_define_field(struct ftrace_event_call *call, char *type,
-		       char *name, int offset, int size);
-extern int init_preds(struct ftrace_event_call *call);
 extern void filter_free_pred(struct filter_pred *pred);
 extern void filter_print_preds(struct filter_pred **preds, int n_preds,
 			       struct trace_seq *s);
@@ -855,13 +768,9 @@ extern int filter_parse(char **pbuf, struct filter_pred *pred);
 extern int filter_add_pred(struct ftrace_event_call *call,
 			   struct filter_pred *pred);
 extern void filter_disable_preds(struct ftrace_event_call *call);
-extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
 extern void filter_free_subsystem_preds(struct event_subsystem *system);
 extern int filter_add_subsystem_pred(struct event_subsystem *system,
 				     struct filter_pred *pred);
-extern int filter_current_check_discard(struct ftrace_event_call *call,
-					void *rec,
-					struct ring_buffer_event *event);
 
 static inline int
 filter_check_discard(struct ftrace_event_call *call, void *rec,
@@ -876,14 +785,6 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
 	return 0;
 }
 
-#define __common_field(type, item)					\
-	ret = trace_define_field(event_call, #type, "common_" #item,	\
-				 offsetof(typeof(field.ent), item),	\
-				 sizeof(field.ent.item));		\
-	if (ret)							\
-		return ret;
-
-void event_trace_printk(unsigned long ip, const char *fmt, ...);
 extern struct ftrace_event_call __start_ftrace_events[];
 extern struct ftrace_event_call __stop_ftrace_events[];
 
@@ -895,25 +796,6 @@ extern struct ftrace_event_call __stop_ftrace_events[];
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
-/*
- * The double __builtin_constant_p is because gcc will give us an error
- * if we try to allocate the static variable to fmt if it is not a
- * constant. Even with the outer if statement optimizing out.
- */
-#define event_trace_printk(ip, fmt, args...)				\
-do {									\
-	__trace_printk_check_format(fmt, ##args);			\
-	tracing_record_cmdline(current);				\
-	if (__builtin_constant_p(fmt)) {				\
-		static const char *trace_printk_fmt			\
-		  __attribute__((section("__trace_printk_fmt"))) =	\
-			__builtin_constant_p(fmt) ? fmt : NULL;		\
-									\
-		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
-	} else								\
-		__trace_printk(ip, fmt, ##args);			\
-} while (0)
-
 #undef TRACE_EVENT_FORMAT
 #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt)	\
 	extern struct ftrace_event_call event_##call;
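
As a closing illustration of why these types need to live in a core header: once struct trace_entry, struct trace_iterator and enum print_line_t are reachable via <linux/ftrace_event.h>, an output callback can be written against them outside kernel/trace. The sketch below is hypothetical (the event structure and function name are invented for illustration); it assumes only the members shown in the declarations removed above and the trace_seq_printf() helper from <linux/trace_seq.h>.

```c
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

/* Hypothetical event record; only the embedded trace_entry comes from the patch. */
struct my_event_entry {
	struct trace_entry	ent;	/* common fields: type, flags, pid, tgid */
	unsigned long		value;	/* event-specific payload (invented) */
};

/* Sketch of a print_line-style callback built on the now-shared types. */
static enum print_line_t my_event_print_line(struct trace_iterator *iter)
{
	struct my_event_entry *field = (struct my_event_entry *)iter->ent;

	/* trace_seq_printf() returns 0 when the seq buffer is full. */
	if (!trace_seq_printf(&iter->seq, "my_event: value=%lu\n", field->value))
		return TRACE_TYPE_PARTIAL_LINE;	/* ask the caller to flush and retry */

	return TRACE_TYPE_HANDLED;
}
```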