path: root/kernel/trace/trace.h
author		Linus Torvalds <torvalds@linux-foundation.org>	2010-08-06 12:30:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-06 12:30:52 -0400
commit		4aed2fd8e3181fea7c09ba79cf64e7e3f4413bf9 (patch)
tree		1f69733e5daab4915a76a41de0e4d1dc61e12cfb /kernel/trace/trace.h
parent		3a3527b6461b1298cc53ce72f336346739297ac8 (diff)
parent		fc9ea5a1e53ee54f681e226d735008e2a6f8f470 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (162 commits)
  tracing/kprobes: unregister_trace_probe needs to be called under mutex
  perf: expose event__process function
  perf events: Fix mmap offset determination
  perf, powerpc: fsl_emb: Restore setting perf_sample_data.period
  perf, powerpc: Convert the FSL driver to use local64_t
  perf tools: Don't keep unreferenced maps when unmaps are detected
  perf session: Invalidate last_match when removing threads from rb_tree
  perf session: Free the ref_reloc_sym memory at the right place
  x86,mmiotrace: Add support for tracing STOS instruction
  perf, sched migration: Librarize task states and event headers helpers
  perf, sched migration: Librarize the GUI class
  perf, sched migration: Make the GUI class client agnostic
  perf, sched migration: Make it vertically scrollable
  perf, sched migration: Parameterize cpu height and spacing
  perf, sched migration: Fix key bindings
  perf, sched migration: Ignore unhandled task states
  perf, sched migration: Handle ignored migrate out events
  perf: New migration tool overview
  tracing: Drop cpparg() macro
  perf: Use tracepoint_synchronize_unregister() to flush any pending tracepoint call
  ...

Fix up trivial conflicts in Makefile and drivers/cpufreq/cpufreq.c
Diffstat (limited to 'kernel/trace/trace.h')
-rw-r--r--	kernel/trace/trace.h	90
1 file changed, 6 insertions(+), 84 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 0605fc00c176..d39b3c5454a5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,10 +9,7 @@
 #include <linux/mmiotrace.h>
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
-#include <trace/boot.h>
-#include <linux/kmemtrace.h>
 #include <linux/hw_breakpoint.h>
-
 #include <linux/trace_seq.h>
 #include <linux/ftrace_event.h>
 
@@ -25,30 +22,17 @@ enum trace_type {
 	TRACE_STACK,
 	TRACE_PRINT,
 	TRACE_BPRINT,
-	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
 	TRACE_BRANCH,
-	TRACE_BOOT_CALL,
-	TRACE_BOOT_RET,
 	TRACE_GRAPH_RET,
 	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
-	TRACE_KMEM_ALLOC,
-	TRACE_KMEM_FREE,
 	TRACE_BLK,
-	TRACE_KSYM,
 
 	__TRACE_LAST_TYPE,
 };
 
-enum kmemtrace_type_id {
-	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
-	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
-	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
-};
-
-extern struct tracer boot_tracer;
 
 #undef __field
 #define __field(type, item)		type	item;
@@ -204,23 +188,15 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
 		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
-		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
 			  TRACE_MMIO_MAP);				\
-		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
-		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
 		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
 		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
 			  TRACE_GRAPH_ENT);				\
 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 			  TRACE_GRAPH_RET);				\
-		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
-			  TRACE_KMEM_ALLOC);				\
-		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
-			  TRACE_KMEM_FREE);				\
-		IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
 		__ftrace_bad_type();					\
 	} while (0)
 
@@ -298,6 +274,7 @@ struct tracer {
 	struct tracer		*next;
 	int			print_max;
 	struct tracer_flags	*flags;
+	int			use_max_tr;
 };
 
 
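The new use_max_tr field lets a tracer opt into the separate "max" snapshot buffer that latency tracers swap to when a new maximum is recorded. A minimal sketch of a tracer requesting it (the example_* names are invented for illustration and are not part of this commit):

static int example_init(struct trace_array *tr)
{
	return 0;
}

static void example_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name		= "example",
	.init		= example_init,
	.reset		= example_reset,
	.use_max_tr	= 1,	/* keep a snapshot buffer of the max-latency trace */
};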
@@ -318,7 +295,6 @@ struct dentry *trace_create_file(const char *name,
 				 const struct file_operations *fops);
 
 struct dentry *tracing_init_dentry(void);
-void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
 struct ring_buffer_event;
 
@@ -363,11 +339,6 @@ void tracing_sched_wakeup_trace(struct trace_array *tr,
 			       struct task_struct *wakee,
 			       struct task_struct *cur,
 			       unsigned long flags, int pc);
-void trace_special(struct trace_array *tr,
-		   struct trace_array_cpu *data,
-		   unsigned long arg1,
-		   unsigned long arg2,
-		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
@@ -398,8 +369,6 @@ extern cpumask_var_t __read_mostly tracing_buffer_mask;
 #define for_each_tracing_cpu(cpu)	\
 	for_each_cpu(cpu, tracing_buffer_mask)
 
-extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
-
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
 extern unsigned long tracing_thresh;
@@ -469,12 +438,8 @@ extern int trace_selftest_startup_nop(struct tracer *trace,
 					 struct trace_array *tr);
 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 					       struct trace_array *tr);
-extern int trace_selftest_startup_sysprof(struct tracer *trace,
-					  struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
-extern int trace_selftest_startup_ksym(struct tracer *trace,
-				       struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
@@ -636,6 +601,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_LATENCY_FMT		= 0x20000,
 	TRACE_ITER_SLEEP_TIME		= 0x40000,
 	TRACE_ITER_GRAPH_TIME		= 0x80000,
+	TRACE_ITER_RECORD_CMD		= 0x100000,
 };
 
 /*
@@ -647,54 +613,6 @@ enum trace_iterator_flags {
 
 extern struct tracer nop_trace;
 
-/**
- * ftrace_preempt_disable - disable preemption scheduler safe
- *
- * When tracing can happen inside the scheduler, there exists
- * cases that the tracing might happen before the need_resched
- * flag is checked. If this happens and the tracer calls
- * preempt_enable (after a disable), a schedule might take place
- * causing an infinite recursion.
- *
- * To prevent this, we read the need_resched flag before
- * disabling preemption. When we want to enable preemption we
- * check the flag, if it is set, then we call preempt_enable_no_resched.
- * Otherwise, we call preempt_enable.
- *
- * The rational for doing the above is that if need_resched is set
- * and we have yet to reschedule, we are either in an atomic location
- * (where we do not need to check for scheduling) or we are inside
- * the scheduler and do not want to resched.
- */
-static inline int ftrace_preempt_disable(void)
-{
-	int resched;
-
-	resched = need_resched();
-	preempt_disable_notrace();
-
-	return resched;
-}
-
-/**
- * ftrace_preempt_enable - enable preemption scheduler safe
- * @resched: the return value from ftrace_preempt_disable
- *
- * This is a scheduler safe way to enable preemption and not miss
- * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we are either inside an atomic or
- * are inside the scheduler (we would have already scheduled
- * otherwise). In this case, we do not want to call normal
- * preempt_enable, but preempt_enable_no_resched instead.
- */
-static inline void ftrace_preempt_enable(int resched)
-{
-	if (resched)
-		preempt_enable_no_resched_notrace();
-	else
-		preempt_enable_notrace();
-}
-
 #ifdef CONFIG_BRANCH_TRACER
 extern int enable_branch_tracing(struct trace_array *tr);
 extern void disable_branch_tracing(void);
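The deleted kernel-doc above explains the hazard this helper pair guarded against: a tracer firing inside the scheduler could recurse through the resched check in preempt_enable(). For readers unfamiliar with the pattern, a hypothetical caller (my_trace_callback is invented for illustration) would have bracketed its ring-buffer write like this:

static void my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	/* snapshot need_resched() before disabling preemption */
	resched = ftrace_preempt_disable();

	/* ... write the event into the ring buffer ... */

	/* re-enable without triggering a recursive schedule */
	ftrace_preempt_enable(resched);
}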
@@ -785,6 +703,8 @@ struct filter_pred {
 	int 			pop_n;
 };
 
+extern struct list_head ftrace_common_fields;
+
 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
 extern void print_event_filter(struct ftrace_event_call *call,
@@ -814,6 +734,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
 		return 0;
 }
 
+extern void trace_event_enable_cmd_record(bool enable);
+
 extern struct mutex event_mutex;
 extern struct list_head ftrace_events;
 
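The two additions to this header work as a pair: TRACE_ITER_RECORD_CMD is the user-visible option bit, and trace_event_enable_cmd_record() is the hook the tracing core calls when that bit flips, so pid-to-comm bookkeeping only runs while events actually need it. A rough sketch of the plumbing (set_record_cmd_option is an invented name; trace_flags is the existing option bitmask):

static void set_record_cmd_option(int enabled)
{
	if (enabled)
		trace_flags |= TRACE_ITER_RECORD_CMD;
	else
		trace_flags &= ~TRACE_ITER_RECORD_CMD;

	/* have events save task comms while recording, so output can
	 * map PIDs back to command names */
	trace_event_enable_cmd_record(enabled);
}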