Diffstat (limited to 'kernel/trace/trace.h')
 kernel/trace/trace.h | 315 ++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 267 insertions(+), 48 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4d3d381bfd95..cb0ce3fc36d3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,6 +9,8 @@
 #include <linux/mmiotrace.h>
 #include <linux/ftrace.h>
 #include <trace/boot.h>
+#include <trace/kmemtrace.h>
+#include <trace/power.h>
 
 enum trace_type {
 	__TRACE_FIRST_TYPE = 0,
@@ -16,9 +18,9 @@ enum trace_type {
 	TRACE_FN,
 	TRACE_CTX,
 	TRACE_WAKE,
-	TRACE_CONT,
 	TRACE_STACK,
 	TRACE_PRINT,
+	TRACE_BPRINT,
 	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
@@ -29,9 +31,14 @@ enum trace_type {
 	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
 	TRACE_HW_BRANCHES,
+	TRACE_SYSCALL_ENTER,
+	TRACE_SYSCALL_EXIT,
+	TRACE_KMEM_ALLOC,
+	TRACE_KMEM_FREE,
 	TRACE_POWER,
+	TRACE_BLK,
 
-	__TRACE_LAST_TYPE
+	__TRACE_LAST_TYPE,
 };
 
 /*
@@ -42,7 +49,6 @@ enum trace_type {
  */
 struct trace_entry {
 	unsigned char type;
-	unsigned char cpu;
 	unsigned char flags;
 	unsigned char preempt_count;
 	int pid;
@@ -60,13 +66,13 @@ struct ftrace_entry {
 
 /* Function call entry */
 struct ftrace_graph_ent_entry {
 	struct trace_entry ent;
 	struct ftrace_graph_ent graph_ent;
 };
 
 /* Function return entry */
 struct ftrace_graph_ret_entry {
 	struct trace_entry ent;
 	struct ftrace_graph_ret ret;
 };
 extern struct tracer boot_tracer;
@@ -112,12 +118,18 @@ struct userstack_entry {
 };
 
 /*
- * ftrace_printk entry:
+ * trace_printk entry:
  */
+struct bprint_entry {
+	struct trace_entry ent;
+	unsigned long ip;
+	const char *fmt;
+	u32 buf[];
+};
+
 struct print_entry {
 	struct trace_entry ent;
 	unsigned long ip;
-	int depth;
 	char buf[];
 };
 
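
A quick sketch (not from this patch) of what each record costs in the ring buffer: bprint_entry, the binary trace_printk record, reserves room for the format pointer plus the raw u32 argument words, while print_entry reserves the fully formatted text. The local names below are hypothetical:

	/* nargs: u32 words holding the binary args; len: strlen of text */
	size_t bsize = sizeof(struct bprint_entry) + nargs * sizeof(u32);
	size_t psize = sizeof(struct print_entry) + len + 1;	/* + NUL */
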
@@ -170,15 +182,45 @@ struct trace_power {
 	struct power_trace state_data;
 };
 
+struct kmemtrace_alloc_entry {
+	struct trace_entry ent;
+	enum kmemtrace_type_id type_id;
+	unsigned long call_site;
+	const void *ptr;
+	size_t bytes_req;
+	size_t bytes_alloc;
+	gfp_t gfp_flags;
+	int node;
+};
+
+struct kmemtrace_free_entry {
+	struct trace_entry ent;
+	enum kmemtrace_type_id type_id;
+	unsigned long call_site;
+	const void *ptr;
+};
+
+struct syscall_trace_enter {
+	struct trace_entry ent;
+	int nr;
+	unsigned long args[];
+};
+
+struct syscall_trace_exit {
+	struct trace_entry ent;
+	int nr;
+	unsigned long ret;
+};
+
+
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
  *  IRQS_OFF - interrupts were disabled
  *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
  *  NEED_RESCED - reschedule is requested
  *  HARDIRQ - inside an interrupt handler
  *  SOFTIRQ - inside a softirq handler
- *  CONT - multiple entries hold the trace item
  */
 enum trace_flag_type {
 	TRACE_FLAG_IRQS_OFF = 0x01,
@@ -186,7 +228,6 @@ enum trace_flag_type {
 	TRACE_FLAG_NEED_RESCHED = 0x04,
 	TRACE_FLAG_HARDIRQ = 0x08,
 	TRACE_FLAG_SOFTIRQ = 0x10,
-	TRACE_FLAG_CONT = 0x20,
 };
 
 #define TRACE_BUF_SIZE 1024
@@ -198,6 +239,7 @@ enum trace_flag_type {
  */
 struct trace_array_cpu {
 	atomic_t disabled;
+	void *buffer_page;	/* ring buffer spare */
 
 	/* these fields get copied into max-trace: */
 	unsigned long trace_idx;
@@ -262,10 +304,10 @@ extern void __ftrace_bad_type(void);
 	do {								\
 		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
 		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
-		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
+		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
 		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
@@ -279,7 +321,15 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 			  TRACE_GRAPH_RET);				\
 		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
 		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
+		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
+			  TRACE_KMEM_ALLOC);				\
+		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
+			  TRACE_KMEM_FREE);				\
+		IF_ASSIGN(var, ent, struct syscall_trace_enter,		\
+			  TRACE_SYSCALL_ENTER);				\
+		IF_ASSIGN(var, ent, struct syscall_trace_exit,		\
+			  TRACE_SYSCALL_EXIT);				\
 		__ftrace_bad_type();					\
 	} while (0)
 
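
The IF_ASSIGN() chain above is what turns trace_assign_type() into a checked downcast: assigning to a variable whose type is not in the list leaves behind a call to the deliberately undefined __ftrace_bad_type() and breaks the link. A sketch (hypothetical function, not from this patch; trace_seq_printf() now lives in trace_output.h) of how an output routine would consume one of the new kmemtrace records:

static enum print_line_t print_kmem_alloc(struct trace_iterator *iter)
{
	struct kmemtrace_alloc_entry *field;

	trace_assign_type(field, iter->ent);	/* checked cast by type */
	if (!trace_seq_printf(&iter->seq, "alloc %zu at %p\n",
			      field->bytes_req, field->ptr))
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
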
@@ -287,7 +337,8 @@ extern void __ftrace_bad_type(void);
 enum print_line_t {
 	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
 	TRACE_TYPE_HANDLED	= 1,
-	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
+	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
+	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
 
@@ -297,8 +348,8 @@ enum print_line_t {
  * flags value in struct tracer_flags.
  */
 struct tracer_opt {
 	const char *name; /* Will appear on the trace_options file */
 	u32 bit; /* Mask assigned in val field in tracer_flags */
 };
 
 /*
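
As a usage sketch (hypothetical flag, not from this patch), this is how a tracer wires TRACER_OPT() and the two structs above together; the pattern follows existing in-tree users:

#define TRACE_MYTRACER_VERBOSE	0x1

static struct tracer_opt mytracer_opts[] = {
	/* "verbose" will appear in the trace_options file */
	{ TRACER_OPT(verbose, TRACE_MYTRACER_VERBOSE) },
	{ }	/* terminator */
};

static struct tracer_flags mytracer_flags = {
	.val	= 0,		/* all options start off */
	.opts	= mytracer_opts,
};
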
@@ -307,28 +358,51 @@ struct tracer_opt {
  */
 struct tracer_flags {
 	u32 val;
 	struct tracer_opt *opts;
 };
 
 /* Makes more easy to define a tracer opt */
 #define TRACER_OPT(s, b)	.name = #s, .bit = b
 
-/*
- * A specific tracer, represented by methods that operate on a trace array:
+
+/**
+ * struct tracer - a specific tracer and its callbacks to interact with debugfs
+ * @name: the name chosen to select it on the available_tracers file
+ * @init: called when one switches to this tracer (echo name > current_tracer)
+ * @reset: called when one switches to another tracer
+ * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
+ * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @open: called when the trace file is opened
+ * @pipe_open: called when the trace_pipe file is opened
+ * @wait_pipe: override how the user waits for traces on trace_pipe
+ * @close: called when the trace file is released
+ * @read: override the default read callback on trace_pipe
+ * @splice_read: override the default splice_read callback on trace_pipe
+ * @selftest: selftest to run on boot (see trace_selftest.c)
+ * @print_headers: override the first lines that describe your columns
+ * @print_line: callback that prints a trace
+ * @set_flag: signals one of your private flags changed (trace_options file)
+ * @flags: your private flags
  */
 struct tracer {
 	const char *name;
-	/* Your tracer should raise a warning if init fails */
 	int (*init)(struct trace_array *tr);
 	void (*reset)(struct trace_array *tr);
 	void (*start)(struct trace_array *tr);
 	void (*stop)(struct trace_array *tr);
 	void (*open)(struct trace_iterator *iter);
 	void (*pipe_open)(struct trace_iterator *iter);
+	void (*wait_pipe)(struct trace_iterator *iter);
 	void (*close)(struct trace_iterator *iter);
 	ssize_t (*read)(struct trace_iterator *iter,
 			struct file *filp, char __user *ubuf,
 			size_t cnt, loff_t *ppos);
+	ssize_t (*splice_read)(struct trace_iterator *iter,
+			       struct file *filp,
+			       loff_t *ppos,
+			       struct pipe_inode_info *pipe,
+			       size_t len,
+			       unsigned int flags);
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	int (*selftest)(struct tracer *trace,
 			struct trace_array *tr);
@@ -339,7 +413,8 @@ struct tracer {
 	int (*set_flag)(u32 old_flags, u32 bit, int set);
 	struct tracer *next;
 	int print_max;
 	struct tracer_flags *flags;
+	struct tracer_stat *stats;
 };
 
 struct trace_seq {
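
Putting the struct to work, a minimal registration could look like the sketch below (tracer name and callbacks are hypothetical; register_tracer() and default_wait_pipe() are existing kernel symbols):

static int mytracer_init(struct trace_array *tr)
{
	/* a tracer should raise a warning if init fails */
	return 0;
}

static void mytracer_reset(struct trace_array *tr)
{
}

static struct tracer mytracer __read_mostly = {
	.name		= "mytracer",
	.init		= mytracer_init,
	.reset		= mytracer_reset,
	.wait_pipe	= default_wait_pipe,	/* hook added by this patch */
};

static __init int init_mytracer(void)
{
	return register_tracer(&mytracer);
}
device_initcall(init_mytracer);
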
@@ -348,6 +423,16 @@ struct trace_seq {
 	unsigned int readpos;
 };
 
+static inline void
+trace_seq_init(struct trace_seq *s)
+{
+	s->len = 0;
+	s->readpos = 0;
+}
+
+
+#define TRACE_PIPE_ALL_CPU	-1
+
 /*
  * Trace iterator - used by printout routines who present trace
  * results to users and which routines might sleep, etc:
@@ -356,6 +441,8 @@ struct trace_iterator {
 	struct trace_array *tr;
 	struct tracer *trace;
 	void *private;
+	int cpu_file;
+	struct mutex mutex;
 	struct ring_buffer_iter *buffer_iter[NR_CPUS];
 
 	/* The below is zeroed out in pipe_read */
@@ -371,6 +458,7 @@ struct trace_iterator {
 	cpumask_var_t started;
 };
 
+int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
@@ -379,26 +467,50 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
+struct ring_buffer_event;
+
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						    unsigned char type,
+						    unsigned long len,
+						    unsigned long flags,
+						    int pc);
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc);
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+				  unsigned long flags, int pc);
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+					unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+				       unsigned long flags, int pc);
+
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 						struct trace_array_cpu *data);
+
+struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
+					  int *ent_cpu, u64 *ent_ts);
+
 void tracing_generic_entry_update(struct trace_entry *entry,
 				  unsigned long flags,
 				  int pc);
 
+void default_wait_pipe(struct trace_iterator *iter);
+void poll_wait_pipe(struct trace_iterator *iter);
+
 void ftrace(struct trace_array *tr,
 	    struct trace_array_cpu *data,
 	    unsigned long ip,
 	    unsigned long parent_ip,
 	    unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *prev,
 				struct task_struct *next,
 				unsigned long flags, int pc);
 void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *wakee,
 				struct task_struct *cur,
 				unsigned long flags, int pc);
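
The reserve/commit pair declared above is the write path the tracers now share; a sketch of a caller (hypothetical function name), close to what trace_function() does in trace.c:

static void record_fn_entry(struct trace_array *tr, unsigned long ip,
			    unsigned long parent_ip, unsigned long flags,
			    int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;			/* buffer full or disabled */
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	trace_buffer_unlock_commit(tr, event, flags, pc);
}
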
@@ -408,14 +520,12 @@ void trace_special(struct trace_array *tr,
 		   unsigned long arg2,
 		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
-		    struct trace_array_cpu *data,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
@@ -434,15 +544,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
 
-extern cycle_t ftrace_now(int cpu);
+void __trace_stack(struct trace_array *tr,
+		   unsigned long flags,
+		   int skip, int pc);
 
-#ifdef CONFIG_FUNCTION_TRACER
-void tracing_start_function_trace(void);
-void tracing_stop_function_trace(void);
-#else
-# define tracing_start_function_trace() do { } while (0)
-# define tracing_stop_function_trace() do { } while (0)
-#endif
+extern cycle_t ftrace_now(int cpu);
 
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 typedef void
@@ -456,10 +562,10 @@ struct tracer_switch_ops {
 	void *private;
 	struct tracer_switch_ops *next;
 };
-
-char *trace_find_cmdline(int pid);
 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 
+extern void trace_find_cmdline(int pid, char comm[]);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
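
Note the signature change: trace_find_cmdline() now fills a caller-supplied buffer instead of returning a pointer into the shared cmdline cache, so the result cannot be overwritten behind the caller's back. A sketch of the new convention (a TASK_COMM_LEN-sized buffer is assumed; "entry" and "s" are hypothetical locals):

	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);
	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
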
@@ -469,6 +575,8 @@ extern int DYN_FTRACE_TEST_NAME(void);
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
 					   struct trace_array *tr);
+extern int trace_selftest_startup_function_graph(struct tracer *trace,
+						 struct trace_array *tr);
 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
 					  struct trace_array *tr);
 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
@@ -488,18 +596,11 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
-extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
-extern void trace_seq_print_cont(struct trace_seq *s,
-				 struct trace_iterator *iter);
-
-extern int
-seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
-		unsigned long sym_flags);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-				 size_t cnt);
 extern long ns2usecs(cycle_t nsec);
 extern int
-trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
+extern int
+trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 
 extern unsigned long trace_flags;
@@ -580,7 +681,11 @@ enum trace_iterator_flags {
 	TRACE_ITER_ANNOTATE = 0x2000,
 	TRACE_ITER_USERSTACKTRACE = 0x4000,
 	TRACE_ITER_SYM_USEROBJ = 0x8000,
-	TRACE_ITER_PRINTK_MSGONLY = 0x10000
+	TRACE_ITER_PRINTK_MSGONLY = 0x10000,
+	TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
+	TRACE_ITER_LATENCY_FMT = 0x40000,
+	TRACE_ITER_GLOBAL_CLK = 0x80000,
+	TRACE_ITER_SLEEP_TIME = 0x100000,
 };
 
 /*
@@ -601,12 +706,12 @@ extern struct tracer nop_trace;
  * preempt_enable (after a disable), a schedule might take place
  * causing an infinite recursion.
  *
- * To prevent this, we read the need_recshed flag before
+ * To prevent this, we read the need_resched flag before
  * disabling preemption. When we want to enable preemption we
  * check the flag, if it is set, then we call preempt_enable_no_resched.
  * Otherwise, we call preempt_enable.
  *
- * The rational for doing the above is that if need resched is set
+ * The rational for doing the above is that if need_resched is set
  * and we have yet to reschedule, we are either in an atomic location
  * (where we do not need to check for scheduling) or we are inside
  * the scheduler and do not want to resched.
@@ -627,7 +732,7 @@ static inline int ftrace_preempt_disable(void)
  *
  * This is a scheduler safe way to enable preemption and not miss
  * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we were either inside an atomic or
+ * If resched is set, then we are either inside an atomic or
  * are inside the scheduler (we would have already scheduled
  * otherwise). In this case, we do not want to call normal
  * preempt_enable, but preempt_enable_no_resched instead.
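
The pattern the two comment blocks describe, as a tracing callback would use it (the callback body is a hypothetical sketch; both helpers are defined in this header):

static void my_trace_probe(void)
{
	int resched;

	/* may run from inside the scheduler with need_resched set */
	resched = ftrace_preempt_disable();

	/* ... record the event into the ring buffer ... */

	/* re-enables preemption without re-entering schedule() */
	ftrace_preempt_enable(resched);
}
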
@@ -664,4 +769,118 @@ static inline void trace_branch_disable(void)
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
+/* set ring buffers to default size if not already done so */
+int tracing_update_buffers(void);
+
+/* trace event type bit fields, not numeric */
+enum {
+	TRACE_EVENT_TYPE_PRINTF = 1,
+	TRACE_EVENT_TYPE_RAW = 2,
+};
+
+struct ftrace_event_field {
+	struct list_head link;
+	char *name;
+	char *type;
+	int offset;
+	int size;
+};
+
+struct ftrace_event_call {
+	char *name;
+	char *system;
+	struct dentry *dir;
+	int enabled;
+	int (*regfunc)(void);
+	void (*unregfunc)(void);
+	int id;
+	int (*raw_init)(void);
+	int (*show_format)(struct trace_seq *s);
+	int (*define_fields)(void);
+	struct list_head fields;
+	struct filter_pred **preds;
+
+#ifdef CONFIG_EVENT_PROFILE
+	atomic_t profile_count;
+	int (*profile_enable)(struct ftrace_event_call *);
+	void (*profile_disable)(struct ftrace_event_call *);
+#endif
+};
+
+struct event_subsystem {
+	struct list_head list;
+	const char *name;
+	struct dentry *entry;
+	struct filter_pred **preds;
+};
+
+#define events_for_each(event)						\
+	for (event = __start_ftrace_events;				\
+	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+	     event++)
+
+#define MAX_FILTER_PRED 8
+
+struct filter_pred;
+
+typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
+
+struct filter_pred {
+	filter_pred_fn_t fn;
+	u64 val;
+	char *str_val;
+	int str_len;
+	char *field_name;
+	int offset;
+	int not;
+	int or;
+	int compound;
+	int clear;
+};
+
+int trace_define_field(struct ftrace_event_call *call, char *type,
+		       char *name, int offset, int size);
+extern void filter_free_pred(struct filter_pred *pred);
+extern void filter_print_preds(struct filter_pred **preds,
+			       struct trace_seq *s);
+extern int filter_parse(char **pbuf, struct filter_pred *pred);
+extern int filter_add_pred(struct ftrace_event_call *call,
+			   struct filter_pred *pred);
+extern void filter_free_preds(struct ftrace_event_call *call);
+extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern void filter_free_subsystem_preds(struct event_subsystem *system);
+extern int filter_add_subsystem_pred(struct event_subsystem *system,
+				     struct filter_pred *pred);
+
+void event_trace_printk(unsigned long ip, const char *fmt, ...);
+extern struct ftrace_event_call __start_ftrace_events[];
+extern struct ftrace_event_call __stop_ftrace_events[];
+
+#define for_each_event(event)						\
+	for (event = __start_ftrace_events;				\
+	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+	     event++)
+
+extern const char *__start___trace_bprintk_fmt[];
+extern const char *__stop___trace_bprintk_fmt[];
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to allocate the static variable to fmt if it is not a
+ * constant. Even with the outer if statement optimizing out.
+ */
+#define event_trace_printk(ip, fmt, args...)				\
+do {									\
+	__trace_printk_check_format(fmt, ##args);			\
+	tracing_record_cmdline(current);				\
+	if (__builtin_constant_p(fmt)) {				\
+		static const char *trace_printk_fmt			\
+		__attribute__((section("__trace_printk_fmt"))) =	\
+			__builtin_constant_p(fmt) ? fmt : NULL;		\
+									\
+		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
+	} else								\
+		__trace_printk(ip, fmt, ##args);			\
+} while (0)
+
 #endif /* _LINUX_KERNEL_TRACE_H */
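
A hypothetical call site for the event_trace_printk() macro above (nr_queued is a stand-in variable): with a constant format the string itself is placed in the __trace_printk_fmt section at build time, so only its address plus the binary arguments reach the ring buffer via __trace_bprintk(); a non-constant format falls back to __trace_printk(), which formats at trace time:

	event_trace_printk(_THIS_IP_, "queued %d requests on cpu %d\n",
			   nr_queued, smp_processor_id());
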