path: root/kernel/trace/trace.h
Diffstat (limited to 'kernel/trace/trace.h')
 kernel/trace/trace.h | 321 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 273 insertions(+), 48 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4d3d381bfd95..e685ac2b2ba1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,6 +9,8 @@
 #include <linux/mmiotrace.h>
 #include <linux/ftrace.h>
 #include <trace/boot.h>
+#include <trace/kmemtrace.h>
+#include <trace/power.h>
 
 enum trace_type {
 	__TRACE_FIRST_TYPE = 0,
@@ -16,9 +18,9 @@ enum trace_type {
 	TRACE_FN,
 	TRACE_CTX,
 	TRACE_WAKE,
-	TRACE_CONT,
 	TRACE_STACK,
 	TRACE_PRINT,
+	TRACE_BPRINT,
 	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
@@ -29,9 +31,14 @@ enum trace_type {
 	TRACE_GRAPH_ENT,
 	TRACE_USER_STACK,
 	TRACE_HW_BRANCHES,
+	TRACE_SYSCALL_ENTER,
+	TRACE_SYSCALL_EXIT,
+	TRACE_KMEM_ALLOC,
+	TRACE_KMEM_FREE,
 	TRACE_POWER,
+	TRACE_BLK,
 
-	__TRACE_LAST_TYPE
+	__TRACE_LAST_TYPE,
 };
 
 /*
@@ -42,7 +49,6 @@ enum trace_type {
  */
 struct trace_entry {
 	unsigned char		type;
-	unsigned char		cpu;
 	unsigned char		flags;
 	unsigned char		preempt_count;
 	int			pid;
@@ -60,13 +66,13 @@ struct ftrace_entry {
 
 /* Function call entry */
 struct ftrace_graph_ent_entry {
 	struct trace_entry		ent;
 	struct ftrace_graph_ent		graph_ent;
 };
 
 /* Function return entry */
 struct ftrace_graph_ret_entry {
 	struct trace_entry		ent;
 	struct ftrace_graph_ret		ret;
 };
 extern struct tracer boot_tracer;
@@ -112,12 +118,18 @@ struct userstack_entry {
 };
 
 /*
- * ftrace_printk entry:
+ * trace_printk entry:
  */
+struct bprint_entry {
+	struct trace_entry	ent;
+	unsigned long		ip;
+	const char		*fmt;
+	u32			buf[];
+};
+
 struct print_entry {
 	struct trace_entry	ent;
 	unsigned long		ip;
-	int			depth;
 	char			buf[];
 };
 
@@ -170,15 +182,51 @@ struct trace_power {
 	struct power_trace	state_data;
 };
 
+enum kmemtrace_type_id {
+	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
+	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
+	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
+};
+
+struct kmemtrace_alloc_entry {
+	struct trace_entry	ent;
+	enum kmemtrace_type_id	type_id;
+	unsigned long		call_site;
+	const void		*ptr;
+	size_t			bytes_req;
+	size_t			bytes_alloc;
+	gfp_t			gfp_flags;
+	int			node;
+};
+
+struct kmemtrace_free_entry {
+	struct trace_entry	ent;
+	enum kmemtrace_type_id	type_id;
+	unsigned long		call_site;
+	const void		*ptr;
+};
+
+struct syscall_trace_enter {
+	struct trace_entry	ent;
+	int			nr;
+	unsigned long		args[];
+};
+
+struct syscall_trace_exit {
+	struct trace_entry	ent;
+	int			nr;
+	unsigned long		ret;
+};
+
+
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
  *  IRQS_OFF		- interrupts were disabled
  *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
  *  NEED_RESCED		- reschedule is requested
  *  HARDIRQ		- inside an interrupt handler
  *  SOFTIRQ		- inside a softirq handler
- *  CONT		- multiple entries hold the trace item
  */
 enum trace_flag_type {
 	TRACE_FLAG_IRQS_OFF		= 0x01,
@@ -186,7 +234,6 @@ enum trace_flag_type {
 	TRACE_FLAG_NEED_RESCHED		= 0x04,
 	TRACE_FLAG_HARDIRQ		= 0x08,
 	TRACE_FLAG_SOFTIRQ		= 0x10,
-	TRACE_FLAG_CONT			= 0x20,
 };
 
 #define TRACE_BUF_SIZE		1024
@@ -198,6 +245,7 @@ enum trace_flag_type {
  */
 struct trace_array_cpu {
 	atomic_t		disabled;
+	void			*buffer_page;	/* ring buffer spare */
 
 	/* these fields get copied into max-trace: */
 	unsigned long		trace_idx;
@@ -262,10 +310,10 @@ extern void __ftrace_bad_type(void);
 	do {								\
 		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
 		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
-		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
+		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
 		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
@@ -279,7 +327,15 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 			  TRACE_GRAPH_RET);				\
 		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
 		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
+		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
+			  TRACE_KMEM_ALLOC);				\
+		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
+			  TRACE_KMEM_FREE);				\
+		IF_ASSIGN(var, ent, struct syscall_trace_enter,		\
+			  TRACE_SYSCALL_ENTER);				\
+		IF_ASSIGN(var, ent, struct syscall_trace_exit,		\
+			  TRACE_SYSCALL_EXIT);				\
 		__ftrace_bad_type();					\
 	} while (0)
 
@@ -287,7 +343,8 @@ extern void __ftrace_bad_type(void);
 enum print_line_t {
 	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
 	TRACE_TYPE_HANDLED	= 1,
-	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
+	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
+	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
 
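For context, a tracer's print_line callback returns one of these codes. A minimal sketch of such a callback (an illustration, not part of this commit; it assumes trace_seq_printf(), which this series moves to kernel/trace/trace_output.h):

	static enum print_line_t example_print_line(struct trace_iterator *iter)
	{
		struct trace_entry *entry = iter->ent;

		/* Not ours: let the default output routines format it. */
		if (entry->type != TRACE_FN)
			return TRACE_TYPE_UNHANDLED;

		/* trace_seq_printf() returns 0 when the seq buffer is full. */
		if (!trace_seq_printf(&iter->seq, "pid %d hit a function\n",
				      entry->pid))
			return TRACE_TYPE_PARTIAL_LINE;	/* flush seq, retry */

		return TRACE_TYPE_HANDLED;
	}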
@@ -297,8 +354,8 @@ enum print_line_t {
  * flags value in struct tracer_flags.
  */
 struct tracer_opt {
 	const char	*name; /* Will appear on the trace_options file */
 	u32		bit; /* Mask assigned in val field in tracer_flags */
 };
 
 /*
@@ -307,28 +364,51 @@ struct tracer_opt {
  */
 struct tracer_flags {
 	u32			val;
 	struct tracer_opt	*opts;
 };
 
 /* Makes more easy to define a tracer opt */
 #define TRACER_OPT(s, b)	.name = #s, .bit = b
 
-/*
- * A specific tracer, represented by methods that operate on a trace array:
+
+/**
+ * struct tracer - a specific tracer and its callbacks to interact with debugfs
+ * @name: the name chosen to select it on the available_tracers file
+ * @init: called when one switches to this tracer (echo name > current_tracer)
+ * @reset: called when one switches to another tracer
+ * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
+ * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @open: called when the trace file is opened
+ * @pipe_open: called when the trace_pipe file is opened
+ * @wait_pipe: override how the user waits for traces on trace_pipe
+ * @close: called when the trace file is released
+ * @read: override the default read callback on trace_pipe
+ * @splice_read: override the default splice_read callback on trace_pipe
+ * @selftest: selftest to run on boot (see trace_selftest.c)
+ * @print_headers: override the first lines that describe your columns
+ * @print_line: callback that prints a trace
+ * @set_flag: signals one of your private flags changed (trace_options file)
+ * @flags: your private flags
  */
 struct tracer {
 	const char		*name;
-	/* Your tracer should raise a warning if init fails */
 	int			(*init)(struct trace_array *tr);
 	void			(*reset)(struct trace_array *tr);
 	void			(*start)(struct trace_array *tr);
 	void			(*stop)(struct trace_array *tr);
 	void			(*open)(struct trace_iterator *iter);
 	void			(*pipe_open)(struct trace_iterator *iter);
+	void			(*wait_pipe)(struct trace_iterator *iter);
 	void			(*close)(struct trace_iterator *iter);
 	ssize_t			(*read)(struct trace_iterator *iter,
 					struct file *filp, char __user *ubuf,
 					size_t cnt, loff_t *ppos);
+	ssize_t			(*splice_read)(struct trace_iterator *iter,
+					       struct file *filp,
+					       loff_t *ppos,
+					       struct pipe_inode_info *pipe,
+					       size_t len,
+					       unsigned int flags);
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	int			(*selftest)(struct tracer *trace,
 					    struct trace_array *tr);
@@ -339,7 +419,8 @@ struct tracer {
 	int			(*set_flag)(u32 old_flags, u32 bit, int set);
 	struct tracer		*next;
 	int			print_max;
 	struct tracer_flags	*flags;
+	struct tracer_stat	*stats;
 };
 
 struct trace_seq {
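To illustrate the callbacks documented in the kernel-doc above, a minimal tracer plugin might look like the sketch below. This is illustrative only, not from this commit; it assumes register_tracer() and tracing_reset_online_cpus() as declared elsewhere in this header and in trace.c, and reuses the hypothetical example_print_line() from the earlier sketch:

	static int example_trace_init(struct trace_array *tr)
	{
		tracing_reset_online_cpus(tr);	/* start with empty buffers */
		return 0;
	}

	static void example_trace_reset(struct trace_array *tr)
	{
		/* tear down whatever init set up */
	}

	static struct tracer example_tracer __read_mostly = {
		.name		= "example",
		.init		= example_trace_init,
		.reset		= example_trace_reset,
		.print_line	= example_print_line,
	};

	/* Typically registered from an __init function: */
	static __init int init_example_tracer(void)
	{
		return register_tracer(&example_tracer);
	}
	device_initcall(init_example_tracer);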
@@ -348,6 +429,16 @@ struct trace_seq {
 	unsigned int		readpos;
 };
 
+static inline void
+trace_seq_init(struct trace_seq *s)
+{
+	s->len = 0;
+	s->readpos = 0;
+}
+
+
+#define TRACE_PIPE_ALL_CPU	-1
+
 /*
  * Trace iterator - used by printout routines who present trace
  * results to users and which routines might sleep, etc:
@@ -356,6 +447,8 @@ struct trace_iterator {
 	struct trace_array	*tr;
 	struct tracer		*trace;
 	void			*private;
+	int			cpu_file;
+	struct mutex		mutex;
 	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
 
 	/* The below is zeroed out in pipe_read */
@@ -371,6 +464,7 @@ struct trace_iterator {
 	cpumask_var_t		started;
 };
 
+int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
@@ -379,26 +473,50 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
 
+struct ring_buffer_event;
+
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						    unsigned char type,
+						    unsigned long len,
+						    unsigned long flags,
+						    int pc);
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc);
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+				  unsigned long flags, int pc);
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+					unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+				       unsigned long flags, int pc);
+
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 						struct trace_array_cpu *data);
+
+struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
+					  int *ent_cpu, u64 *ent_ts);
+
 void tracing_generic_entry_update(struct trace_entry *entry,
 				  unsigned long flags,
 				  int pc);
 
+void default_wait_pipe(struct trace_iterator *iter);
+void poll_wait_pipe(struct trace_iterator *iter);
+
 void ftrace(struct trace_array *tr,
 	    struct trace_array_cpu *data,
 	    unsigned long ip,
 	    unsigned long parent_ip,
 	    unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *prev,
 				struct task_struct *next,
 				unsigned long flags, int pc);
 void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *wakee,
 				struct task_struct *cur,
 				unsigned long flags, int pc);
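The reserve/commit pair added above is how the event writers in trace.c fill the ring buffer. A sketch of the usual pattern, modeled on trace_function() in trace.c (the function name here is illustrative):

	static void example_record_fn(struct trace_array *tr, unsigned long ip,
				      unsigned long parent_ip)
	{
		struct ring_buffer_event *event;
		struct ftrace_entry *entry;
		unsigned long flags;
		int pc = preempt_count();

		local_save_flags(flags);
		/* Reserve space for one TRACE_FN entry in the ring buffer. */
		event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
						  flags, pc);
		if (!event)
			return;	/* buffer disabled or full */
		entry = ring_buffer_event_data(event);
		entry->ip = ip;
		entry->parent_ip = parent_ip;
		/* Publish the entry and wake up any readers. */
		trace_buffer_unlock_commit(tr, event, flags, pc);
	}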
@@ -408,14 +526,12 @@ void trace_special(struct trace_array *tr,
 		   unsigned long arg2,
 		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
-		    struct trace_array_cpu *data,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
@@ -434,15 +550,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
 
-extern cycle_t ftrace_now(int cpu);
+void __trace_stack(struct trace_array *tr,
+		   unsigned long flags,
+		   int skip, int pc);
 
-#ifdef CONFIG_FUNCTION_TRACER
-void tracing_start_function_trace(void);
-void tracing_stop_function_trace(void);
-#else
-# define tracing_start_function_trace() do { } while (0)
-# define tracing_stop_function_trace() do { } while (0)
-#endif
+extern cycle_t ftrace_now(int cpu);
 
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 typedef void
@@ -456,10 +568,10 @@ struct tracer_switch_ops {
 	void				*private;
 	struct tracer_switch_ops	*next;
 };
-
-char *trace_find_cmdline(int pid);
 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
 
+extern void trace_find_cmdline(int pid, char comm[]);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
@@ -469,6 +581,8 @@ extern int DYN_FTRACE_TEST_NAME(void);
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
 					   struct trace_array *tr);
+extern int trace_selftest_startup_function_graph(struct tracer *trace,
+						 struct trace_array *tr);
 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
 					  struct trace_array *tr);
 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
@@ -488,18 +602,11 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
-extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
-extern void trace_seq_print_cont(struct trace_seq *s,
-				 struct trace_iterator *iter);
-
+extern unsigned long long ns2usecs(cycle_t nsec);
 extern int
-seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
-		 unsigned long sym_flags);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-				 size_t cnt);
-extern long ns2usecs(cycle_t nsec);
+trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
 extern int
-trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 
 extern unsigned long trace_flags;
 
@@ -580,7 +687,11 @@ enum trace_iterator_flags {
 	TRACE_ITER_ANNOTATE		= 0x2000,
 	TRACE_ITER_USERSTACKTRACE	= 0x4000,
 	TRACE_ITER_SYM_USEROBJ		= 0x8000,
-	TRACE_ITER_PRINTK_MSGONLY	= 0x10000
+	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
+	TRACE_ITER_CONTEXT_INFO		= 0x20000, /* Print pid/cpu/time */
+	TRACE_ITER_LATENCY_FMT		= 0x40000,
+	TRACE_ITER_GLOBAL_CLK		= 0x80000,
+	TRACE_ITER_SLEEP_TIME		= 0x100000,
 };
 
 /*
@@ -601,12 +712,12 @@ extern struct tracer nop_trace;
  * preempt_enable (after a disable), a schedule might take place
  * causing an infinite recursion.
  *
- * To prevent this, we read the need_recshed flag before
+ * To prevent this, we read the need_resched flag before
  * disabling preemption. When we want to enable preemption we
  * check the flag, if it is set, then we call preempt_enable_no_resched.
  * Otherwise, we call preempt_enable.
  *
- * The rational for doing the above is that if need resched is set
+ * The rational for doing the above is that if need_resched is set
  * and we have yet to reschedule, we are either in an atomic location
  * (where we do not need to check for scheduling) or we are inside
  * the scheduler and do not want to resched.
@@ -627,7 +738,7 @@ static inline int ftrace_preempt_disable(void)
  *
  * This is a scheduler safe way to enable preemption and not miss
  * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we were either inside an atomic or
+ * If resched is set, then we are either inside an atomic or
  * are inside the scheduler (we would have already scheduled
  * otherwise). In this case, we do not want to call normal
  * preempt_enable, but preempt_enable_no_resched instead.
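In practice the pair is used exactly as the comment describes, with the saved state threaded from disable to enable:

	int resched;

	resched = ftrace_preempt_disable();
	/* ... record a trace entry; nothing here may schedule ... */
	ftrace_preempt_enable(resched);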
@@ -664,4 +775,118 @@ static inline void trace_branch_disable(void)
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
+/* set ring buffers to default size if not already done so */
+int tracing_update_buffers(void);
+
+/* trace event type bit fields, not numeric */
+enum {
+	TRACE_EVENT_TYPE_PRINTF		= 1,
+	TRACE_EVENT_TYPE_RAW		= 2,
+};
+
+struct ftrace_event_field {
+	struct list_head	link;
+	char			*name;
+	char			*type;
+	int			offset;
+	int			size;
+};
+
+struct ftrace_event_call {
+	char			*name;
+	char			*system;
+	struct dentry		*dir;
+	int			enabled;
+	int			(*regfunc)(void);
+	void			(*unregfunc)(void);
+	int			id;
+	int			(*raw_init)(void);
+	int			(*show_format)(struct trace_seq *s);
+	int			(*define_fields)(void);
+	struct list_head	fields;
+	struct filter_pred	**preds;
+
+#ifdef CONFIG_EVENT_PROFILE
+	atomic_t	profile_count;
+	int		(*profile_enable)(struct ftrace_event_call *);
+	void		(*profile_disable)(struct ftrace_event_call *);
+#endif
+};
+
+struct event_subsystem {
+	struct list_head	list;
+	const char		*name;
+	struct dentry		*entry;
+	struct filter_pred	**preds;
+};
+
+#define events_for_each(event)						\
+	for (event = __start_ftrace_events;				\
+	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+	     event++)
+
+#define MAX_FILTER_PRED 8
+
+struct filter_pred;
+
+typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
+
+struct filter_pred {
+	filter_pred_fn_t	fn;
+	u64			val;
+	char			*str_val;
+	int			str_len;
+	char			*field_name;
+	int			offset;
+	int			not;
+	int			or;
+	int			compound;
+	int			clear;
+};
+
+int trace_define_field(struct ftrace_event_call *call, char *type,
+		       char *name, int offset, int size);
+extern void filter_free_pred(struct filter_pred *pred);
+extern void filter_print_preds(struct filter_pred **preds,
+			       struct trace_seq *s);
+extern int filter_parse(char **pbuf, struct filter_pred *pred);
+extern int filter_add_pred(struct ftrace_event_call *call,
+			   struct filter_pred *pred);
+extern void filter_free_preds(struct ftrace_event_call *call);
+extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern void filter_free_subsystem_preds(struct event_subsystem *system);
+extern int filter_add_subsystem_pred(struct event_subsystem *system,
+				     struct filter_pred *pred);
+
+void event_trace_printk(unsigned long ip, const char *fmt, ...);
+extern struct ftrace_event_call __start_ftrace_events[];
+extern struct ftrace_event_call __stop_ftrace_events[];
+
+#define for_each_event(event)						\
+	for (event = __start_ftrace_events;				\
+	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+	     event++)
+
+extern const char *__start___trace_bprintk_fmt[];
+extern const char *__stop___trace_bprintk_fmt[];
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to allocate the static variable to fmt if it is not a
+ * constant. Even with the outer if statement optimizing out.
+ */
+#define event_trace_printk(ip, fmt, args...)				\
+do {									\
+	__trace_printk_check_format(fmt, ##args);			\
+	tracing_record_cmdline(current);				\
+	if (__builtin_constant_p(fmt)) {				\
+		static const char *trace_printk_fmt			\
+		__attribute__((section("__trace_printk_fmt"))) =	\
+			__builtin_constant_p(fmt) ? fmt : NULL;		\
+									\
+		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
+	} else								\
+		__trace_printk(ip, fmt, ##args);			\
+} while (0)
+
 #endif /* _LINUX_KERNEL_TRACE_H */
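The event_trace_printk() macro added at the end of the diff dispatches on whether the format string is a build-time constant. A hedged usage sketch (the call sites, irq counter, and fmt variable below are illustrative, not from this commit; _THIS_IP_ comes from linux/kernel.h):

	/* Constant format: the string lands in the __trace_printk_fmt
	 * section and the cheaper binary __trace_bprintk() path is used. */
	event_trace_printk(_THIS_IP_, "irq %d handled\n", irq);

	/* Non-constant format: falls back to plain __trace_printk(),
	 * which copies the fully formatted string into the buffer. */
	event_trace_printk(_THIS_IP_, fmt, irq);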