author		Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 16:55:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 16:55:38 -0400
commit		9e8529afc4518f4e5d610001545ebc97e1333c79 (patch)
tree		26e1aa2cbb50f3f511cfa7d8e39e6b7bd9221b68 /include/trace
parent		ec25e246b94a3233ab064994ef05a170bdba0e7c (diff)
parent		4c69e6ea415a35eb7f0fc8ee9390c8f7436492a2 (diff)
Merge tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Along with the usual minor fixes and clean ups there are a few major
  changes with this pull request.

  1) Multiple buffers for the ftrace facility

  This feature has been requested by many people over the last few
  years. I even heard that Google was about to implement it themselves.
  I finally had time and cleaned up the code such that you can now
  create multiple instances of the ftrace buffer and have different
  events go to different buffers. This way, a low frequency event will
  not be lost in the noise of a high frequency event.

  Note, currently only events can go to different buffers, the tracers
  (ie function, function_graph and the latency tracers) still can only
  be written to the main buffer.

  2) The function tracer triggers have now been extended.

  The function tracer had two triggers. One to enable tracing when a
  function is hit, and one to disable tracing. Now you can record a
  stack trace on a single (or many) function(s), take a snapshot of the
  buffer (copy it to the snapshot buffer), and you can enable or disable
  an event to be traced when a function is hit.

  3) A perf clock has been added.

  A "perf" clock can be chosen to be used when tracing. This will cause
  ftrace to use the same clock as perf uses, and hopefully this will
  make it easier to interleave the perf and ftrace data for analysis."

* tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (82 commits)
  tracepoints: Prevent null probe from being added
  tracing: Compare to 1 instead of zero for is_signed_type()
  tracing: Remove obsolete macro guard _TRACE_PROFILE_INIT
  ftrace: Get rid of ftrace_profile_bits
  tracing: Check return value of tracing_init_dentry()
  tracing: Get rid of unneeded key calculation in ftrace_hash_move()
  tracing: Reset ftrace_graph_filter_enabled if count is zero
  tracing: Fix off-by-one on allocating stat->pages
  kernel: tracing: Use strlcpy instead of strncpy
  tracing: Update debugfs README file
  tracing: Fix ftrace_dump()
  tracing: Rename trace_event_mutex to trace_event_sem
  tracing: Fix comment about prefix in arch_syscall_match_sym_name()
  tracing: Convert trace_destroy_fields() to static
  tracing: Move find_event_field() into trace_events.c
  tracing: Use TRACE_MAX_PRINT instead of constant
  tracing: Use pr_warn_once instead of open coded implementation
  ring-buffer: Add ring buffer startup selftest
  tracing: Bring Documentation/trace/ftrace.txt up to date
  tracing: Add "perf" trace_clock
  ...

Conflicts:
	kernel/trace/ftrace.c
	kernel/trace/trace.c
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/ftrace.h	49
1 file changed, 23 insertions, 26 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 40dc5e8fe340..19edd7facaa1 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -227,29 +227,18 @@ static notrace enum print_line_t \
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 			 struct trace_event *trace_event)		\
 {									\
-	struct ftrace_event_call *event;				\
 	struct trace_seq *s = &iter->seq;				\
+	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
 	struct ftrace_raw_##call *field;				\
-	struct trace_entry *entry;					\
-	struct trace_seq *p = &iter->tmp_seq;				\
 	int ret;							\
 									\
-	event = container_of(trace_event, struct ftrace_event_call,	\
-			     event);					\
-									\
-	entry = iter->ent;						\
-									\
-	if (entry->type != event->event.type) {				\
-		WARN_ON_ONCE(1);					\
-		return TRACE_TYPE_UNHANDLED;				\
-	}								\
-									\
-	field = (typeof(field))entry;					\
+	field = (typeof(field))iter->ent;				\
 									\
-	trace_seq_init(p);						\
-	ret = trace_seq_printf(s, "%s: ", event->name);			\
+	ret = ftrace_raw_output_prep(iter, trace_event);		\
 	if (ret)							\
-		ret = trace_seq_printf(s, print);			\
+		return ret;						\
+									\
+	ret = trace_seq_printf(s, print);				\
 	if (!ret)							\
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
@@ -335,7 +324,7 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
-static int notrace							\
+static int notrace __init						\
 ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 {									\
 	struct ftrace_raw_##call field;					\
@@ -414,7 +403,8 @@ static inline notrace int ftrace_get_offsets_##call( \
  *
  * static void ftrace_raw_event_<call>(void *__data, proto)
  * {
- *	struct ftrace_event_call *event_call = __data;
+ *	struct ftrace_event_file *ftrace_file = __data;
+ *	struct ftrace_event_call *event_call = ftrace_file->event_call;
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -423,12 +413,16 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	int __data_size;
  *	int pc;
  *
+ *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ *		     &ftrace_file->flags))
+ *		return;
+ *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
  *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
  *
- *	event = trace_current_buffer_lock_reserve(&buffer,
+ *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
  *				  event_<call>->event.type,
  *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
@@ -440,7 +434,7 @@ static inline notrace int ftrace_get_offsets_##call( \
  * __array macros.
  *
  *	if (!filter_current_check_discard(buffer, event_call, entry, event))
- *		trace_current_buffer_unlock_commit(buffer,
+ *		trace_nowake_buffer_unlock_commit(buffer,
  *						   event, irq_flags, pc);
  * }
  *
@@ -518,7 +512,8 @@ static inline notrace int ftrace_get_offsets_##call( \
 static notrace void							\
 ftrace_raw_event_##call(void *__data, proto)				\
 {									\
-	struct ftrace_event_call *event_call = __data;			\
+	struct ftrace_event_file *ftrace_file = __data;			\
+	struct ftrace_event_call *event_call = ftrace_file->event_call; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
@@ -527,12 +522,16 @@ ftrace_raw_event_##call(void *__data, proto) \
 	int __data_size;						\
 	int pc;								\
 									\
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,			\
+		     &ftrace_file->flags))				\
+		return;							\
+									\
 	local_save_flags(irq_flags);					\
 	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 									\
-	event = trace_current_buffer_lock_reserve(&buffer,		\
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,	\
 				 event_call->event.type,		\
 				 sizeof(*entry) + __data_size,		\
 				 irq_flags, pc);			\
@@ -581,7 +580,7 @@ static inline void ftrace_test_probe_##call(void) \
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 _TRACE_PERF_PROTO(call, PARAMS(proto));					\
 static const char print_fmt_##call[] = print;				\
-static struct ftrace_event_class __used event_class_##call = {		\
+static struct ftrace_event_class __used __refdata event_class_##call = { \
 	.system = __stringify(TRACE_SYSTEM),				\
 	.define_fields = ftrace_define_fields_##call,			\
 	.fields = LIST_HEAD_INIT(event_class_##call.fields),\
@@ -705,5 +704,3 @@ static inline void perf_test_probe_##call(void) \
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 #endif /* CONFIG_PERF_EVENTS */
 
-#undef _TRACE_PROFILE_INIT
-
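
For background on what the changed DECLARE_EVENT_CLASS()/ftrace_raw_event_##call machinery operates on, below is a minimal, purely illustrative TRACE_EVENT() header of the kind these macros expand. The "sample" system, the sample_event name, and its single field are hypothetical and are not part of this commit. The probe generated for such an event by the modified macros now receives a struct ftrace_event_file, returns early when FTRACE_EVENT_FL_SOFT_DISABLED_BIT is set for that file, and reserves ring buffer space through trace_event_buffer_lock_reserve(), which is what lets the same event be enabled in one trace buffer instance and disabled in another.

/*
 * Illustrative sample only -- not from this commit. A typical event
 * header that the macros in include/trace/ftrace.h expand into the
 * ftrace_raw_event_<call> and ftrace_raw_output_<call> functions
 * touched by the diff above.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_event,

	TP_PROTO(int value),

	TP_ARGS(value),

	/* Layout of the record stored in the ring buffer (stage 1). */
	TP_STRUCT__entry(
		__field(int, value)
	),

	/* Copies the arguments into the reserved ring buffer entry. */
	TP_fast_assign(
		__entry->value = value;
	),

	/* Format used by ftrace_raw_output_<call>() when printing. */
	TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_SAMPLE_H */

/* This part must be outside the multi-read protection. */
#include <trace/define_trace.h>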