author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-09 19:39:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-09 19:39:15 -0400
commit		214b93132023cc9305d5801add812515bea4d7d0
tree		bb8db8677dd80b6ef570b8aa59475b072b81db11	/include/trace
parent		14208b0ec56919f5333dd654b1a7d10765d0ad05
parent		a9fcaaac37b3baba1343f906f52aeb65c4d4e356
Merge tag 'trace-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"Lots of tweaks, small fixes, optimizations, and some helper functions
to help out the rest of the kernel to ease their use of trace events.
The big change for this release is the allowing of other tracers, such
as the latency tracers, to be used in the trace instances and allow
for function or function graph tracing to be in the top level
simultaneously"
* tag 'trace-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
tracing: Fix memory leak on instance deletion
tracing: Fix leak of ring buffer data when new instances creation fails
tracing/kprobes: Avoid self tests if tracing is disabled on boot up
tracing: Return error if ftrace_trace_arrays list is empty
tracing: Only calculate stats of tracepoint benchmarks for 2^32 times
tracing: Convert stddev into u64 in tracepoint benchmark
tracing: Introduce saved_cmdlines_size file
tracing: Add __get_dynamic_array_len() macro for trace events
tracing: Remove unused variable in trace_benchmark
tracing: Eliminate double free on failure of allocation on boot up
ftrace/x86: Call text_ip_addr() instead of the duplicated code
tracing: Print max callstack on stacktrace bug
tracing: Move locking of trace_cmdline_lock into start/stop seq calls
tracing: Try again for saved cmdline if failed due to locking
tracing: Have saved_cmdlines use the seq_read infrastructure
tracing: Add tracepoint benchmark tracepoint
tracing: Print nasty banner when trace_printk() is in use
tracing: Add funcgraph_tail option to print function name after closing braces
tracing: Eliminate duplicate TRACE_GRAPH_PRINT_xx defines
tracing: Add __bitmask() macro to trace events to cpumasks and other bitmasks
...
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/ftrace.h	66
1 file changed, 65 insertions(+), 1 deletion(-)
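For context on the change below: a trace event stores each dynamic array behind a single 32-bit __data_loc word, and the new __get_dynamic_array_len() macro simply reads the length packed into the upper 16 bits of that word, next to the byte offset in the lower 16 bits that __get_dynamic_array() already decodes. A minimal standalone sketch of that packing follows; the helper names are hypothetical and not part of the patch:

#include <stdint.h>

/*
 * Illustration only: a __data_loc word records where a dynamic array
 * lives inside the event record and how long it is -- byte offset in
 * the low 16 bits, length in bytes in the high 16 bits.
 */
static inline uint32_t data_loc_pack(uint16_t offset, uint16_t len)
{
	return ((uint32_t)len << 16) | offset;
}

/* What __get_dynamic_array() extracts. */
static inline uint16_t data_loc_offset(uint32_t loc)
{
	return loc & 0xffff;
}

/* What the new __get_dynamic_array_len() extracts. */
static inline uint16_t data_loc_len(uint32_t loc)
{
	return (loc >> 16) & 0xffff;
}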
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0a1a4f7caf09..0fd06fef9fac 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -53,6 +53,9 @@
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
+
 #undef TP_STRUCT__entry
 #define TP_STRUCT__entry(args...) args
 
@@ -128,6 +131,9 @@
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 struct ftrace_data_offsets_##call {				\
@@ -197,9 +203,22 @@
 #define __get_dynamic_array(field)	\
 		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field)	\
+		((__entry->__data_loc_##field >> 16) & 0xffff)
+
 #undef __get_str
 #define __get_str(field) (char *)__get_dynamic_array(field)
 
+#undef __get_bitmask
+#define __get_bitmask(field)						\
+	({								\
+		void *__bitmask = __get_dynamic_array(field);		\
+		unsigned int __bitmask_size;				\
+		__bitmask_size = __get_dynamic_array_len(field);	\
+		ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
+	})
+
 #undef __print_flags
 #define __print_flags(flag, delim, flag_array...)			\
 	({								\
@@ -322,6 +341,9 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
 static int notrace __init						\
@@ -372,6 +394,29 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 #define __string(item, src) __dynamic_array(char, item,		\
 		    strlen((src) ? (const char *)(src) : "(null)") + 1)
 
+/*
+ * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
+ * num_possible_cpus().
+ */
+#define __bitmask_size_in_bytes_raw(nr_bits)	\
+	(((nr_bits) + 7) / 8)
+
+#define __bitmask_size_in_longs(nr_bits)			\
+	((__bitmask_size_in_bytes_raw(nr_bits) +		\
+	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
+
+/*
+ * __bitmask_size_in_bytes is the number of bytes needed to hold
+ * num_possible_cpus() padded out to the nearest long. This is what
+ * is saved in the buffer, just to be consistent.
+ */
+#define __bitmask_size_in_bytes(nr_bits)				\
+	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
+					 __bitmask_size_in_longs(nr_bits))
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static inline notrace int ftrace_get_offsets_##call(			\
@@ -513,12 +558,22 @@ static inline notrace int ftrace_get_offsets_##call(	\
 	__entry->__data_loc_##item = __data_offsets.item;
 
 #undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)	\
+#define __string(item, src) __dynamic_array(char, item, -1)
 
 #undef __assign_str
 #define __assign_str(dst, src)						\
 	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
 
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits)				\
+	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
@@ -585,7 +640,9 @@ static inline void ftrace_test_probe_##call(void)	\
 #undef __print_symbolic
 #undef __print_hex
 #undef __get_dynamic_array
+#undef __get_dynamic_array_len
 #undef __get_str
+#undef __get_bitmask
 
 #undef TP_printk
 #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
@@ -648,9 +705,16 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 #define __get_dynamic_array(field)	\
 		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field)	\
+		((__entry->__data_loc_##field >> 16) & 0xffff)
+
 #undef __get_str
 #define __get_str(field) (char *)__get_dynamic_array(field)
 
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
 #undef __perf_addr
 #define __perf_addr(a) (__addr = (a))
 
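Taken together, the new macros let an event record a cpumask (or any other bitmask) as a dynamic array of longs and print it as a range list via ftrace_print_bitmask_seq(). A usage sketch under the new API is shown below; the event name, fields, and format string are illustrative and not taken from this patch, and the usual TRACE_SYSTEM/TRACE_INCLUDE boilerplate around the header is omitted:

/*
 * Illustrative only -- a hypothetical tracepoint using the new
 * __bitmask()/__assign_bitmask()/__get_bitmask() helpers.
 */
#include <linux/cpumask.h>
#include <linux/tracepoint.h>

TRACE_EVENT(sample_bitmask,

	TP_PROTO(int bar, const struct cpumask *mask),

	TP_ARGS(bar, mask),

	TP_STRUCT__entry(
		__field(	int,	bar			)
		/* Reserves enough longs in the entry for all possible CPUs. */
		__bitmask(	cpus,	num_possible_cpus()	)
	),

	TP_fast_assign(
		__entry->bar = bar;
		/* Copy the mask bits into the ring buffer entry. */
		__assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
	),

	/* __get_bitmask() renders the saved bits as a range list, e.g. "0-3,8". */
	TP_printk("bar=%d cpus=%s", __entry->bar, __get_bitmask(cpus))
);

At print time, the __get_bitmask() used in TP_printk expands (per the hunk at -197,9 above) into a call to ftrace_print_bitmask_seq() with the pointer and length recovered from the field's __data_loc word.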