path: root/include/trace
author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-21 12:05:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-21 12:05:47 -0400
commit		bd4c3a3441144cd46d1f544046523724c5bc6e94 (patch)
tree		8b5c67249a7a163caf3f88cbcb9df5236fcc3b93 /include/trace
parent		b3727c24da69971503a4ca98b3b877753c6a4393 (diff)
parent		583a22e7c154dc0a3938db522696b4bc7f098f59 (diff)
Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  kernel/profile.c: Switch /proc/irq/prof_cpu_mask to seq_file
  tracing: Export trace_profile_buf symbols
  tracing/events: use list_for_entry_continue
  tracing: remove max_tracer_type_len
  function-graph: use ftrace_graph_funcs directly
  tracing: Remove markers
  tracing: Allocate the ftrace event profile buffer dynamically
  tracing: Factorize the events profile accounting
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/ftrace.h	111
1 file changed, 63 insertions(+), 48 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 72a3b437b829..a0361cb69769 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -382,20 +382,14 @@ static inline int ftrace_get_offsets_##call( \
  *
  * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
  *
- * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * static int ftrace_profile_enable_<call>(void)
  * {
- * 	int ret = 0;
- *
- * 	if (!atomic_inc_return(&event_call->profile_count))
- * 		ret = register_trace_<call>(ftrace_profile_<call>);
- *
- * 	return ret;
+ * 	return register_trace_<call>(ftrace_profile_<call>);
  * }
  *
- * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * static void ftrace_profile_disable_<call>(void)
  * {
- * 	if (atomic_add_negative(-1, &event->call->profile_count))
- * 		unregister_trace_<call>(ftrace_profile_<call>);
+ * 	unregister_trace_<call>(ftrace_profile_<call>);
  * }
  *
  */
@@ -405,20 +399,14 @@ static inline int ftrace_get_offsets_##call( \
 									\
 static void ftrace_profile_##call(proto);				\
 									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+static int ftrace_profile_enable_##call(void)				\
 {									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
+	return register_trace_##call(ftrace_profile_##call);		\
 }									\
 									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+static void ftrace_profile_disable_##call(void)			\
 {									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
+	unregister_trace_##call(ftrace_profile_##call);			\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -660,11 +648,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * 	struct ftrace_raw_##call *entry;
  * 	u64 __addr = 0, __count = 1;
  * 	unsigned long irq_flags;
+ * 	struct trace_entry *ent;
  * 	int __entry_size;
  * 	int __data_size;
+ * 	int __cpu
  * 	int pc;
  *
- * 	local_save_flags(irq_flags);
  * 	pc = preempt_count();
  *
  * 	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
@@ -675,25 +664,34 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * 			     sizeof(u64));
  * 	__entry_size -= sizeof(u32);
  *
- * 	do {
- * 		char raw_data[__entry_size]; <- allocate our sample in the stack
- * 		struct trace_entry *ent;
+ * 	// Protect the non nmi buffer
+ * 	// This also protects the rcu read side
+ * 	local_irq_save(irq_flags);
+ * 	__cpu = smp_processor_id();
+ *
+ * 	if (in_nmi())
+ * 		raw_data = rcu_dereference(trace_profile_buf_nmi);
+ * 	else
+ * 		raw_data = rcu_dereference(trace_profile_buf);
+ *
+ * 	if (!raw_data)
+ * 		goto end;
  *
- * 		zero dead bytes from alignment to avoid stack leak to userspace:
+ * 	raw_data = per_cpu_ptr(raw_data, __cpu);
  *
- * 		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- * 		entry = (struct ftrace_raw_<call> *)raw_data;
- * 		ent = &entry->ent;
- * 		tracing_generic_entry_update(ent, irq_flags, pc);
- * 		ent->type = event_call->id;
+ * 	//zero dead bytes from alignment to avoid stack leak to userspace:
+ * 	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ * 	entry = (struct ftrace_raw_<call> *)raw_data;
+ * 	ent = &entry->ent;
+ * 	tracing_generic_entry_update(ent, irq_flags, pc);
+ * 	ent->type = event_call->id;
  *
- * 		<tstruct> <- do some jobs with dynamic arrays
+ * 	<tstruct> <- do some jobs with dynamic arrays
  *
- * 		<assign>  <- affect our values
+ * 	<assign>  <- affect our values
  *
- * 		perf_tpcounter_event(event_call->id, __addr, __count, entry,
- * 			     __entry_size);  <- submit them to perf counter
- * 	} while (0);
+ * 	perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ * 		     __entry_size);  <- submit them to perf counter
  *
  * }
  */
@@ -716,11 +714,13 @@ static void ftrace_profile_##call(proto) \
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
+	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
+	char *raw_data;							\
+	int __cpu;							\
 	int pc;								\
 									\
-	local_save_flags(irq_flags);					\
 	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
@@ -728,23 +728,38 @@ static void ftrace_profile_##call(proto) \
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	do {								\
-		char raw_data[__entry_size];				\
-		struct trace_entry *ent;				\
+	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+		      "profile buffer not large enough"))		\
+		return;							\
+									\
+	local_irq_save(irq_flags);					\
+	__cpu = smp_processor_id();					\
 									\
-		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;\
-		entry = (struct ftrace_raw_##call *)raw_data;		\
-		ent = &entry->ent;					\
-		tracing_generic_entry_update(ent, irq_flags, pc);	\
-		ent->type = event_call->id;				\
+	if (in_nmi())							\
+		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
+	else								\
+		raw_data = rcu_dereference(trace_profile_buf);		\
 									\
-		tstruct							\
+	if (!raw_data)							\
+		goto end;						\
 									\
-		{ assign; }						\
+	raw_data = per_cpu_ptr(raw_data, __cpu);			\
 									\
-		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+	entry = (struct ftrace_raw_##call *)raw_data;			\
+	ent = &entry->ent;						\
+	tracing_generic_entry_update(ent, irq_flags, pc);		\
+	ent->type = event_call->id;					\
+									\
+	tstruct								\
+									\
+	{ assign; }							\
+									\
+	perf_tpcounter_event(event_call->id, __addr, __count, entry,	\
 			     __entry_size);				\
-	} while (0);							\
+									\
+end:									\
+	local_irq_restore(irq_flags);					\
 									\
 }
 
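
The first two hunks strip the profile_count bookkeeping out of every generated ftrace_profile_enable_<call>()/ftrace_profile_disable_<call>() pair; that only works because the accounting moves into a single shared helper ("tracing: Factorize the events profile accounting" in the merge message above). The sketch below illustrates that pattern. It reuses the atomic logic removed here, but the helper names and the struct layout are assumptions made for illustration, not the code actually touched by this merge:

/* Illustrative sketch only -- not part of the diff above. */
struct ftrace_event_call {
	int		id;
	atomic_t	profile_count;		/* starts at -1, counts profiling users */
	int		(*profile_enable)(void);	/* generated ftrace_profile_enable_<call> */
	void		(*profile_disable)(void);	/* generated ftrace_profile_disable_<call> */
};

int trace_event_profile_enable(struct ftrace_event_call *event)
{
	int ret = 0;

	/* -1 -> 0 transition: first profiling user, register the tracepoint probe */
	if (!atomic_inc_return(&event->profile_count))
		ret = event->profile_enable();

	return ret;
}

void trace_event_profile_disable(struct ftrace_event_call *event)
{
	/* dropping back below zero: last profiling user gone, unregister the probe */
	if (atomic_add_negative(-1, &event->profile_count))
		event->profile_disable();
}

With the refcount handled once in common code, each generated enable/disable function shrinks to a bare register_trace_<call>()/unregister_trace_<call>() call, which is exactly what the new side of the first two hunks shows.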
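The remaining hunks change ftrace_profile_<call>() to write its sample into a shared per-cpu buffer (trace_profile_buf, or trace_profile_buf_nmi when running in NMI context) instead of a variable-length array on the stack, reading the buffer pointer with rcu_dereference() under local_irq_save(). The allocation side is not part of include/trace/ftrace.h; the sketch below shows one way such buffers could be allocated lazily and published with RCU so the readers above stay valid. The helper name, the total_profile_count counter, and the use of FTRACE_MAX_PROFILE_SIZE as the slot size are assumptions for illustration ("tracing: Allocate the ftrace event profile buffer dynamically" is the patch that actually introduces the buffers):

/* Illustrative sketch only -- the real definitions live outside this header. */
typedef char profile_buf_t[FTRACE_MAX_PROFILE_SIZE];	/* one sample slot per cpu (assumed) */

char *trace_profile_buf;	/* read with rcu_dereference() in the handlers above */
char *trace_profile_buf_nmi;	/* separate buffer so an NMI cannot clobber an in-flight sample */

static int total_profile_count;	/* number of events currently profiling (assumed) */

/* Allocate the per-cpu buffers when the first event enables profiling. */
static int trace_profile_buffers_get(void)
{
	char *buf;

	if (total_profile_count++)
		return 0;				/* buffers already exist */

	buf = (char *)alloc_percpu(profile_buf_t);
	if (!buf)
		goto fail;
	rcu_assign_pointer(trace_profile_buf, buf);	/* publish for rcu_dereference() readers */

	buf = (char *)alloc_percpu(profile_buf_t);
	if (!buf)
		goto fail_nmi;
	rcu_assign_pointer(trace_profile_buf_nmi, buf);

	return 0;

fail_nmi:
	free_percpu(trace_profile_buf);
	rcu_assign_pointer(trace_profile_buf, NULL);
fail:
	total_profile_count--;
	return -ENOMEM;
}

Freeing would mirror this on the last disable, after waiting for readers (e.g. synchronize_sched()), since the generated handlers only hold the pointer across their local_irq_save()/local_irq_restore() window.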