path: root/include/trace/ftrace.h
author		Frederic Weisbecker <fweisbec@gmail.com>	2009-09-10 19:09:23 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-09-10 19:09:23 -0400
commit	8f8ffe2485bcaa890800681451d380779cea06af (patch)
tree	1d2ef3a27f1cab9a2b9014f4b75886a96a1ae8db /include/trace/ftrace.h
parent	70069577323e6f72b845166724f34b9858134437 (diff)
parent	d28daf923ac5e4a0d7cecebae56f3e339189366b (diff)
Merge commit 'tracing/core' into tracing/kprobes
Conflicts:
	kernel/trace/trace_export.c
	kernel/trace/trace_kprobe.c

Merge reason: This topic branch lacks an important build fix from tracing/core,
0dd7b74787eaf7858c6c573353a83c3e2766e674 ("tracing: Fix double CPP substitution
in TRACE_EVENT_FN"), which prevents crashes when multiple tracepoint headers
are included.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
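The fix referenced above replaces the TP_PROTO()/TP_printk() re-wrapping in
TRACE_EVENT_FN with a plain pass-through macro, __cpparg(). A minimal sketch of
the idea follows; the DO_EMIT, DO_EMIT_FN and MY_PRINT names are hypothetical
stand-ins for TRACE_EVENT, TRACE_EVENT_FN and a TP_*() wrapper, not kernel code.
By the time the forwarding macro expands, its parameters have already been
substituted once, so wrapping them in the TP_*() macros again applies those
macros twice; a variadic pass-through merely keeps the already-expanded,
comma-containing tokens grouped as a single argument:

	#include <stdio.h>

	/* Pass-through: expands to its (possibly comma-containing) argument list. */
	#define __cpparg(arg...) arg

	/* Hypothetical stand-in for a TP_*() wrapper (itself a pass-through here). */
	#define MY_PRINT(args...) args

	/* Hypothetical stand-in for TRACE_EVENT: needs "print" as one argument. */
	#define DO_EMIT(name, print) printf("[" #name "] " print)

	/* Hypothetical stand-in for TRACE_EVENT_FN: "print" arrives already
	 * expanded, commas included; __cpparg() keeps those tokens grouped as a
	 * single DO_EMIT() argument instead of re-wrapping them in MY_PRINT(). */
	#define DO_EMIT_FN(name, print) DO_EMIT(name, __cpparg(print))

	int main(void)
	{
		DO_EMIT_FN(demo, MY_PRINT("value=%d\n", 42)); /* prints "[demo] value=42" */
		return 0;
	}

Without the pass-through, forwarding the expanded "print" tokens bare would
split them across two macro arguments, and re-wrapping them in MY_PRINT() would
apply the wrapper a second time, which is the double substitution the
referenced commit removes.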
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--	include/trace/ftrace.h	29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f2bd7a8f8e8..5d3df2a5049 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -45,14 +45,15 @@
 	};							\
 	static struct ftrace_event_call event_##name
 
+#undef __cpparg
+#define __cpparg(arg...) arg
+
 /* Callbacks are meaningless to ftrace. */
 #undef TRACE_EVENT_FN
 #define TRACE_EVENT_FN(name, proto, args, tstruct,	\
 		assign, print, reg, unreg)		\
-	TRACE_EVENT(name, TP_PROTO(proto), TP_ARGS(args),	\
-		TP_STRUCT__entry(tstruct),			\
-		TP_fast_assign(assign),				\
-		TP_printk(print))
+	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
+		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
@@ -459,13 +460,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
  * {
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ *	struct ring_buffer *buffer;
  *	unsigned long irq_flags;
  *	int pc;
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
- *	event = trace_current_buffer_lock_reserve(event_<call>.id,
+ *	event = trace_current_buffer_lock_reserve(&buffer,
+ *				  event_<call>.id,
  *				  sizeof(struct ftrace_raw_<call>),
  *				  irq_flags, pc);
  *	if (!event)
@@ -475,7 +478,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
  *	<assign>;  <-- Here we assign the entries by the __field and
  *		       __array macros.
  *
- *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
  * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
@@ -567,6 +570,7 @@ static void ftrace_raw_event_##call(proto) \
 	struct ftrace_event_call *event_call = &event_##call;		\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
+	struct ring_buffer *buffer;					\
 	unsigned long irq_flags;					\
 	int __data_size;						\
 	int pc;								\
@@ -576,7 +580,8 @@ static void ftrace_raw_event_##call(proto) \
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 									\
-	event = trace_current_buffer_lock_reserve(event_##call.id,	\
+	event = trace_current_buffer_lock_reserve(&buffer,		\
+				 event_##call.id,			\
 				 sizeof(*entry) + __data_size,		\
 				 irq_flags, pc);			\
 	if (!event)							\
@@ -588,8 +593,9 @@ static void ftrace_raw_event_##call(proto) \
 									\
 	{ assign; }							\
 									\
-	if (!filter_current_check_discard(event_call, entry, event))	\
-		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+		trace_nowake_buffer_unlock_commit(buffer,		\
+						  event, irq_flags, pc); \
 }									\
 									\
 static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
@@ -621,7 +627,6 @@ static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
 		return -ENODEV;						\
 	event_##call.id = id;						\
 	INIT_LIST_HEAD(&event_##call.fields);				\
-	init_preds(&event_##call);					\
 	return 0;							\
 }									\
 									\