```
author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 11:18:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-18 11:18:07 -0400
commit     3aaf51ace5975050ab43c7d4d7e439e0ae7d13d7
tree       3ceb741d8b78c6dc78be3fd2e4f8aac443044787   /include/trace/ftrace.h
parent     f262af3d08d3fffc4e11277d3a177b2d67ea2aba
parent     cc49b092d308f8ea8634134b0d95d831a88a674b
```
Merge branch 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (24 commits)
oprofile/x86: make AMD IBS hotplug capable
oprofile/x86: notify cpus only when daemon is running
oprofile/x86: reordering some functions
oprofile/x86: stop disabled counters in nmi handler
oprofile/x86: protect cpu hotplug sections
oprofile/x86: remove CONFIG_SMP macros
oprofile/x86: fix uninitialized counter usage during cpu hotplug
oprofile/x86: remove duplicate IBS capability check
oprofile/x86: move IBS code
oprofile/x86: return -EBUSY if counters are already reserved
oprofile/x86: moving shutdown functions
oprofile/x86: reserve counter msrs pairwise
oprofile/x86: rework error handler in nmi_setup()
oprofile: update file list in MAINTAINERS file
oprofile: protect from not being in an IRQ context
oprofile: remove double ring buffering
ring-buffer: Add lost event count to end of sub buffer
tracing: Show the lost events in the trace_pipe output
ring-buffer: Add place holder recording of dropped events
tracing: Fix compile error in module tracepoints when MODULE_UNLOAD not set
...
Diffstat (limited to 'include/trace/ftrace.h')
```
 include/trace/ftrace.h | 33 +++++++++++++++++++--------------
 1 file changed, 19 insertions(+), 14 deletions(-)
```
```diff
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index ea6f9d4a20e..75dd7787fb3 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -154,9 +154,11 @@
  *
  *	field = (typeof(field))entry;
  *
- *	p = get_cpu_var(ftrace_event_seq);
+ *	p = &get_cpu_var(ftrace_event_seq);
  *	trace_seq_init(p);
- *	ret = trace_seq_printf(s, <TP_printk> "\n");
+ *	ret = trace_seq_printf(s, "%s: ", <call>);
+ *	if (ret)
+ *		ret = trace_seq_printf(s, <TP_printk> "\n");
  *	put_cpu();
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
```
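For illustration, here is roughly what the stage-3 output callback described in this hunk expands to once the patch is applied, instantiated for a hypothetical event `foo_bar` with a single `int` field. The event name, the `arg` field, and the format string are illustrative assumptions; the real function is generated by the macros in this header.

```c
/*
 * Sketch of the generated output callback for a made-up event
 * "foo_bar"; foo_bar and its "arg" field are assumptions for
 * illustration only, not part of the patch.
 */
static enum print_line_t
ftrace_raw_output_foo_bar(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct ftrace_raw_foo_bar *field;
	struct trace_entry *entry;
	struct trace_seq *p;
	int ret;

	entry = iter->ent;
	field = (typeof(field))entry;

	p = &get_cpu_var(ftrace_event_seq);	/* the '&' is the fix above */
	trace_seq_init(p);
	/* new: the event name is printed before the TP_printk body */
	ret = trace_seq_printf(s, "%s: ", "foo_bar");
	if (ret)
		ret = trace_seq_printf(s, "arg=%d\n", field->arg);
	put_cpu();
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
```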
```diff
@@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused)	\
  *
  * static void ftrace_raw_event_<call>(proto)
  * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	struct ring_buffer *buffer;
  *	unsigned long irq_flags;
+ *	int __data_size;
  *	int pc;
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
  *	event = trace_current_buffer_lock_reserve(&buffer,
  *				  event_<call>.id,
- *				  sizeof(struct ftrace_raw_<call>),
+ *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
  *	if (!event)
  *		return;
  *	entry = ring_buffer_event_data(event);
  *
- *	<assign>;  <-- Here we assign the entries by the __field and
+ *	{ <assign>; }  <-- Here we assign the entries by the __field and
  *			   __array macros.
  *
- *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *	if (!filter_current_check_discard(buffer, event_call, entry, event))
+ *		trace_current_buffer_unlock_commit(buffer,
+ *						   event, irq_flags, pc);
  * }
  *
  * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
  * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_raw_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
+ *	return register_trace_<call>(ftrace_raw_event_<call>);
  * }
  *
  * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
```
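Similarly, the probe function documented in this hunk would expand along these lines for the hypothetical `foo_bar(int arg)` event. The `foo_bar` identifiers are assumptions; the calls are the ones named in the comment, with the buffer reservation now sized as `sizeof(*entry) + __data_size` and the commit gated on the event filter.

```c
/*
 * Sketch of the generated probe for a made-up event
 * "foo_bar(int arg)"; all foo_bar names are illustrative,
 * not literal macro output.
 */
static void ftrace_raw_event_foo_bar(int arg)
{
	struct ftrace_data_offsets_foo_bar __maybe_unused __data_offsets;
	struct ring_buffer_event *event;
	struct ftrace_raw_foo_bar *entry;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int __data_size;
	int pc;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* bytes needed by __string()/__dynamic_array() fields, 0 here */
	__data_size = ftrace_get_offsets_foo_bar(&__data_offsets, arg);

	event = trace_current_buffer_lock_reserve(&buffer,
						  event_foo_bar.id,
						  sizeof(*entry) + __data_size,
						  irq_flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);

	{ entry->arg = arg; }	/* the <assign> block */

	/* discard the record if the event filter rejects it */
	if (!filter_current_check_discard(buffer, &event_foo_bar,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event,
						   irq_flags, pc);
}
```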
```diff
@@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused)	\
  *	.trace			= ftrace_raw_output_<call>, <-- stage 2
  * };
  *
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
@@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused)	\
  *	.raw_init		= trace_event_raw_init,
  *	.regfunc		= ftrace_reg_event_<call>,
  *	.unregfunc		= ftrace_unreg_event_<call>,
+ *	.print_fmt		= print_fmt_<call>,
+ *	.define_fields		= ftrace_define_fields_<call>,
  * }
  *
  */
```
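The new `print_fmt_<call>` string preserves the TP_printk format as data, so it can be exposed to user space through the event's format file. As an illustration (the TP_printk here is assumed, not part of the patch), with `__entry` rewritten to `REC` by the stringify machinery, `TP_printk("arg=%d", __entry->arg)` would be stored roughly as:

```c
/* Illustration only: assumed TP_printk("arg=%d", __entry->arg) */
static const char print_fmt_foo_bar[] = "\"arg=%d\", REC->arg";
```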
```diff
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
 		return;							\
 	entry	= ring_buffer_event_data(event);			\
 									\
-									\
 	tstruct								\
 									\
 	{ assign; }							\
```
