diff options
author | Steven Rostedt <srostedt@redhat.com> | 2009-09-02 14:17:06 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2009-09-04 18:59:39 -0400 |
commit | e77405ad80f53966524b5c31244e13fbbbecbd84 (patch) | |
tree | 65c05f9e1573e9958e52bb72655e00c8592aacd2 /include/trace/ftrace.h | |
parent | f633903af2ceb0cec07d45e499a072b6593d0ed1 (diff) |
tracing: pass around ring buffer instead of tracer
The latency tracers (irqsoff and wakeup) can swap trace buffers
on the fly. If an event is happening and has reserved data on one of
the buffers, and the latency tracer swaps the global buffer with the
max buffer, the result is that the event may commit the data to the
wrong buffer.
This patch changes the trace recording API to receive the
buffer that was used to reserve a commit. Then this buffer can be passed
in to the commit.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r-- | include/trace/ftrace.h | 15 |
1 file changed, 10 insertions, 5 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index bfbc842600a..308bafd9332 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -460,13 +460,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | |||
460 | * { | 460 | * { |
461 | * struct ring_buffer_event *event; | 461 | * struct ring_buffer_event *event; |
462 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | 462 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 |
463 | * struct ring_buffer *buffer; | ||
463 | * unsigned long irq_flags; | 464 | * unsigned long irq_flags; |
464 | * int pc; | 465 | * int pc; |
465 | * | 466 | * |
466 | * local_save_flags(irq_flags); | 467 | * local_save_flags(irq_flags); |
467 | * pc = preempt_count(); | 468 | * pc = preempt_count(); |
468 | * | 469 | * |
469 | * event = trace_current_buffer_lock_reserve(event_<call>.id, | 470 | * event = trace_current_buffer_lock_reserve(&buffer, |
471 | * event_<call>.id, | ||
470 | * sizeof(struct ftrace_raw_<call>), | 472 | * sizeof(struct ftrace_raw_<call>), |
471 | * irq_flags, pc); | 473 | * irq_flags, pc); |
472 | * if (!event) | 474 | * if (!event) |
@@ -476,7 +478,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | |||
476 | * <assign>; <-- Here we assign the entries by the __field and | 478 | * <assign>; <-- Here we assign the entries by the __field and |
477 | * __array macros. | 479 | * __array macros. |
478 | * | 480 | * |
479 | * trace_current_buffer_unlock_commit(event, irq_flags, pc); | 481 | * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc); |
480 | * } | 482 | * } |
481 | * | 483 | * |
482 | * static int ftrace_raw_reg_event_<call>(void) | 484 | * static int ftrace_raw_reg_event_<call>(void) |
@@ -568,6 +570,7 @@ static void ftrace_raw_event_##call(proto) \ | |||
568 | struct ftrace_event_call *event_call = &event_##call; \ | 570 | struct ftrace_event_call *event_call = &event_##call; \ |
569 | struct ring_buffer_event *event; \ | 571 | struct ring_buffer_event *event; \ |
570 | struct ftrace_raw_##call *entry; \ | 572 | struct ftrace_raw_##call *entry; \ |
573 | struct ring_buffer *buffer; \ | ||
571 | unsigned long irq_flags; \ | 574 | unsigned long irq_flags; \ |
572 | int __data_size; \ | 575 | int __data_size; \ |
573 | int pc; \ | 576 | int pc; \ |
@@ -577,7 +580,8 @@ static void ftrace_raw_event_##call(proto) \ | |||
577 | \ | 580 | \ |
578 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | 581 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ |
579 | \ | 582 | \ |
580 | event = trace_current_buffer_lock_reserve(event_##call.id, \ | 583 | event = trace_current_buffer_lock_reserve(&buffer, \ |
584 | event_##call.id, \ | ||
581 | sizeof(*entry) + __data_size, \ | 585 | sizeof(*entry) + __data_size, \ |
582 | irq_flags, pc); \ | 586 | irq_flags, pc); \ |
583 | if (!event) \ | 587 | if (!event) \ |
@@ -589,8 +593,9 @@ static void ftrace_raw_event_##call(proto) \ | |||
589 | \ | 593 | \ |
590 | { assign; } \ | 594 | { assign; } \ |
591 | \ | 595 | \ |
592 | if (!filter_current_check_discard(event_call, entry, event)) \ | 596 | if (!filter_current_check_discard(buffer, event_call, entry, event)) \ |
593 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ | 597 | trace_nowake_buffer_unlock_commit(buffer, \ |
598 | event, irq_flags, pc); \ | ||
594 | } \ | 599 | } \ |
595 | \ | 600 | \ |
596 | static int ftrace_raw_reg_event_##call(void *ptr) \ | 601 | static int ftrace_raw_reg_event_##call(void *ptr) \ |