author		Steven Rostedt <srostedt@redhat.com>	2009-09-02 14:17:06 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-09-04 18:59:39 -0400
commit		e77405ad80f53966524b5c31244e13fbbbecbd84 (patch)
tree		65c05f9e1573e9958e52bb72655e00c8592aacd2 /kernel/trace/trace_sched_switch.c
parent		f633903af2ceb0cec07d45e499a072b6593d0ed1 (diff)
tracing: pass around ring buffer instead of tracer
The latency tracers (irqsoff and wakeup) can swap trace buffers on the fly. If an event is happening and has reserved data on one of the buffers, and the latency tracer swaps the global buffer with the max buffer, the result is that the event may commit the data to the wrong buffer.

This patch changes the trace recording API to receive the buffer that was used to reserve a commit. That same buffer can then be passed in to the commit, so the reserve and the commit always operate on the same buffer.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
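A minimal sketch of the pattern the patch establishes, modeled on the post-patch shape of tracing_sched_switch_trace() in the diff below. The function name example_trace_event is illustrative, and the entry-filling step is elided; the point is that the ring buffer pointer is snapshotted once and used for both the reserve and the commit:

	static void example_trace_event(struct trace_array *tr,
					unsigned long flags, int pc)
	{
		struct ftrace_event_call *call = &event_context_switch;
		/*
		 * Snapshot tr->buffer once. Even if a latency tracer
		 * swaps tr->buffer between the reserve and the commit,
		 * both operations act on the same ring buffer.
		 */
		struct ring_buffer *buffer = tr->buffer;
		struct ring_buffer_event *event;
		struct ctx_switch_entry *entry;

		event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
						  sizeof(*entry), flags, pc);
		if (!event)
			return;
		entry = ring_buffer_event_data(event);
		/* ... fill in *entry ... */

		/*
		 * Commit to the buffer the data was reserved on, not to
		 * whatever tr->buffer happens to point at now.
		 */
		if (!filter_check_discard(call, entry, buffer, event))
			trace_buffer_unlock_commit(buffer, event, flags, pc);
	}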
Diffstat (limited to 'kernel/trace/trace_sched_switch.c')
-rw-r--r--	kernel/trace/trace_sched_switch.c	18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index e1285d7b5488..5fca0f51fde4 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -28,10 +28,11 @@ tracing_sched_switch_trace(struct trace_array *tr,
 			   unsigned long flags, int pc)
 {
 	struct ftrace_event_call *call = &event_context_switch;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -44,8 +45,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, flags, pc);
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
 static void
@@ -86,8 +87,9 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	struct ftrace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
+	struct ring_buffer *buffer = tr->buffer;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -100,10 +102,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
+	ftrace_trace_stack(tr->buffer, flags, 6, pc);
+	ftrace_trace_userstack(tr->buffer, flags, pc);
 }
 
 static void