path: root/kernel/trace/trace_functions_graph.c
author     Steven Rostedt <srostedt@redhat.com>  2009-09-02 14:17:06 -0400
committer  Steven Rostedt <rostedt@goodmis.org>  2009-09-04 18:59:39 -0400
commit     e77405ad80f53966524b5c31244e13fbbbecbd84 (patch)
tree       65c05f9e1573e9958e52bb72655e00c8592aacd2 /kernel/trace/trace_functions_graph.c
parent     f633903af2ceb0cec07d45e499a072b6593d0ed1 (diff)
tracing: pass around ring buffer instead of tracer
The latency tracers (irqsoff and wakeup) can swap trace buffers on the fly. If an event is happening and has reserved data on one of the buffers, and the latency tracer swaps the global buffer with the max buffer, the result is that the event may commit the data to the wrong buffer.

This patch changes the trace recording API so that the caller receives the buffer that was used to reserve a commit. That same buffer can then be passed in to the commit.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
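For illustration only (not part of the patch), a minimal sketch of the reserve/commit pattern before and after this change, assembled from the call sites in the diff below; the entry-filling code is abbreviated:

	/* Before: the buffer is derived from the tracer twice.  A
	 * max-buffer swap between the reserve and the commit can make
	 * the commit land on a different buffer than the reserve. */
	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	/* ... fill in the entry ... */
	ring_buffer_unlock_commit(tr->buffer, event);

	/* After: tr->buffer is read once into a local, and that same
	 * pointer is used for the reserve, the filter check, and the
	 * commit, so all three always target the same buffer. */
	struct ring_buffer *buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	/* ... fill in the entry ... */
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);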
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c  14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3f4a251b7d16..b3749a2c3132 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -173,19 +173,20 @@ static int __trace_graph_entry(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return 0;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return 0;
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 	return 1;
 }
@@ -236,19 +237,20 @@ static void __trace_graph_return(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET,
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
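For reference, the prototypes these call sites assume after the change, inferred from the arguments above rather than quoted from the patch (return types in particular are assumptions):

	struct ring_buffer_event *
	trace_buffer_lock_reserve(struct ring_buffer *buffer, int type,
				  unsigned long len, unsigned long flags,
				  int pc);

	int filter_current_check_discard(struct ring_buffer *buffer,
					 struct ftrace_event_call *call,
					 void *rec,
					 struct ring_buffer_event *event);

	int ring_buffer_unlock_commit(struct ring_buffer *buffer,
				      struct ring_buffer_event *event);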