aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_boot.c
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2009-09-02 14:17:06 -0400
committerSteven Rostedt <rostedt@goodmis.org>2009-09-04 18:59:39 -0400
commite77405ad80f53966524b5c31244e13fbbbecbd84 (patch)
tree65c05f9e1573e9958e52bb72655e00c8592aacd2 /kernel/trace/trace_boot.c
parentf633903af2ceb0cec07d45e499a072b6593d0ed1 (diff)
tracing: pass around ring buffer instead of tracer
The latency tracers (irqsoff and wakeup) can swap trace buffers on the fly. If an event is happening and has reserved data on one of the buffers, and the latency tracer swaps the global buffer with the max buffer, the result is that the event may commit the data to the wrong buffer. This patch changes the API of the trace recording so that it receives the buffer that was used to reserve a commit. That buffer can then be passed in to the commit. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_boot.c')
-rw-r--r--kernel/trace/trace_boot.c12
1 files changed, 8 insertions, 4 deletions
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 86313932781..19bfc75d467 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -130,6 +130,7 @@ struct tracer boot_tracer __read_mostly =
130void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) 130void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
131{ 131{
132 struct ring_buffer_event *event; 132 struct ring_buffer_event *event;
133 struct ring_buffer *buffer;
133 struct trace_boot_call *entry; 134 struct trace_boot_call *entry;
134 struct trace_array *tr = boot_trace; 135 struct trace_array *tr = boot_trace;
135 136
@@ -142,13 +143,14 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
142 sprint_symbol(bt->func, (unsigned long)fn); 143 sprint_symbol(bt->func, (unsigned long)fn);
143 preempt_disable(); 144 preempt_disable();
144 145
145 event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL, 146 buffer = tr->buffer;
147 event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL,
146 sizeof(*entry), 0, 0); 148 sizeof(*entry), 0, 0);
147 if (!event) 149 if (!event)
148 goto out; 150 goto out;
149 entry = ring_buffer_event_data(event); 151 entry = ring_buffer_event_data(event);
150 entry->boot_call = *bt; 152 entry->boot_call = *bt;
151 trace_buffer_unlock_commit(tr, event, 0, 0); 153 trace_buffer_unlock_commit(buffer, event, 0, 0);
152 out: 154 out:
153 preempt_enable(); 155 preempt_enable();
154} 156}
@@ -156,6 +158,7 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
156void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) 158void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
157{ 159{
158 struct ring_buffer_event *event; 160 struct ring_buffer_event *event;
161 struct ring_buffer *buffer;
159 struct trace_boot_ret *entry; 162 struct trace_boot_ret *entry;
160 struct trace_array *tr = boot_trace; 163 struct trace_array *tr = boot_trace;
161 164
@@ -165,13 +168,14 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
165 sprint_symbol(bt->func, (unsigned long)fn); 168 sprint_symbol(bt->func, (unsigned long)fn);
166 preempt_disable(); 169 preempt_disable();
167 170
168 event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET, 171 buffer = tr->buffer;
172 event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
169 sizeof(*entry), 0, 0); 173 sizeof(*entry), 0, 0);
170 if (!event) 174 if (!event)
171 goto out; 175 goto out;
172 entry = ring_buffer_event_data(event); 176 entry = ring_buffer_event_data(event);
173 entry->boot_ret = *bt; 177 entry->boot_ret = *bt;
174 trace_buffer_unlock_commit(tr, event, 0, 0); 178 trace_buffer_unlock_commit(buffer, event, 0, 0);
175 out: 179 out:
176 preempt_enable(); 180 preempt_enable();
177} 181}