path: root/block/blktrace.c
author		Arnaldo Carvalho de Melo <acme@redhat.com>	2009-02-05 13:14:13 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-05 19:01:41 -0500
commit		51a763dd84253bab1d0a1e68e11a7753d1b702ca (patch)
tree		2cc2cf0509db480391c585786285267e360c1338 /block/blktrace.c
parent		0a9877514c4fed10a70720293b37213dd172ee3e (diff)
tracing: Introduce trace_buffer_{lock_reserve,unlock_commit}
Impact: new API

These new functions do what previously was being open coded, reducing the
number of details ftrace plugin writers have to worry about.

It also standardizes the handling of stacktrace, userstacktrace and other
trace options we may introduce in the future.

With this patch, for instance, the blk tracer (and some others already in
the tree) can use the "userstacktrace" /d/tracing/trace_options facility.

$ codiff /tmp/vmlinux.before /tmp/vmlinux.after
linux-2.6-tip/kernel/trace/trace.c:
  trace_vprintk              |   -5
  trace_graph_return         |  -22
  trace_graph_entry          |  -26
  trace_function             |  -45
  __ftrace_trace_stack       |  -27
  ftrace_trace_userstack     |  -29
  tracing_sched_switch_trace |  -66
  tracing_stop               |   +1
  trace_seq_to_user          |   -1
  ftrace_trace_special       |  -63
  ftrace_special             |   +1
  tracing_sched_wakeup_trace |  -70
  tracing_reset_online_cpus  |   -1
 13 functions changed, 2 bytes added, 355 bytes removed, diff: -353

linux-2.6-tip/block/blktrace.c:
  __blk_add_trace |  -58
 1 function changed, 58 bytes removed, diff: -58

linux-2.6-tip/kernel/trace/trace.c:
  trace_buffer_lock_reserve  |  +88
  trace_buffer_unlock_commit |  +86
 2 functions changed, 174 bytes added, diff: +174

/tmp/vmlinux.after:
 16 functions changed, 176 bytes added, 413 bytes removed, diff: -237

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Frédéric Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
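For orientation, here is a rough sketch of what the two new helpers fold together, reconstructed from the open-coded sequence this patch removes from __blk_add_trace (see the hunks further down). The real definitions are added to kernel/trace/trace.c by this patch; exact signatures, option flags and stack-trace plumbing there may differ from this sketch.

#include <linux/ring_buffer.h>
#include "trace.h"	/* kernel/trace local header, for illustration only */

/*
 * Sketch only: reconstructed from the ring_buffer_lock_reserve /
 * tracing_generic_entry_update / ent->type sequence removed below,
 * not copied from the in-tree implementation.
 */
struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_array *tr, unsigned char type,
			  unsigned long len, unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	/* reserve space in the tracer's ring buffer */
	event = ring_buffer_lock_reserve(tr->buffer, len);
	if (event) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		/* fill the common entry header and tag the entry type */
		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}
	return event;
}

void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	ring_buffer_unlock_commit(tr->buffer, event);

	/*
	 * Assumption: the stacktrace/userstacktrace handling that each
	 * plugin used to open code now lives here, gated on the
	 * corresponding trace_options bits.
	 */
	if (trace_flags & TRACE_ITER_STACKTRACE)
		__trace_stack(tr, flags, 5, pc);
	if (trace_flags & TRACE_ITER_USERSTACKTRACE)
		ftrace_trace_userstack(tr, flags, pc);
	trace_wake_up();
}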
Diffstat (limited to 'block/blktrace.c')
-rw-r--r--	block/blktrace.c	21
1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/block/blktrace.c b/block/blktrace.c
index 8e52f24cc8f9..834cd84037b2 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -187,19 +187,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	cpu = raw_smp_processor_id();
 
 	if (blk_tr) {
-		struct trace_entry *ent;
 		tracing_record_cmdline(current);
 
-		event = ring_buffer_lock_reserve(blk_tr->buffer,
-						 sizeof(*t) + pdu_len);
+		pc = preempt_count();
+		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+						  sizeof(*t) + pdu_len,
+						  0, pc);
 		if (!event)
 			return;
-
-		ent = ring_buffer_event_data(event);
-		t = (struct blk_io_trace *)ent;
-		pc = preempt_count();
-		tracing_generic_entry_update(ent, 0, pc);
-		ent->type = TRACE_BLK;
+		t = ring_buffer_event_data(event);
 		goto record_it;
 	}
 
@@ -241,12 +237,7 @@ record_it:
 			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
 
 		if (blk_tr) {
-			ring_buffer_unlock_commit(blk_tr->buffer, event);
-			if (pid != 0 &&
-			    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
-			    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-				__trace_stack(blk_tr, 0, 5, pc);
-			trace_wake_up();
+			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
 			return;
 		}
 	}
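Put together, the '+' lines of the two hunks leave the ftrace path of __blk_add_trace reading roughly as follows (unrelated context elided):

	if (blk_tr) {
		tracing_record_cmdline(current);

		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/* ... and later, under the record_it: label ... */
		if (blk_tr) {
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
			return;
		}

Note that pc is captured once before the reserve and passed to both calls, so the entry header written at reserve time and any work done at commit time see the same preempt count.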