about summary refs log tree commit diff stats
path: root/kernel/trace/trace_power.c
diff options
context:
space:
mode:
author	Arnaldo Carvalho de Melo <acme@redhat.com>	2009-02-05 13:14:13 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-05 19:01:41 -0500
commit	51a763dd84253bab1d0a1e68e11a7753d1b702ca (patch)
tree	2cc2cf0509db480391c585786285267e360c1338 /kernel/trace/trace_power.c
parent	0a9877514c4fed10a70720293b37213dd172ee3e (diff)
tracing: Introduce trace_buffer_{lock_reserve,unlock_commit}
Impact: new API These new functions do what previously was being open coded, reducing the number of details ftrace plugin writers have to worry about. It also standardizes the handling of stacktrace, userstacktrace and other trace options we may introduce in the future. With this patch, for instance, the blk tracer (and some others already in the tree) can use the "userstacktrace" /d/tracing/trace_options facility. $ codiff /tmp/vmlinux.before /tmp/vmlinux.after linux-2.6-tip/kernel/trace/trace.c: trace_vprintk | -5 trace_graph_return | -22 trace_graph_entry | -26 trace_function | -45 __ftrace_trace_stack | -27 ftrace_trace_userstack | -29 tracing_sched_switch_trace | -66 tracing_stop | +1 trace_seq_to_user | -1 ftrace_trace_special | -63 ftrace_special | +1 tracing_sched_wakeup_trace | -70 tracing_reset_online_cpus | -1 13 functions changed, 2 bytes added, 355 bytes removed, diff: -353 linux-2.6-tip/block/blktrace.c: __blk_add_trace | -58 1 function changed, 58 bytes removed, diff: -58 linux-2.6-tip/kernel/trace/trace.c: trace_buffer_lock_reserve | +88 trace_buffer_unlock_commit | +86 2 functions changed, 174 bytes added, diff: +174 /tmp/vmlinux.after: 16 functions changed, 176 bytes added, 413 bytes removed, diff: -237 Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com> Acked-by: Frédéric Weisbecker <fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_power.c')
-rw-r--r--	kernel/trace/trace_power.c	|	20
1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 3b1a292d12d2..bfc21f8079ab 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -124,17 +124,13 @@ void trace_power_end(struct power_trace *it)
 	it->end = ktime_get();
 	data = tr->data[smp_processor_id()];
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
 out:
 	preempt_enable();
 }
@@ -159,17 +155,13 @@ void trace_power_mark(struct power_trace *it, unsigned int type,
 	it->end = it->stamp;
 	data = tr->data[smp_processor_id()];
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, 0);
-	entry->ent.type = TRACE_POWER;
 	entry->state_data = *it;
-	ring_buffer_unlock_commit(tr->buffer, event);
-
-	trace_wake_up();
-
 out:
 	preempt_enable();
 }