author		Li Zefan <lizf@cn.fujitsu.com>		2010-07-01 23:07:32 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-07-20 21:52:33 -0400
commit		e870e9a1240bcef1157ffaaf71dac63362e71904 (patch)
tree		b8f57a68ff45b35dab8cdfa474e17622d275bdc6 /kernel/trace/trace_events.c
parent		cc5edb0eb9ce892b530e34a5d110382483587942 (diff)
tracing: Allow to disable cmdline recording
We found that enabling even a single trace event that is rarely
triggered can add significant overhead to context switches.
(lmbench context switch test)
-------------------------------------------------------
 2p/0K  2p/16K  2p/64K  8p/16K  8p/64K  16p/16K  16p/64K
 ctxsw   ctxsw   ctxsw   ctxsw   ctxsw    ctxsw    ctxsw
------  ------  ------  ------  ------  -------  -------
  2.19    2.3     2.21    2.56    2.13     2.54     2.07
  2.39    2.51    2.35    2.75    2.27     2.81     2.24
The overhead (second row, with the event enabled, versus the first row)
is 6% ~ 11%.
This is because when any trace event is enabled, three tracepoints
(sched_switch, sched_wakeup, sched_wakeup_new) are activated to map
pids to cmdnames.
To avoid this overhead, add a trace option '(no)record-cmd' that allows
cmdline recording to be disabled.
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
LKML-Reference: <4C2D57F4.2050204@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
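The diffstat below is limited to kernel/trace/trace_events.c, so the part of
the patch that registers and handles the new option is not visible here. As a
rough sketch of how the option is expected to be consumed — the trace_options
write path in kernel/trace/trace.c propagating the toggle to all enabled
events via the new trace_event_enable_cmd_record() helper — the following is
assumed from the option name in the commit message, not taken from the hunks
shown on this page:

/*
 * Sketch (assumed, not part of the diff shown below): set_tracer_flags()
 * in kernel/trace/trace.c is the handler for writes to trace_options.
 * trace_flags and TRACE_ITER_RECORD_CMD come from kernel/trace/trace.h.
 */
static void set_tracer_flags(unsigned int mask, int enabled)
{
	/* do nothing if the flag already has the requested state */
	if (!!(trace_flags & mask) == !!enabled)
		return;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	/* propagate (no)record-cmd to every currently enabled event */
	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);
}

With that in place, cmdline recording should be switchable at run time by
writing 'record-cmd' or 'norecord-cmd' to the tracing trace_options file,
independently of which events are enabled.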
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--	kernel/trace/trace_events.c	30
1 file changed, 28 insertions, 2 deletions
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e8e6043f4d29..09b4fa6e4d3b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -170,6 +170,26 @@ int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
 }
 EXPORT_SYMBOL_GPL(ftrace_event_reg);
 
+void trace_event_enable_cmd_record(bool enable)
+{
+	struct ftrace_event_call *call;
+
+	mutex_lock(&event_mutex);
+	list_for_each_entry(call, &ftrace_events, list) {
+		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
+			continue;
+
+		if (enable) {
+			tracing_start_cmdline_record();
+			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
+		} else {
+			tracing_stop_cmdline_record();
+			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
+		}
+	}
+	mutex_unlock(&event_mutex);
+}
+
 static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 					int enable)
 {
@@ -179,13 +199,19 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 	case 0:
 		if (call->flags & TRACE_EVENT_FL_ENABLED) {
 			call->flags &= ~TRACE_EVENT_FL_ENABLED;
-			tracing_stop_cmdline_record();
+			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
+				tracing_stop_cmdline_record();
+				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
+			}
 			call->class->reg(call, TRACE_REG_UNREGISTER);
 		}
 		break;
 	case 1:
 		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
-			tracing_start_cmdline_record();
+			if (trace_flags & TRACE_ITER_RECORD_CMD) {
+				tracing_start_cmdline_record();
+				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
+			}
 			ret = call->class->reg(call, TRACE_REG_REGISTER);
 			if (ret) {
 				tracing_stop_cmdline_record();