aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorLi Zefan <lizf@cn.fujitsu.com>2010-07-01 23:07:32 -0400
committerSteven Rostedt <rostedt@goodmis.org>2010-07-20 21:52:33 -0400
commite870e9a1240bcef1157ffaaf71dac63362e71904 (patch)
treeb8f57a68ff45b35dab8cdfa474e17622d275bdc6 /kernel/trace
parentcc5edb0eb9ce892b530e34a5d110382483587942 (diff)
tracing: Allow to disable cmdline recording
We found that even enabling a single trace event that will rarely be
triggered can add big overhead to context switch.

(lmbench context switch test)
-------------------------------------------------
 2p/0K 2p/16K 2p/64K 8p/16K 8p/64K 16p/16K 16p/64K
 ctxsw  ctxsw  ctxsw  ctxsw  ctxsw   ctxsw   ctxsw
------ ------ ------ ------ ------ ------- -------
  2.19   2.3   2.21   2.56   2.13    2.54    2.07
  2.39  2.51   2.35   2.75   2.27    2.81    2.24

The overhead is 6% ~ 11%.

It's because when a trace event is enabled 3 tracepoints (sched_switch,
sched_wakeup, sched_wakeup_new) will be activated to map pid to cmdname.

We'd like to avoid this overhead, so add a trace option '(no)record-cmd'
to allow to disable cmdline recording.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
LKML-Reference: <4C2D57F4.2050204@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--kernel/trace/trace.h3
-rw-r--r--kernel/trace/trace_events.c30
3 files changed, 36 insertions, 3 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8683dec6946b..af9042977c08 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -344,7 +344,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
344/* trace_flags holds trace_options default values */ 344/* trace_flags holds trace_options default values */
345unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | 345unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
346 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | 346 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
347 TRACE_ITER_GRAPH_TIME; 347 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
348 348
349static int trace_stop_count; 349static int trace_stop_count;
350static DEFINE_SPINLOCK(tracing_start_lock); 350static DEFINE_SPINLOCK(tracing_start_lock);
@@ -428,6 +428,7 @@ static const char *trace_options[] = {
428 "latency-format", 428 "latency-format",
429 "sleep-time", 429 "sleep-time",
430 "graph-time", 430 "graph-time",
431 "record-cmd",
431 NULL 432 NULL
432}; 433};
433 434
@@ -2561,6 +2562,9 @@ static void set_tracer_flags(unsigned int mask, int enabled)
2561 trace_flags |= mask; 2562 trace_flags |= mask;
2562 else 2563 else
2563 trace_flags &= ~mask; 2564 trace_flags &= ~mask;
2565
2566 if (mask == TRACE_ITER_RECORD_CMD)
2567 trace_event_enable_cmd_record(enabled);
2564} 2568}
2565 2569
2566static ssize_t 2570static ssize_t
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 84d3f123e86f..7778f067fc8b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -591,6 +591,7 @@ enum trace_iterator_flags {
591 TRACE_ITER_LATENCY_FMT = 0x20000, 591 TRACE_ITER_LATENCY_FMT = 0x20000,
592 TRACE_ITER_SLEEP_TIME = 0x40000, 592 TRACE_ITER_SLEEP_TIME = 0x40000,
593 TRACE_ITER_GRAPH_TIME = 0x80000, 593 TRACE_ITER_GRAPH_TIME = 0x80000,
594 TRACE_ITER_RECORD_CMD = 0x100000,
594}; 595};
595 596
596/* 597/*
@@ -723,6 +724,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
723 return 0; 724 return 0;
724} 725}
725 726
727extern void trace_event_enable_cmd_record(bool enable);
728
726extern struct mutex event_mutex; 729extern struct mutex event_mutex;
727extern struct list_head ftrace_events; 730extern struct list_head ftrace_events;
728 731
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e8e6043f4d29..09b4fa6e4d3b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -170,6 +170,26 @@ int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
170} 170}
171EXPORT_SYMBOL_GPL(ftrace_event_reg); 171EXPORT_SYMBOL_GPL(ftrace_event_reg);
172 172
173void trace_event_enable_cmd_record(bool enable)
174{
175 struct ftrace_event_call *call;
176
177 mutex_lock(&event_mutex);
178 list_for_each_entry(call, &ftrace_events, list) {
179 if (!(call->flags & TRACE_EVENT_FL_ENABLED))
180 continue;
181
182 if (enable) {
183 tracing_start_cmdline_record();
184 call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
185 } else {
186 tracing_stop_cmdline_record();
187 call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
188 }
189 }
190 mutex_unlock(&event_mutex);
191}
192
173static int ftrace_event_enable_disable(struct ftrace_event_call *call, 193static int ftrace_event_enable_disable(struct ftrace_event_call *call,
174 int enable) 194 int enable)
175{ 195{
@@ -179,13 +199,19 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
179 case 0: 199 case 0:
180 if (call->flags & TRACE_EVENT_FL_ENABLED) { 200 if (call->flags & TRACE_EVENT_FL_ENABLED) {
181 call->flags &= ~TRACE_EVENT_FL_ENABLED; 201 call->flags &= ~TRACE_EVENT_FL_ENABLED;
182 tracing_stop_cmdline_record(); 202 if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
203 tracing_stop_cmdline_record();
204 call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
205 }
183 call->class->reg(call, TRACE_REG_UNREGISTER); 206 call->class->reg(call, TRACE_REG_UNREGISTER);
184 } 207 }
185 break; 208 break;
186 case 1: 209 case 1:
187 if (!(call->flags & TRACE_EVENT_FL_ENABLED)) { 210 if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
188 tracing_start_cmdline_record(); 211 if (trace_flags & TRACE_ITER_RECORD_CMD) {
212 tracing_start_cmdline_record();
213 call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
214 }
189 ret = call->class->reg(call, TRACE_REG_REGISTER); 215 ret = call->class->reg(call, TRACE_REG_REGISTER);
190 if (ret) { 216 if (ret) {
191 tracing_stop_cmdline_record(); 217 tracing_stop_cmdline_record();