author		Steven Rostedt <srostedt@redhat.com>	2009-08-27 16:52:21 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-08-27 16:58:05 -0400
commit		5d4a9dba2d7fbab69f00dedd430d1788834a055a
tree		a57ec209fc1ac86d7acbd67164b068ecdadd6317
parent		c0729be99cb2b9d9749256254f1c40a801835896
tracing: only show tracing_max_latency when latency tracer configured
The tracing_max_latency file should only be present when one of the
latency tracers ({preempt|irqs}off, wakeup*) is enabled.
This patch also removes the tracing_thresh file when no latency tracer
is enabled, and compiles out the code that is only used by the latency
tracers.
Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
 kernel/trace/trace.c | 92
 kernel/trace/trace.h |  2
 2 files changed, 52 insertions(+), 42 deletions(-)
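A quick way to observe the effect from user space is to check whether the file still exists under the tracing debugfs directory. The sketch below is purely illustrative (it is not part of the patch) and assumes debugfs is mounted at /sys/kernel/debug.

/* check_max_latency.c: illustrative only; assumes debugfs is mounted at
 * /sys/kernel/debug. After this patch, the file only exists when a
 * latency tracer (and thus CONFIG_TRACER_MAX_TRACE) is configured.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/tracing_max_latency";

	if (access(path, F_OK) == 0)
		printf("%s present: a latency tracer is configured\n", path);
	else
		printf("%s absent: no latency tracer configured\n", path);
	return 0;
}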
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 63dbc7ff213f..0f0881676dc9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,9 +43,6 @@
 
 #define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)
 
-unsigned long __read_mostly	tracing_max_latency;
-unsigned long __read_mostly	tracing_thresh;
-
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
@@ -338,45 +335,6 @@ static struct {
 
 int trace_clock_id;
 
-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a raw_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-
-/*
- * Copy the new maximum trace into the separate maximum-trace
- * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
- */
-static void
-__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
-{
-	struct trace_array_cpu *data = tr->data[cpu];
-
-	max_tr.cpu = cpu;
-	max_tr.time_start = data->preempt_timestamp;
-
-	data = max_tr.data[cpu];
-	data->saved_latency = tracing_max_latency;
-
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
-	data->pid = tsk->pid;
-	data->uid = task_uid(tsk);
-	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
-	data->policy = tsk->policy;
-	data->rt_priority = tsk->rt_priority;
-
-	/* record this tasks comm */
-	tracing_record_cmdline(tsk);
-}
-
 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
 	int len;
@@ -420,6 +378,53 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }
 
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ *
+ * It is also used in other places outside the update_max_tr
+ * so it needs to be defined outside of the
+ * CONFIG_TRACER_MAX_TRACE.
+ */
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+unsigned long __read_mostly	tracing_max_latency;
+unsigned long __read_mostly	tracing_thresh;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ */
+static void
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+	struct trace_array_cpu *data = tr->data[cpu];
+
+	max_tr.cpu = cpu;
+	max_tr.time_start = data->preempt_timestamp;
+
+	data = max_tr.data[cpu];
+	data->saved_latency = tracing_max_latency;
+
+	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	data->pid = tsk->pid;
+	data->uid = task_uid(tsk);
+	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+	data->policy = tsk->policy;
+	data->rt_priority = tsk->rt_priority;
+
+	/* record this tasks comm */
+	tracing_record_cmdline(tsk);
+}
+
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -476,6 +481,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 /**
  * register_tracer - register a tracer with the ftrace system.
@@ -3952,11 +3958,13 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("current_tracer", 0644, d_tracer,
 			&global_trace, &set_tracer_fops);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 654fd657bd03..e2c06b21dd82 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -473,12 +473,14 @@ void unregister_tracer(struct tracer *type);
 
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
 extern unsigned long tracing_max_latency;
 extern unsigned long tracing_thresh;
 
 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 #ifdef CONFIG_STACKTRACE
 void ftrace_trace_stack(struct trace_array *tr, unsigned long flags,
