Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 92
1 file changed, 50 insertions(+), 42 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 63dbc7ff213f..0f0881676dc9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,9 +43,6 @@
 
 #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
 
-unsigned long __read_mostly tracing_max_latency;
-unsigned long __read_mostly tracing_thresh;
-
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
@@ -338,45 +335,6 @@ static struct {
 
 int trace_clock_id;
 
-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a raw_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-
-/*
- * Copy the new maximum trace into the separate maximum-trace
- * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
- */
-static void
-__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
-{
-	struct trace_array_cpu *data = tr->data[cpu];
-
-	max_tr.cpu = cpu;
-	max_tr.time_start = data->preempt_timestamp;
-
-	data = max_tr.data[cpu];
-	data->saved_latency = tracing_max_latency;
-
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
-	data->pid = tsk->pid;
-	data->uid = task_uid(tsk);
-	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
-	data->policy = tsk->policy;
-	data->rt_priority = tsk->rt_priority;
-
-	/* record this tasks comm */
-	tracing_record_cmdline(tsk);
-}
-
 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
 	int len;
@@ -420,6 +378,53 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }
 
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ *
+ * It is also used in other places outside the update_max_tr
+ * so it needs to be defined outside of the
+ * CONFIG_TRACER_MAX_TRACE.
+ */
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+unsigned long __read_mostly tracing_max_latency;
+unsigned long __read_mostly tracing_thresh;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ */
+static void
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+	struct trace_array_cpu *data = tr->data[cpu];
+
+	max_tr.cpu = cpu;
+	max_tr.time_start = data->preempt_timestamp;
+
+	data = max_tr.data[cpu];
+	data->saved_latency = tracing_max_latency;
+
+	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	data->pid = tsk->pid;
+	data->uid = task_uid(tsk);
+	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+	data->policy = tsk->policy;
+	data->rt_priority = tsk->rt_priority;
+
+	/* record this tasks comm */
+	tracing_record_cmdline(tsk);
+}
+
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -476,6 +481,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 /**
  * register_tracer - register a tracer with the ftrace system.
@@ -3952,11 +3958,13 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("current_tracer", 0644, d_tracer,
 			&global_trace, &set_tracer_fops);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
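
The net effect of the patch is a conditional-compilation pattern: the max-latency state (tracing_max_latency, tracing_thresh) and __update_max_tr()/update_max_tr*() move under CONFIG_TRACER_MAX_TRACE, while ftrace_max_lock is defined outside the guard because other code paths also take it. The sketch below is a minimal userspace illustration of that pattern, not kernel code; the file name, the update_max() helper, and passing the Kconfig symbol via -D are assumptions made for the example only.

/*
 * guard_sketch.c - illustrative sketch of the #ifdef guard pattern:
 * optional feature state lives under the guard, the shared lock does not.
 *
 * Build:  cc guard_sketch.c -o guard_sketch -lpthread
 * Or:     cc -DCONFIG_TRACER_MAX_TRACE guard_sketch.c -o guard_sketch -lpthread
 */
#include <stdio.h>
#include <pthread.h>

/* Defined unconditionally, mirroring how ftrace_max_lock stays outside
 * CONFIG_TRACER_MAX_TRACE because code in either configuration uses it. */
static pthread_mutex_t max_lock = PTHREAD_MUTEX_INITIALIZER;

#ifdef CONFIG_TRACER_MAX_TRACE
/* State and helpers that exist only when the feature is configured in. */
static unsigned long tracing_max_latency;

static void update_max(unsigned long latency)
{
	pthread_mutex_lock(&max_lock);
	if (latency > tracing_max_latency)
		tracing_max_latency = latency;
	pthread_mutex_unlock(&max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

int main(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	update_max(42);
	printf("max latency: %lu\n", tracing_max_latency);
#else
	/* The lock still exists here; only the optional state is gone. */
	pthread_mutex_lock(&max_lock);
	printf("max-trace support compiled out\n");
	pthread_mutex_unlock(&max_lock);
#endif
	return 0;
}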