author	Steven Rostedt <srostedt@redhat.com>	2008-05-12 15:20:42 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 14:32:06 -0400
commit	bc0c38d139ec7fcd5c030aea16b008f3732e42ac (patch)
tree	adaaf5e2ca49bbd7025bc357f87df9cb47641022	/kernel/trace/trace.c
parent	16444a8a40d4c7b4f6de34af0cae1f76a4f6c901 (diff)
ftrace: latency tracer infrastructure
This patch adds the latency tracer infrastructure. This patch does not
add anything that will select and turn it on, but will be used by later
patches.

If it were to be compiled, it would add the following files to the
debugfs:

 The root tracing directory:

  /debugfs/tracing/

This patch also adds the following files:

  available_tracers
     list of available tracers. Currently no tracers are available.
     Looking into this file only shows "none" which is used to
     unregister all tracers.

  current_tracer
     The tracer that is currently active. Empty on start up. To switch
     to a tracer simply echo one of the tracers that are listed in
     available_tracers:

       example: (used with later patches)
       echo function > /debugfs/tracing/current_tracer

     To disable the tracer:
       echo disable > /debugfs/tracing/current_tracer

  tracing_enabled
     echoing "1" into this file starts the ftrace function tracing
     (if sysctl kernel.ftrace_enabled=1). echoing "0" turns it off.

  latency_trace
     This file is read-only and holds the result of the trace.

  trace
     This file outputs an easier-to-read version of the trace.

  iter_ctrl
     Controls the way the output of traces looks. So far there are two
     controls:

       echoing in "symonly" will only show the kallsyms variables
       without the addresses (if kallsyms was configured)

       echoing in "verbose" will change the output to show a lot more
       data, but it is not very easy for humans to read.

       echoing in "nosymonly" turns off symonly.
       echoing in "noverbose" turns off verbose.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
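
As a quick illustration (editor's addition, not part of the commit), the
same control files can also be driven programmatically. This minimal C
sketch assumes debugfs is mounted at /debugfs as in the examples above;
the helper write_tracing_file() is hypothetical:

  #include <stdio.h>

  /* Editor's sketch: drive the ftrace control files from C instead of
   * the shell. write_tracing_file() is a made-up helper; the /debugfs
   * mount point follows the commit message above. */
  static int write_tracing_file(const char *name, const char *val)
  {
          char path[256];
          FILE *f;

          snprintf(path, sizeof(path), "/debugfs/tracing/%s", name);
          f = fopen(path, "w");
          if (!f)
                  return -1;
          fputs(val, f);
          fclose(f);
          return 0;
  }

  int main(void)
  {
          /* "function" needs a later patch that registers the tracer */
          write_tracing_file("current_tracer", "function");
          write_tracing_file("tracing_enabled", "1");  /* start tracing */
          /* ... run the workload being traced ... */
          write_tracing_file("tracing_enabled", "0");  /* stop tracing */
          return 0;
  }
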
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  1547
1 file changed, 1547 insertions, 0 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
new file mode 100644
index 000000000000..1b8eca7650d4
--- /dev/null
+++ b/kernel/trace/trace.c
@@ -0,0 +1,1547 @@
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

static long notrace
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
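
/*
 * Editor's note: ns2usecs() rounds to the nearest microsecond rather
 * than truncating, e.g. ns2usecs(1499) == 1 but ns2usecs(1500) == 2,
 * because 500 ns is added before the divide by 1000.
 */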

static atomic_t			tracer_counter;
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int			tracer_enabled;
static unsigned long		trace_nr_entries = 4096UL;

static struct tracer		*trace_types __read_mostly;
static struct tracer		*current_trace __read_mostly;
static int			max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);
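
/*
 * Editor's note: the buffer size can thus be overridden at boot,
 * e.g. "trace_entries=65536" on the kernel command line requests
 * 65536 entries per cpu instead of the default 4096 (the final count
 * is rounded by tracer_alloc_buffers() below, which allocates whole
 * pages).
 */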

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF	= 0x01,
	TRACE_FLAG_NEED_RESCHED	= 0x02,
	TRACE_FLAG_HARDIRQ	= 0x04,
	TRACE_FLAG_SOFTIRQ	= 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	NULL
};

static unsigned trace_flags;


/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void notrace
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	void *save_trace;
	int i;

	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		save_trace = max_tr.data[i]->trace;
		memcpy(max_tr.data[i], data, sizeof(*data));
		data->trace = save_trace;
	}

	__update_max_tr(tr, tsk, cpu);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	void *save_trace;
	int i;

	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	save_trace = max_tr.data[cpu]->trace;
	memcpy(max_tr.data[cpu], data, sizeof(*data));
	data->trace = save_trace;

	__update_max_tr(tr, tsk, cpu);
}

int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
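
/*
 * Editor's sketch (not part of this patch): the smallest tracer a
 * later patch could register. The "nop" name and the empty callbacks
 * are hypothetical; register_tracer() above only requires .name.
 */
#if 0
static void nop_trace_init(struct trace_array *tr)
{
	/* grab buffers, reset state, start the trace */
}

static void nop_trace_reset(struct trace_array *tr)
{
	/* stop the trace, release anything init took */
}

static struct tracer nop_trace __read_mostly = {
	.name	= "nop",
	.init	= nop_trace_init,
	.reset	= nop_trace_reset,
};

static int __init init_nop_trace(void)
{
	return register_tracer(&nop_trace);
}
device_initcall(init_nop_trace);
#endif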

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

void notrace tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	atomic_set(&data->underrun, 0);
}

#ifdef CONFIG_FTRACE
static void notrace
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	raw_local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
#endif

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

static void notrace trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}
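
/*
 * Editor's note: map_pid_to_cmdline[] maps a pid to a slot in
 * saved_cmdlines[], and map_cmdline_to_pid[] is meant to record the
 * reverse mapping so a recycled slot can invalidate its old pid.
 * With only SAVED_CMDLINES (128) slots a busy system will recycle
 * entries, and trace_find_cmdline() then falls back to "<...>".
 * Note that this version only initializes map_cmdline_to_pid[] in
 * trace_init_cmdlines(); trace_save_cmdline() never updates it.
 */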

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr,
			struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;

	idx = data->trace_idx;
	idx_next = idx + 1;

	if (unlikely(idx_next >= tr->entries)) {
		atomic_inc(&data->underrun);
		idx_next = 0;
	}

	data->trace_idx = idx_next;

	if (unlikely(idx_next != 0 && atomic_read(&data->underrun)))
		atomic_inc(&data->underrun);

	entry = data->trace + idx * TRACE_ENTRY_SIZE;

	return entry;
}
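
/*
 * Editor's note: the per-cpu buffer is a flat array used as a ring.
 * With tr->entries == 4096, trace_idx runs 0..4095; the first wrap
 * bumps "underrun" once, and from then on every new entry increments
 * it again, so underrun counts (roughly) how many old entries have
 * been overwritten. trace_entry_idx() below uses it to locate the
 * oldest entry when reading.
 */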

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry,
			     unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->idx		= atomic_inc_return(&tracer_counter);
	entry->preempt_count	= pc & 0xff;
	entry->pid		= tsk->pid;
	entry->t		= now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip,
       unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.ip = ip;
	entry->fn.parent_ip = parent_ip;
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
}

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, unsigned long idx, int cpu)
{
	struct trace_entry *array = tr->data[cpu]->trace;
	unsigned long underrun;

	if (idx >= tr->entries)
		return NULL;

	underrun = atomic_read(&tr->data[cpu]->underrun);
	if (underrun)
		idx = ((underrun - 1) + idx) % tr->entries;
	else if (idx >= tr->data[cpu]->trace_idx)
		return NULL;

	return &array[idx];
}

static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!tr->data[cpu]->trace)
			continue;
		ent = trace_entry_idx(tr, iter->next_idx[cpu], cpu);
		if (ent &&
		    (!next || (long)(next->idx - ent->idx) > 0)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

static void *find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);

	if (next) {
		iter->next_idx[next_cpu]++;
		iter->idx++;
	}
	iter->ent = next;
	iter->cpu = next_cpu;

	return next ? iter : NULL;
}

static void notrace *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *ent;
	void *last_ent = iter->ent;
	int i = (int)*pos;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		for (i = 0; i < NR_CPUS; i++)
			iter->next_idx[i] = 0;

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

static void
seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	seq_printf(m, fmt, str);
#endif
}

static void
seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	seq_printf(m, fmt, str);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static void notrace
seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		seq_printf(m, "0");
		return;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(m, "%s", ip);
	else
		seq_print_sym_short(m, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		seq_printf(m, " <" IP_FMT ">", ip);
}

static void notrace print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}
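
/*
 * Editor's note: under the header above, a non-verbose latency line
 * would read, for example (hypothetical values):
 *
 *   bash-1234    0d.h1    5us : do_IRQ (ret_from_intr)
 *
 * i.e. command "bash", pid 1234, CPU 0, irqs off ('d'), no resched
 * pending ('.'), hardirq context ('h'), preempt_count 1, 5us into
 * the trace. See lat_print_generic() and lat_print_timestamp() below.
 */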

static void notrace print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}


static void notrace
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long underruns = 0;
	unsigned long underrun;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (tr->data[cpu]->trace) {
			underrun = atomic_read(&tr->data[cpu]->underrun);
			if (underrun) {
				underruns += underrun;
				entries += tr->entries;
			} else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   data->saved_latency,
		   entries,
		   (entries + underruns),
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(m, data->critical_start, sym_flags);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(m, data->critical_end, sym_flags);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

static void notrace
lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
	seq_printf(m, "%d", cpu);
	seq_printf(m, "%c%c",
		   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
		   ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		seq_putc(m, 'H');
	else {
		if (hardirq)
			seq_putc(m, 'h');
		else {
			if (softirq)
				seq_putc(m, 's');
			else
				seq_putc(m, '.');
		}
	}

	if (entry->preempt_count)
		seq_printf(m, "%x", entry->preempt_count);
	else
		seq_puts(m, ".");
}

unsigned long preempt_mark_thresh = 100;

static void notrace
lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	seq_printf(m, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		seq_puts(m, "!: ");
	else if (rel_usecs > 1)
		seq_puts(m, "+: ");
	else
		seq_puts(m, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static void notrace
print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
	      unsigned int trace_idx, int cpu)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
			   " %ld.%03ldms (+%ld.%03ldms): ",
			   comm,
			   entry->pid, cpu, entry->flags,
			   entry->preempt_count, trace_idx,
			   ns2usecs(entry->t),
			   abs_usecs/1000,
			   abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
	} else {
		lat_print_generic(m, entry, cpu);
		lat_print_timestamp(m, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		seq_puts(m, " (");
		seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		seq_puts(m, ")\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio,
			   comm);
		break;
	}
}

static void notrace
print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry = iter->ent;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int S;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	seq_printf(m, "%16s-%-5d ", comm, entry->pid);
	seq_printf(m, "[%02d] ", iter->cpu);
	seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);

	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
		    entry->fn.parent_ip) {
			seq_printf(m, " <-");
			seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		}
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		seq_printf(m, " %d:%d:%c ==> %d:%d\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio);
		break;
	}
	seq_printf(m, "\n");
}

static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (data->trace &&
		    (data->trace_idx ||
		     atomic_read(&data->underrun)))
			return 0;
	}
	return 1;
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			print_lat_fmt(m, iter, iter->idx, iter->cpu);
		else
			print_trace_fmt(m, iter);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start	= s_start,
	.next	= s_next,
	.stop	= s_stop,
	.show	= s_show,
};

static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}


static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start	= t_start,
	.next	= t_next,
	.stop	= t_stop,
	.show	= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open		= tracing_lt_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};

static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, r);

	kfree(buf);

	return r;
}

static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations tracing_iter_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_iter_ctrl_read,
	.write		= tracing_iter_ctrl_write,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > 64)
		r = 64;
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	*ptr = val * 1000;

	return cnt;
}

static struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_ctrl_read,
	.write		= tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static struct file_operations tracing_read_long_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_long,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
}

/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
	.name	= "none",
};

static inline notrace int page_order(const unsigned long size)
{
	const unsigned long nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	return ilog2(roundup_pow_of_two(nr_pages));
}
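
/*
 * Editor's worked example (assuming a hypothetical TRACE_ENTRY_SIZE
 * of 32 bytes and 4 KB pages): 4096 entries * 32 bytes = 128 KB =
 * 32 pages, so page_order() returns ilog2(32) == 5 and
 * tracer_alloc_buffers() below grabs one 2^5-page (128 KB) block
 * per cpu per buffer.
 */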

__init static int tracer_alloc_buffers(void)
{
	const int order = page_order(trace_nr_entries * TRACE_ENTRY_SIZE);
	const unsigned long size = (1UL << order) << PAGE_SHIFT;
	struct trace_entry *array;
	int i;

	for_each_possible_cpu(i) {
		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);

		array = (struct trace_entry *)
			__get_free_pages(GFP_KERNEL, order);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate"
			       " %ld bytes for trace buffer!\n", size);
			goto free_buffers;
		}
		global_trace.data[i]->trace = array;

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (struct trace_entry *)
			__get_free_pages(GFP_KERNEL, order);
		if (array == NULL) {
			printk(KERN_ERR "wakeup tracer: failed to allocate"
			       " %ld bytes for trace buffer!\n", size);
			goto free_buffers;
		}
		max_tr.data[i]->trace = array;
#endif
	}

	/*
	 * Since we allocate by orders of pages, we may be able to
	 * round up a bit.
	 */
	global_trace.entries = size / TRACE_ENTRY_SIZE;
	max_tr.entries = global_trace.entries;

	pr_info("tracer: %ld bytes allocated for %ld",
		size, trace_nr_entries);
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info("   actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

	return 0;

 free_buffers:
	for (i-- ; i >= 0; i--) {
		struct trace_array_cpu *data = global_trace.data[i];

		if (data && data->trace) {
			free_pages((unsigned long)data->trace, order);
			data->trace = NULL;
		}

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		if (data && data->trace) {
			free_pages((unsigned long)data->trace, order);
			data->trace = NULL;
		}
#endif
	}
	return -ENOMEM;
}

device_initcall(tracer_alloc_buffers);