Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  3878
1 file changed, 2224 insertions(+), 1654 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 17bb88d86ac2..874f2893cff0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -11,47 +11,58 @@
  * Copyright (C) 2004-2006 Ingo Molnar
  * Copyright (C) 2004 William Lee Irwin III
  */
+#include <linux/ring_buffer.h>
 #include <linux/utsrelease.h>
+#include <linux/stacktrace.h>
+#include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/notifier.h>
+#include <linux/irqflags.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
 #include <linux/linkage.h>
 #include <linux/uaccess.h>
+#include <linux/kprobes.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/splice.h>
 #include <linux/kdebug.h>
+#include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
-#include <linux/kprobes.h>
-#include <linux/writeback.h>
-
-#include <linux/stacktrace.h>
-#include <linux/ring_buffer.h>
-#include <linux/irqflags.h>
 
 #include "trace.h"
+#include "trace_output.h"
 
 #define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)
 
-unsigned long __read_mostly tracing_max_latency;
-unsigned long __read_mostly tracing_thresh;
+/*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+int ring_buffer_expanded;
 
 /*
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
  * entries inserted during the selftest although some concurrent
- * insertions into the ring-buffer such as ftrace_printk could occurred
+ * insertions into the ring-buffer such as trace_printk could occurred
  * at the same time, giving false positive or negative results.
  */
 static bool __read_mostly tracing_selftest_running;
 
+/*
+ * If a tracer is running, we do not want to run SELFTEST.
+ */
+bool __read_mostly tracing_selftest_disabled;
+
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
 	{ }
@@ -73,9 +84,9 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  * of the tracer is successful. But that is the only place that sets
  * this back to zero.
  */
-int tracing_disabled = 1;
+static int tracing_disabled = 1;
 
-static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
@@ -91,6 +102,9 @@ static inline void ftrace_enable_cpu(void)
 
 static cpumask_var_t __read_mostly tracing_buffer_mask;
 
+/* Define which cpu buffers are currently read in trace_pipe */
+static cpumask_var_t tracing_reader_cpumask;
+
 #define for_each_tracing_cpu(cpu)	\
 	for_each_cpu(cpu, tracing_buffer_mask)
 
@@ -109,14 +123,21 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  */
 int ftrace_dump_on_oops;
 
-static int tracing_set_tracer(char *buf);
+static int tracing_set_tracer(const char *buf);
+
+#define MAX_TRACER_SIZE		100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
+static char *default_bootup_tracer;
 
-static int __init set_ftrace(char *str)
+static int __init set_cmdline_ftrace(char *str)
 {
-	tracing_set_tracer(str);
+	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
+	default_bootup_tracer = bootup_tracer_buf;
+	/* We are using ftrace early, expand it */
+	ring_buffer_expanded = 1;
 	return 1;
 }
-__setup("ftrace", set_ftrace);
+__setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
@@ -125,21 +146,13 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
-long
-ns2usecs(cycle_t nsec)
+unsigned long long ns2usecs(cycle_t nsec)
 {
 	nsec += 500;
 	do_div(nsec, 1000);
 	return nsec;
 }
 
-cycle_t ftrace_now(int cpu)
-{
-	u64 ts = ring_buffer_time_stamp(cpu);
-	ring_buffer_normalize_time_stamp(cpu, &ts);
-	return ts;
-}
-
 /*
  * The global_trace is the descriptor that holds the tracing
  * buffers for the live tracing. For each CPU, it contains
@@ -156,6 +169,28 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+int filter_current_check_discard(struct ring_buffer *buffer,
+				 struct ftrace_event_call *call, void *rec,
+				 struct ring_buffer_event *event)
+{
+	return filter_check_discard(call, rec, buffer, event);
+}
+EXPORT_SYMBOL_GPL(filter_current_check_discard);
+
+cycle_t ftrace_now(int cpu)
+{
+	u64 ts;
+
+	/* Early boot up does not have a buffer yet */
+	if (!global_trace.buffer)
+		return trace_clock_local();
+
+	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
+	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
+
+	return ts;
+}
+
 /*
  * The max_tr is used to snapshot the global_trace when a maximum
  * latency is reached. Some tracers will use this to store a maximum
@@ -186,9 +221,6 @@ int tracing_is_enabled(void)
 	return tracer_enabled;
 }
 
-/* function tracing enabled */
-int ftrace_function_enabled;
-
 /*
  * trace_buf_size is the size in bytes that is allocated
  * for a buffer. Note, the number of bytes is always rounded
@@ -210,13 +242,6 @@ static struct tracer *trace_types __read_mostly;
 static struct tracer *current_trace __read_mostly;
 
 /*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int max_tracer_type_len;
-
-/*
  * trace_types_lock is used to protect the trace_types list.
  * This lock is also used to keep user access serialized.
  * Accesses from userspace will grab this lock while userspace
@@ -229,7 +254,11 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-	TRACE_ITER_ANNOTATE;
+	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
+	TRACE_ITER_GRAPH_TIME;
+
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -239,24 +268,29 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)
 {
 	unsigned long buf_size;
-	int ret;
 
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &buf_size);
+	buf_size = memparse(str, &str);
 	/* nr_entries can not be zero */
-	if (ret < 0 || buf_size == 0)
+	if (buf_size == 0)
 		return 0;
 	trace_buf_size = buf_size;
 	return 1;
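
With memparse() in place of strict_strtoul(), the trace_buf_size= boot parameter also accepts K/M/G size suffixes. As a rough user-space sketch of memparse-style suffix handling (illustrative only; the kernel's real memparse() lives in lib/cmdline.c, and parse_size here is an invented name):

#include <stdlib.h>

/* Parse "1441792", "1408K", "16M", ... scaling by the optional suffix. */
static unsigned long long parse_size(const char *s, char **retptr)
{
	unsigned long long ret = strtoull(s, retptr, 0);

	switch (**retptr) {
	case 'G': case 'g':
		ret <<= 10;
		/* fall through */
	case 'M': case 'm':
		ret <<= 10;
		/* fall through */
	case 'K': case 'k':
		ret <<= 10;
		(*retptr)++;
	default:
		break;
	}
	return ret;
}

So "trace_buf_size=1408K" yields the same value as "trace_buf_size=1441792" did before the change.
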
@@ -280,192 +314,164 @@ static const char *trace_options[] = {
280 "block", 314 "block",
281 "stacktrace", 315 "stacktrace",
282 "sched-tree", 316 "sched-tree",
283 "ftrace_printk", 317 "trace_printk",
284 "ftrace_preempt", 318 "ftrace_preempt",
285 "branch", 319 "branch",
286 "annotate", 320 "annotate",
287 "userstacktrace", 321 "userstacktrace",
288 "sym-userobj", 322 "sym-userobj",
289 "printk-msg-only", 323 "printk-msg-only",
324 "context-info",
325 "latency-format",
326 "sleep-time",
327 "graph-time",
290 NULL 328 NULL
291}; 329};
292 330
293/* 331static struct {
294 * ftrace_max_lock is used to protect the swapping of buffers 332 u64 (*func)(void);
295 * when taking a max snapshot. The buffers themselves are 333 const char *name;
296 * protected by per_cpu spinlocks. But the action of the swap 334} trace_clocks[] = {
297 * needs its own lock. 335 { trace_clock_local, "local" },
298 * 336 { trace_clock_global, "global" },
299 * This is defined as a raw_spinlock_t in order to help 337};
300 * with performance when lockdep debugging is enabled. 338
301 */ 339int trace_clock_id;
302static raw_spinlock_t ftrace_max_lock =
303 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
304 340
305/* 341/*
306 * Copy the new maximum trace into the separate maximum-trace 342 * trace_parser_get_init - gets the buffer for trace parser
307 * structure. (this way the maximum trace is permanently saved,
308 * for later retrieval via /debugfs/tracing/latency_trace)
309 */ 343 */
310static void 344int trace_parser_get_init(struct trace_parser *parser, int size)
311__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
312{ 345{
313 struct trace_array_cpu *data = tr->data[cpu]; 346 memset(parser, 0, sizeof(*parser));
314 347
315 max_tr.cpu = cpu; 348 parser->buffer = kmalloc(size, GFP_KERNEL);
316 max_tr.time_start = data->preempt_timestamp; 349 if (!parser->buffer)
317 350 return 1;
318 data = max_tr.data[cpu];
319 data->saved_latency = tracing_max_latency;
320
321 memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
322 data->pid = tsk->pid;
323 data->uid = task_uid(tsk);
324 data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
325 data->policy = tsk->policy;
326 data->rt_priority = tsk->rt_priority;
327 351
328 /* record this tasks comm */ 352 parser->size = size;
329 tracing_record_cmdline(current); 353 return 0;
330} 354}
331 355
332/** 356/*
333 * trace_seq_printf - sequence printing of trace information 357 * trace_parser_put - frees the buffer for trace parser
334 * @s: trace sequence descriptor
335 * @fmt: printf format string
336 *
337 * The tracer may use either sequence operations or its own
338 * copy to user routines. To simplify formating of a trace
339 * trace_seq_printf is used to store strings into a special
340 * buffer (@s). Then the output may be either used by
341 * the sequencer or pulled into another buffer.
342 */ 358 */
343int 359void trace_parser_put(struct trace_parser *parser)
344trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
345{ 360{
346 int len = (PAGE_SIZE - 1) - s->len; 361 kfree(parser->buffer);
347 va_list ap;
348 int ret;
349
350 if (!len)
351 return 0;
352
353 va_start(ap, fmt);
354 ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
355 va_end(ap);
356
357 /* If we can't write it all, don't bother writing anything */
358 if (ret >= len)
359 return 0;
360
361 s->len += ret;
362
363 return len;
364} 362}
365 363
366/** 364/*
367 * trace_seq_puts - trace sequence printing of simple string 365 * trace_get_user - reads the user input string separated by space
368 * @s: trace sequence descriptor 366 * (matched by isspace(ch))
369 * @str: simple string to record 367 *
368 * For each string found the 'struct trace_parser' is updated,
369 * and the function returns.
370 *
371 * Returns number of bytes read.
370 * 372 *
371 * The tracer may use either the sequence operations or its own 373 * See kernel/trace/trace.h for 'struct trace_parser' details.
372 * copy to user routines. This function records a simple string
373 * into a special buffer (@s) for later retrieval by a sequencer
374 * or other mechanism.
375 */ 374 */
376static int 375int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
377trace_seq_puts(struct trace_seq *s, const char *str) 376 size_t cnt, loff_t *ppos)
378{
379 int len = strlen(str);
380
381 if (len > ((PAGE_SIZE - 1) - s->len))
382 return 0;
383
384 memcpy(s->buffer + s->len, str, len);
385 s->len += len;
386
387 return len;
388}
389
390static int
391trace_seq_putc(struct trace_seq *s, unsigned char c)
392{ 377{
393 if (s->len >= (PAGE_SIZE - 1)) 378 char ch;
394 return 0; 379 size_t read = 0;
380 ssize_t ret;
395 381
396 s->buffer[s->len++] = c; 382 if (!*ppos)
383 trace_parser_clear(parser);
397 384
398 return 1; 385 ret = get_user(ch, ubuf++);
399} 386 if (ret)
387 goto out;
400 388
401static int 389 read++;
402trace_seq_putmem(struct trace_seq *s, void *mem, size_t len) 390 cnt--;
403{
404 if (len > ((PAGE_SIZE - 1) - s->len))
405 return 0;
406 391
407 memcpy(s->buffer + s->len, mem, len); 392 /*
408 s->len += len; 393 * The parser is not finished with the last write,
394 * continue reading the user input without skipping spaces.
395 */
396 if (!parser->cont) {
397 /* skip white space */
398 while (cnt && isspace(ch)) {
399 ret = get_user(ch, ubuf++);
400 if (ret)
401 goto out;
402 read++;
403 cnt--;
404 }
409 405
410 return len; 406 /* only spaces were written */
411} 407 if (isspace(ch)) {
408 *ppos += read;
409 ret = read;
410 goto out;
411 }
412 412
413#define MAX_MEMHEX_BYTES 8 413 parser->idx = 0;
414#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) 414 }
415 415
416static int 416 /* read the non-space input */
417trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) 417 while (cnt && !isspace(ch)) {
418{ 418 if (parser->idx < parser->size - 1)
419 unsigned char hex[HEX_CHARS]; 419 parser->buffer[parser->idx++] = ch;
420 unsigned char *data = mem; 420 else {
421 int i, j; 421 ret = -EINVAL;
422 goto out;
423 }
424 ret = get_user(ch, ubuf++);
425 if (ret)
426 goto out;
427 read++;
428 cnt--;
429 }
422 430
423#ifdef __BIG_ENDIAN 431 /* We either got finished input or we have to wait for another call. */
424 for (i = 0, j = 0; i < len; i++) { 432 if (isspace(ch)) {
425#else 433 parser->buffer[parser->idx] = 0;
426 for (i = len-1, j = 0; i >= 0; i--) { 434 parser->cont = false;
427#endif 435 } else {
428 hex[j++] = hex_asc_hi(data[i]); 436 parser->cont = true;
429 hex[j++] = hex_asc_lo(data[i]); 437 parser->buffer[parser->idx++] = ch;
430 } 438 }
431 hex[j++] = ' ';
432 439
433 return trace_seq_putmem(s, hex, j); 440 *ppos += read;
441 ret = read;
442
443out:
444 return ret;
434} 445}
435 446
436static int 447ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
437trace_seq_path(struct trace_seq *s, struct path *path)
438{ 448{
439 unsigned char *p; 449 int len;
450 int ret;
440 451
441 if (s->len >= (PAGE_SIZE - 1)) 452 if (!cnt)
442 return 0; 453 return 0;
443 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
444 if (!IS_ERR(p)) {
445 p = mangle_path(s->buffer + s->len, p, "\n");
446 if (p) {
447 s->len = p - s->buffer;
448 return 1;
449 }
450 } else {
451 s->buffer[s->len++] = '?';
452 return 1;
453 }
454 454
455 return 0; 455 if (s->len <= s->readpos)
456} 456 return -EBUSY;
457 457
458static void 458 len = s->len - s->readpos;
459trace_seq_reset(struct trace_seq *s) 459 if (cnt > len)
460{ 460 cnt = len;
461 s->len = 0; 461 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
462 s->readpos = 0; 462 if (ret == cnt)
463 return -EFAULT;
464
465 cnt -= ret;
466
467 s->readpos += cnt;
468 return cnt;
463} 469}
464 470
465ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) 471static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
466{ 472{
467 int len; 473 int len;
468 int ret; 474 void *ret;
469 475
470 if (s->len <= s->readpos) 476 if (s->len <= s->readpos)
471 return -EBUSY; 477 return -EBUSY;
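
trace_get_user() above tokenizes user writes one whitespace-separated word at a time, carrying a 'cont' flag so a word split across two write() calls is reassembled on the next call. A stand-alone sketch of the same state machine (plain buffers instead of __user pointers; struct parser and get_token are invented names for illustration):

#include <ctype.h>
#include <stdbool.h>

struct parser { char buf[64]; int idx; bool cont; };

/* Consume one token from in[0..cnt); returns bytes consumed, or -1 if
 * the token overflows buf. The token is complete when p->cont is false
 * and buf is NUL-terminated; otherwise feed more input next call. */
static int get_token(struct parser *p, const char *in, int cnt)
{
	int read = 0;

	/* A fresh token skips leading whitespace; a continued one must not. */
	if (!p->cont) {
		while (read < cnt && isspace((unsigned char)in[read]))
			read++;
		p->idx = 0;
	}

	/* Accumulate non-space characters into the token buffer. */
	while (read < cnt && !isspace((unsigned char)in[read]) &&
	       p->idx < (int)sizeof(p->buf) - 1)
		p->buf[p->idx++] = in[read++];

	if (read < cnt && isspace((unsigned char)in[read])) {
		p->buf[p->idx] = '\0';	/* hit a delimiter: token complete */
		p->cont = false;
		read++;
	} else if (read == cnt) {
		p->cont = p->idx > 0;	/* input ran out mid-token: resume later */
	} else {
		return -1;		/* token longer than the buffer (-EINVAL above) */
	}
	return read;
}
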
@@ -473,23 +479,62 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 	len = s->len - s->readpos;
 	if (cnt > len)
 		cnt = len;
-	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-	if (ret)
+	ret = memcpy(buf, s->buffer + s->readpos, cnt);
+	if (!ret)
 		return -EFAULT;
 
-	s->readpos += len;
+	s->readpos += cnt;
 	return cnt;
 }
 
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ *
+ * It is also used in other places outside the update_max_tr
+ * so it needs to be defined outside of the
+ * CONFIG_TRACER_MAX_TRACE.
+ */
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+unsigned long __read_mostly tracing_max_latency;
+unsigned long __read_mostly tracing_thresh;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ */
 static void
-trace_print_seq(struct seq_file *m, struct trace_seq *s)
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
+	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *max_data = tr->data[cpu];
 
-	s->buffer[len] = 0;
-	seq_puts(m, s->buffer);
+	max_tr.cpu = cpu;
+	max_tr.time_start = data->preempt_timestamp;
+
+	max_data = max_tr.data[cpu];
+	max_data->saved_latency = tracing_max_latency;
+	max_data->critical_start = data->critical_start;
+	max_data->critical_end = data->critical_end;
 
-	trace_seq_reset(s);
+	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	max_data->pid = tsk->pid;
+	max_data->uid = task_uid(tsk);
+	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+	max_data->policy = tsk->policy;
+	max_data->rt_priority = tsk->rt_priority;
+
+	/* record this tasks comm */
+	tracing_record_cmdline(tsk);
 }
 
 /**
@@ -506,16 +551,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct ring_buffer *buf = tr->buffer;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
-	ftrace_disable_cpu();
-	ring_buffer_reset(tr->buffer);
-	ftrace_enable_cpu();
-
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
@@ -533,21 +577,35 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	int ret;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
-	ring_buffer_reset(max_tr.buffer);
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
+	if (ret == -EBUSY) {
+		/*
+		 * We failed to swap the buffer due to a commit taking
+		 * place on this CPU. We fail to record, but we reset
+		 * the max trace buffer (no one writes directly to it)
+		 * and flag that it failed.
+		 */
+		trace_array_printk(&max_tr, _THIS_IP_,
+			"Failed to swap buffers due to commit in progress\n");
+	}
+
 	ftrace_enable_cpu();
 
-	WARN_ON_ONCE(ret);
+	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 /**
  * register_tracer - register a tracer with the ftrace system.
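
update_max_tr() preserves the worst-case trace by swapping buffer pointers rather than copying entries, and update_max_tr_single() now tolerates -EBUSY when a commit is in flight on the swapped CPU. The pointer-swap idea, reduced to a minimal sketch (a pthread mutex standing in for ftrace_max_lock; all names here are invented):

#include <pthread.h>

struct trace_buf { int dummy; /* trace entries ... */ };

static struct trace_buf *live_buf, *max_buf;
static pthread_mutex_t max_lock = PTHREAD_MUTEX_INITIALIZER;

/* O(1) snapshot: the live buffer becomes the saved maximum and the
 * old snapshot is recycled as the new live buffer. No entry copying. */
static void update_max(void)
{
	struct trace_buf *tmp;

	pthread_mutex_lock(&max_lock);
	tmp = live_buf;
	live_buf = max_buf;
	max_buf = tmp;
	pthread_mutex_unlock(&max_lock);
}
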
@@ -556,9 +614,10 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
  * Register a new plugin tracer.
  */
 int register_tracer(struct tracer *type)
+__releases(kernel_lock)
+__acquires(kernel_lock)
 {
 	struct tracer *t;
-	int len;
 	int ret = 0;
 
 	if (!type->name) {
@@ -566,6 +625,11 @@ int register_tracer(struct tracer *type)
 		return -1;
 	}
 
+	if (strlen(type->name) > MAX_TRACER_SIZE) {
+		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
+		return -1;
+	}
+
 	/*
 	 * When this gets called we hold the BKL which means that
 	 * preemption is disabled. Various trace selftests however
@@ -580,7 +644,7 @@ int register_tracer(struct tracer *type)
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
-			pr_info("Trace %s already registered\n",
+			pr_info("Tracer %s already registered\n",
 				type->name);
 			ret = -1;
 			goto out;
@@ -594,12 +658,14 @@ int register_tracer(struct tracer *type)
 	else
 		if (!type->flags->opts)
 			type->flags->opts = dummy_tracer_opt;
+	if (!type->wait_pipe)
+		type->wait_pipe = default_wait_pipe;
+
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
-	if (type->selftest) {
+	if (type->selftest && !tracing_selftest_disabled) {
 		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
-		int i;
 
 		/*
 		 * Run a selftest on this tracer.
@@ -608,8 +674,7 @@ int register_tracer(struct tracer *type)
 		 * internal tracing to verify that everything is in order.
 		 * If we fail, we do not register this tracer.
 		 */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		current_trace = type;
 		/* the test is responsible for initializing and enabling */
@@ -622,8 +687,7 @@ int register_tracer(struct tracer *type)
 			goto out;
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -631,82 +695,129 @@ int register_tracer(struct tracer *type)
 
 	type->next = trace_types;
 	trace_types = type;
-	len = strlen(type->name);
-	if (len > max_tracer_type_len)
-		max_tracer_type_len = len;
 
  out:
 	tracing_selftest_running = false;
 	mutex_unlock(&trace_types_lock);
-	lock_kernel();
 
+	if (ret || !default_bootup_tracer)
+		goto out_unlock;
+
+	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
+		goto out_unlock;
+
+	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
+	/* Do we want this tracer to start on bootup? */
+	tracing_set_tracer(type->name);
+	default_bootup_tracer = NULL;
+	/* disable other selftests, since this will break it. */
+	tracing_selftest_disabled = 1;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
+	       type->name);
+#endif
+
+ out_unlock:
+	lock_kernel();
 	return ret;
 }
 
 void unregister_tracer(struct tracer *type)
 {
 	struct tracer **t;
-	int len;
 
 	mutex_lock(&trace_types_lock);
 	for (t = &trace_types; *t; t = &(*t)->next) {
 		if (*t == type)
 			goto found;
 	}
-	pr_info("Trace %s not registered\n", type->name);
+	pr_info("Tracer %s not registered\n", type->name);
 	goto out;
 
  found:
 	*t = (*t)->next;
-	if (strlen(type->name) != max_tracer_type_len)
-		goto out;
 
-	max_tracer_type_len = 0;
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		len = strlen((*t)->name);
-		if (len > max_tracer_type_len)
-			max_tracer_type_len = len;
+	if (type == current_trace && tracer_enabled) {
+		tracer_enabled = 0;
+		tracing_stop();
+		if (current_trace->stop)
+			current_trace->stop(&global_trace);
+		current_trace = &nop_trace;
 	}
- out:
+out:
 	mutex_unlock(&trace_types_lock);
 }
 
-void tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct trace_array *tr, int cpu)
 {
 	ftrace_disable_cpu();
 	ring_buffer_reset_cpu(tr->buffer, cpu);
 	ftrace_enable_cpu();
 }
 
+void tracing_reset(struct trace_array *tr, int cpu)
+{
+	struct ring_buffer *buffer = tr->buffer;
+
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+	__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
+}
+
 void tracing_reset_online_cpus(struct trace_array *tr)
 {
+	struct ring_buffer *buffer = tr->buffer;
 	int cpu;
 
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
+		__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
+}
+
+void tracing_reset_current(int cpu)
+{
+	tracing_reset(&global_trace, cpu);
+}
+
+void tracing_reset_current_online_cpus(void)
+{
+	tracing_reset_online_cpus(&global_trace);
 }
 
 #define SAVED_CMDLINES 128
+#define NO_CMDLINE_MAP UINT_MAX
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
-atomic_t trace_record_cmdline_disabled __read_mostly;
+static atomic_t trace_record_cmdline_disabled __read_mostly;
 
 static void trace_init_cmdlines(void)
 {
-	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
-	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
+	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
+	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
 	cmdline_idx = 0;
 }
 
-static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+int is_tracing_stopped(void)
+{
+	return trace_stop_count;
+}
 
 /**
  * ftrace_off_permanent - disable all ftrace code permanently
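
The rewritten tracing_reset() above is a disable-then-drain pattern: stop new writers, wait for every in-flight commit to finish (synchronize_sched() waits until all CPUs have scheduled), and only then touch the buffer. Schematically, with stand-in names (the helpers below are invented analogs, not kernel APIs):

#include <stdatomic.h>

static atomic_bool recording = true;

static void wait_for_inflight_writers(void) { /* synchronize_sched() analog */ }
static void reset_buffer(void)              { /* __tracing_reset() analog */ }

/* Safe reset: no writer can begin after the flag flips, and any writer
 * that began before it is drained before the buffer is touched. */
static void tracing_reset_sketch(void)
{
	atomic_store(&recording, false);	/* ring_buffer_record_disable() */
	wait_for_inflight_writers();
	reset_buffer();
	atomic_store(&recording, true);		/* ring_buffer_record_enable() */
}
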
@@ -738,13 +849,12 @@ void tracing_start(void)
 		return;
 
 	spin_lock_irqsave(&tracing_start_lock, flags);
-	if (--trace_stop_count)
-		goto out;
-
-	if (trace_stop_count < 0) {
-		/* Someone screwed up their debugging */
-		WARN_ON_ONCE(1);
-		trace_stop_count = 0;
+	if (--trace_stop_count) {
+		if (trace_stop_count < 0) {
+			/* Someone screwed up their debugging */
+			WARN_ON_ONCE(1);
+			trace_stop_count = 0;
+		}
 		goto out;
 	}
 
@@ -794,8 +904,7 @@ void trace_stop_cmdline_recording(void);
 
 static void trace_save_cmdline(struct task_struct *tsk)
 {
-	unsigned map;
-	unsigned idx;
+	unsigned pid, idx;
 
 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
 		return;
@@ -806,17 +915,24 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!spin_trylock(&trace_cmdline_lock))
+	if (!__raw_spin_trylock(&trace_cmdline_lock))
 		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
-	if (idx >= SAVED_CMDLINES) {
+	if (idx == NO_CMDLINE_MAP) {
 		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
 
-		map = map_cmdline_to_pid[idx];
-		if (map <= PID_MAX_DEFAULT)
-			map_pid_to_cmdline[map] = (unsigned)-1;
+		/*
+		 * Check whether the cmdline buffer at idx has a pid
+		 * mapped. We are going to overwrite that entry so we
+		 * need to clear the map_pid_to_cmdline. Otherwise we
+		 * would read the new comm for the old pid.
+		 */
+		pid = map_cmdline_to_pid[idx];
+		if (pid != NO_CMDLINE_MAP)
+			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
 
+		map_cmdline_to_pid[idx] = tsk->pid;
 		map_pid_to_cmdline[tsk->pid] = idx;
 
 		cmdline_idx = idx;
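
The pid-to-slot bookkeeping above is a small fixed-size cache with eviction: the forward map answers lookups, the reverse map lets an overwritten slot unmap its previous owner. A rough stand-alone sketch of the same two-way mapping (sizes match the diff's defaults, but names and the lack of locking are simplifications for illustration):

#include <string.h>

#define SLOTS 128
#define MAX_PID 32768
#define NO_MAP ((unsigned)-1)

static unsigned pid_to_slot[MAX_PID + 1];	/* initialized to NO_MAP */
static unsigned slot_to_pid[SLOTS];		/* initialized to NO_MAP */
static char comms[SLOTS][16];
static int next_slot;

static void save_comm(unsigned pid, const char *comm)
{
	unsigned idx = pid_to_slot[pid];

	if (idx == NO_MAP) {
		idx = (next_slot + 1) % SLOTS;
		/* Evict: unmap the pid that previously owned this slot,
		 * otherwise a lookup for it would return the new comm. */
		if (slot_to_pid[idx] != NO_MAP)
			pid_to_slot[slot_to_pid[idx]] = NO_MAP;
		slot_to_pid[idx] = pid;
		pid_to_slot[pid] = idx;
		next_slot = idx;
	}
	strncpy(comms[idx], comm, sizeof(comms[idx]) - 1);
}
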
@@ -824,33 +940,39 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	spin_unlock(&trace_cmdline_lock);
+	__raw_spin_unlock(&trace_cmdline_lock);
 }
 
-char *trace_find_cmdline(int pid)
+void trace_find_cmdline(int pid, char comm[])
 {
-	char *cmdline = "<...>";
 	unsigned map;
 
-	if (!pid)
-		return "<idle>";
+	if (!pid) {
+		strcpy(comm, "<idle>");
+		return;
+	}
 
-	if (pid > PID_MAX_DEFAULT)
-		goto out;
+	if (pid > PID_MAX_DEFAULT) {
+		strcpy(comm, "<...>");
+		return;
+	}
 
+	preempt_disable();
+	__raw_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
-	if (map >= SAVED_CMDLINES)
-		goto out;
-
-	cmdline = saved_cmdlines[map];
+	if (map != NO_CMDLINE_MAP)
+		strcpy(comm, saved_cmdlines[map]);
+	else
+		strcpy(comm, "<...>");
 
- out:
-	return cmdline;
+	__raw_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 }
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-	if (atomic_read(&trace_record_cmdline_disabled))
+	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
+	    !tracing_is_on())
 		return;
 
 	trace_save_cmdline(tsk);
@@ -864,7 +986,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count = pc & 0xff;
 	entry->pid = (tsk) ? tsk->pid : 0;
-	entry->tgid = (tsk) ? tsk->tgid : 0;
+	entry->lock_depth = (tsk) ? tsk->lock_depth : 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -875,81 +997,108 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
-void
-trace_function(struct trace_array *tr, struct trace_array_cpu *data,
-	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
-	       int pc)
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+			  int type,
+			  unsigned long len,
+			  unsigned long flags, int pc)
 {
 	struct ring_buffer_event *event;
-	struct ftrace_entry *entry;
-	unsigned long irq_flags;
 
-	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return;
+	event = ring_buffer_lock_reserve(buffer, len);
+	if (event != NULL) {
+		struct trace_entry *ent = ring_buffer_event_data(event);
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_FN;
-	entry->ip = ip;
-	entry->parent_ip = parent_ip;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+		tracing_generic_entry_update(ent, flags, pc);
+		ent->type = type;
+	}
+
+	return event;
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_graph_entry(struct trace_array *tr,
-				struct trace_array_cpu *data,
-				struct ftrace_graph_ent *trace,
-				unsigned long flags,
-				int pc)
+static inline void
+__trace_buffer_unlock_commit(struct ring_buffer *buffer,
+			     struct ring_buffer_event *event,
+			     unsigned long flags, int pc,
+			     int wake)
 {
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ent_entry *entry;
-	unsigned long irq_flags;
+	ring_buffer_unlock_commit(buffer, event);
 
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return;
+	ftrace_trace_stack(buffer, flags, 6, pc);
+	ftrace_trace_userstack(buffer, flags, pc);
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_GRAPH_ENT;
-	entry->graph_ent = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	if (wake)
+		trace_wake_up();
+}
+
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+			       struct ring_buffer_event *event,
+			       unsigned long flags, int pc)
+{
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+}
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
+				  int type, unsigned long len,
+				  unsigned long flags, int pc)
+{
+	*current_rb = global_trace.buffer;
+	return trace_buffer_lock_reserve(*current_rb,
+					 type, len, flags, pc);
+}
+EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
+
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+					struct ring_buffer_event *event,
+					unsigned long flags, int pc)
+{
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
+EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-static void __trace_graph_return(struct trace_array *tr,
-				struct trace_array_cpu *data,
-				struct ftrace_graph_ret *trace,
-				unsigned long flags,
-				int pc)
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+				       struct ring_buffer_event *event,
+				       unsigned long flags, int pc)
 {
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
+}
+EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
+
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+					 struct ring_buffer_event *event)
+{
+	ring_buffer_discard_commit(buffer, event);
+}
+EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
+
+void
+trace_function(struct trace_array *tr,
+	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+	       int pc)
+{
+	struct ftrace_event_call *call = &event_function;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
-	struct ftrace_graph_ret_entry *entry;
-	unsigned long irq_flags;
+	struct ftrace_entry *entry;
 
+	/* If we are reading the ring buffer, don't trace */
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
+	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
+					  flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_GRAPH_RET;
-	entry->ret = *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	entry->ip = ip;
+	entry->parent_ip = parent_ip;
+
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
-#endif
 
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
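
The new helpers centralize the reserve/fill/commit pattern every event writer follows. A hedged sketch of a typical caller, mirroring trace_function() above (TRACE_FOO and struct foo_entry are invented for illustration; the helper calls are the ones introduced in this diff):

/* Hypothetical event writer using the reserve/fill/commit helpers. */
static void trace_foo(struct trace_array *tr, unsigned long val,
		      unsigned long flags, int pc)
{
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct foo_entry *entry;

	/* reserve space and stamp the common trace_entry header */
	event = trace_buffer_lock_reserve(buffer, TRACE_FOO, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;			/* buffer full or recording disabled */
	entry = ring_buffer_event_data(event);
	entry->val = val;		/* fill the type-specific payload */

	/* commit; this also records stack traces and wakes readers */
	trace_buffer_unlock_commit(buffer, event, flags, pc);
}
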
@@ -957,31 +1106,24 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 	       int pc)
 {
 	if (likely(!atomic_read(&data->disabled)))
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 }
 
-static void ftrace_trace_stack(struct trace_array *tr,
-			       struct trace_array_cpu *data,
-			       unsigned long flags,
-			       int skip, int pc)
-{
 #ifdef CONFIG_STACKTRACE
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
+				 unsigned long flags,
+				 int skip, int pc)
+{
+	struct ftrace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
-	unsigned long irq_flags;
-
-	if (!(trace_flags & TRACE_ITER_STACKTRACE))
-		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_STACK;
-
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
 	trace.nr_entries = 0;
@@ -990,39 +1132,43 @@ static void ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-#endif
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
-void __trace_stack(struct trace_array *tr,
-		   struct trace_array_cpu *data,
-		   unsigned long flags,
-		   int skip)
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
+			int skip, int pc)
 {
-	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
+	if (!(trace_flags & TRACE_ITER_STACKTRACE))
+		return;
+
+	__ftrace_trace_stack(buffer, flags, skip, pc);
 }
 
-static void ftrace_trace_userstack(struct trace_array *tr,
-				   struct trace_array_cpu *data,
-				   unsigned long flags, int pc)
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+		   int pc)
 {
-#ifdef CONFIG_STACKTRACE
+	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
+}
+
+void
+ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 	struct stack_trace trace;
-	unsigned long irq_flags;
 
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
+					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_USER_STACK;
 
+	entry->tgid = current->tgid;
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
 	trace.nr_entries = 0;
@@ -1031,112 +1177,48 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-#endif
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
-void __trace_userstack(struct trace_array *tr,
-		       struct trace_array_cpu *data,
-		       unsigned long flags)
+#ifdef UNUSED
+static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 {
-	ftrace_trace_userstack(tr, data, flags, preempt_count());
+	ftrace_trace_userstack(tr, flags, preempt_count());
 }
+#endif /* UNUSED */
+
+#endif /* CONFIG_STACKTRACE */
 
 static void
-ftrace_trace_special(void *__tr, void *__data,
+ftrace_trace_special(void *__tr,
 		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
 		     int pc)
 {
+	struct ftrace_event_call *call = &event_special;
 	struct ring_buffer_event *event;
-	struct trace_array_cpu *data = __data;
 	struct trace_array *tr = __tr;
+	struct ring_buffer *buffer = tr->buffer;
 	struct special_entry *entry;
-	unsigned long irq_flags;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
+					  sizeof(*entry), 0, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, 0, pc);
-	entry->ent.type = TRACE_SPECIAL;
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
-	ftrace_trace_userstack(tr, data, irq_flags, pc);
 
-	trace_wake_up();
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void
 __trace_special(void *__tr, void *__data,
 		unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
-}
-
-void
-tracing_sched_switch_trace(struct trace_array *tr,
-			   struct trace_array_cpu *data,
-			   struct task_struct *prev,
-			   struct task_struct *next,
-			   unsigned long flags, int pc)
-{
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-	unsigned long irq_flags;
-
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_CTX;
-	entry->prev_pid = prev->pid;
-	entry->prev_prio = prev->prio;
-	entry->prev_state = prev->state;
-	entry->next_pid = next->pid;
-	entry->next_prio = next->prio;
-	entry->next_state = next->state;
-	entry->next_cpu = task_cpu(next);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, flags, 5, pc);
-	ftrace_trace_userstack(tr, data, flags, pc);
-}
-
-void
-tracing_sched_wakeup_trace(struct trace_array *tr,
-			   struct trace_array_cpu *data,
-			   struct task_struct *wakee,
-			   struct task_struct *curr,
-			   unsigned long flags, int pc)
-{
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-	unsigned long irq_flags;
-
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type = TRACE_WAKE;
-	entry->prev_pid = curr->pid;
-	entry->prev_prio = curr->prio;
-	entry->prev_state = curr->state;
-	entry->next_pid = wakee->pid;
-	entry->next_prio = wakee->prio;
-	entry->next_state = wakee->state;
-	entry->next_cpu = task_cpu(wakee);
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, flags, 6, pc);
-	ftrace_trace_userstack(tr, data, flags, pc);
-
-	trace_wake_up();
+	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
 }
 
 void
@@ -1157,152 +1239,164 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	data = tr->data[cpu];
 
 	if (likely(atomic_inc_return(&data->disabled) == 1))
-		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
+		ftrace_trace_special(tr, arg1, arg2, arg3, pc);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_TRACER
-static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+/**
+ * trace_vbprintk - write binary msg to tracing buffer
+ *
+ */
+int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
+	static raw_spinlock_t trace_buf_lock =
+		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static u32 trace_buf[TRACE_BUF_SIZE];
+
+	struct ftrace_event_call *call = &event_bprint;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
+	struct bprint_entry *entry;
 	unsigned long flags;
-	long disabled;
-	int cpu, resched;
-	int pc;
+	int disable;
+	int resched;
+	int cpu, len = 0, size, pc;
 
-	if (unlikely(!ftrace_function_enabled))
-		return;
+	if (unlikely(tracing_selftest_running || tracing_disabled))
+		return 0;
+
+	/* Don't pollute graph traces with trace_vprintk internals */
+	pause_graph_tracing();
 
 	pc = preempt_count();
 	resched = ftrace_preempt_disable();
-	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags, pc);
-
-	atomic_dec(&data->disabled);
-	ftrace_preempt_enable(resched);
-}
+	disable = atomic_inc_return(&data->disabled);
+	if (unlikely(disable != 1))
+		goto out;
 
-static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
+	/* Lockdep uses trace_printk for lock tracing */
+	local_irq_save(flags);
+	__raw_spin_lock(&trace_buf_lock);
+	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
-	if (unlikely(!ftrace_function_enabled))
-		return;
+	if (len > TRACE_BUF_SIZE || len < 0)
+		goto out_unlock;
 
-	/*
-	 * Need to use raw, since this must be called before the
-	 * recursive protection is performed.
-	 */
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
+	size = sizeof(*entry) + sizeof(u32) * len;
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+					  flags, pc);
+	if (!event)
+		goto out_unlock;
+	entry = ring_buffer_event_data(event);
+	entry->ip = ip;
+	entry->fmt = fmt;
 
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		trace_function(tr, data, ip, parent_ip, flags, pc);
-	}
+	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
-	atomic_dec(&data->disabled);
+out_unlock:
+	__raw_spin_unlock(&trace_buf_lock);
 	local_irq_restore(flags);
+
+out:
+	atomic_dec_return(&data->disabled);
+	ftrace_preempt_enable(resched);
+	unpause_graph_tracing();
+
+	return len;
 }
+EXPORT_SYMBOL_GPL(trace_vbprintk);
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-int trace_graph_entry(struct ftrace_graph_ent *trace)
+int trace_array_printk(struct trace_array *tr,
+		       unsigned long ip, const char *fmt, ...)
 {
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
-
-	if (!ftrace_trace_task(current))
-		return 0;
+	int ret;
+	va_list ap;
 
-	if (!ftrace_graph_addr(trace->func))
+	if (!(trace_flags & TRACE_ITER_PRINTK))
 		return 0;
 
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		__trace_graph_entry(tr, data, trace, flags, pc);
-	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-
-	return 1;
+	va_start(ap, fmt);
+	ret = trace_array_vprintk(tr, ip, fmt, ap);
+	va_end(ap);
+	return ret;
 }
 
-void trace_graph_return(struct ftrace_graph_ret *trace)
+int trace_array_vprintk(struct trace_array *tr,
+			unsigned long ip, const char *fmt, va_list args)
 {
-	struct trace_array *tr = &global_trace;
+	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static char trace_buf[TRACE_BUF_SIZE];
+
+	struct ftrace_event_call *call = &event_print;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
+	int cpu, len = 0, size, pc;
+	struct print_entry *entry;
+	unsigned long irq_flags;
+	int disable;
 
-	local_irq_save(flags);
+	if (tracing_disabled || tracing_selftest_running)
+		return 0;
+
+	pc = preempt_count();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		__trace_graph_return(tr, data, trace, flags, pc);
-	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = function_trace_call,
-};
+	disable = atomic_inc_return(&data->disabled);
+	if (unlikely(disable != 1))
+		goto out;
 
-void tracing_start_function_trace(void)
-{
-	ftrace_function_enabled = 0;
+	pause_graph_tracing();
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&trace_buf_lock);
+	if (args == NULL) {
+		strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
+		len = strlen(trace_buf);
+	} else
+		len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
-	if (trace_flags & TRACE_ITER_PREEMPTONLY)
-		trace_ops.func = function_trace_call_preempt_only;
-	else
-		trace_ops.func = function_trace_call;
+	size = sizeof(*entry) + len + 1;
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, pc);
+	if (!event)
+		goto out_unlock;
+	entry = ring_buffer_event_data(event);
+	entry->ip = ip;
+
+	memcpy(&entry->buf, trace_buf, len);
+	entry->buf[len] = '\0';
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
+
+ out_unlock:
+	__raw_spin_unlock(&trace_buf_lock);
+	raw_local_irq_restore(irq_flags);
+	unpause_graph_tracing();
+ out:
+	atomic_dec_return(&data->disabled);
+	preempt_enable_notrace();
 
-	register_ftrace_function(&trace_ops);
-	ftrace_function_enabled = 1;
+	return len;
 }
 
-void tracing_stop_function_trace(void)
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	ftrace_function_enabled = 0;
-	unregister_ftrace_function(&trace_ops);
+	return trace_array_vprintk(&global_trace, ip, fmt, args);
 }
-#endif
+EXPORT_SYMBOL_GPL(trace_vprintk);
 
 enum trace_file_type {
 	TRACE_FILE_LAT_FMT = 1,
@@ -1345,10 +1439,25 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1345{ 1439{
1346 struct ring_buffer *buffer = iter->tr->buffer; 1440 struct ring_buffer *buffer = iter->tr->buffer;
1347 struct trace_entry *ent, *next = NULL; 1441 struct trace_entry *ent, *next = NULL;
1442 int cpu_file = iter->cpu_file;
1348 u64 next_ts = 0, ts; 1443 u64 next_ts = 0, ts;
1349 int next_cpu = -1; 1444 int next_cpu = -1;
1350 int cpu; 1445 int cpu;
1351 1446
1447 /*
1448 * If we are in a per_cpu trace file, don't bother iterating
1449 * over all the cpus; peek at that cpu's buffer directly.
1450 */
1451 if (cpu_file > TRACE_PIPE_ALL_CPU) {
1452 if (ring_buffer_empty_cpu(buffer, cpu_file))
1453 return NULL;
1454 ent = peek_next_entry(iter, cpu_file, ent_ts);
1455 if (ent_cpu)
1456 *ent_cpu = cpu_file;
1457
1458 return ent;
1459 }
1460
1352 for_each_tracing_cpu(cpu) { 1461 for_each_tracing_cpu(cpu) {
1353 1462
1354 if (ring_buffer_empty_cpu(buffer, cpu)) 1463 if (ring_buffer_empty_cpu(buffer, cpu))
@@ -1376,8 +1485,8 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1376} 1485}
1377 1486
1378/* Find the next real entry, without updating the iterator itself */ 1487/* Find the next real entry, without updating the iterator itself */
1379static struct trace_entry *
1380find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1488struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1489 int *ent_cpu, u64 *ent_ts)
1381{ 1490{
1382 return __find_next_entry(iter, ent_cpu, ent_ts); 1491 return __find_next_entry(iter, ent_cpu, ent_ts);
1383} 1492}
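A sketch of how a consumer might use the now-public trace_find_next_entry(): it looks at the next entry without advancing the iterator, optionally reporting the cpu and timestamp. The example function and its printout are illustrative only:

static void example_peek(struct trace_iterator *iter)
{
	struct trace_entry *ent;
	u64 ts;
	int cpu;

	/* look ahead without consuming the entry */
	ent = trace_find_next_entry(iter, &cpu, &ts);
	if (ent)
		pr_info("next: type %d cpu %d ts %llu\n",
			ent->type, cpu, (unsigned long long)ts);
}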
@@ -1426,19 +1535,63 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1426 return ent; 1535 return ent;
1427} 1536}
1428 1537
1538static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1539{
1540 struct trace_array *tr = iter->tr;
1541 struct ring_buffer_event *event;
1542 struct ring_buffer_iter *buf_iter;
1543 unsigned long entries = 0;
1544 u64 ts;
1545
1546 tr->data[cpu]->skipped_entries = 0;
1547
1548 if (!iter->buffer_iter[cpu])
1549 return;
1550
1551 buf_iter = iter->buffer_iter[cpu];
1552 ring_buffer_iter_reset(buf_iter);
1553
1554 /*
1555 * With the max latency tracers, a reset may never have
1556 * taken place on a cpu. This is evident from the timestamp
1557 * being before the start of the buffer.
1558 */
1559 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1560 if (ts >= iter->tr->time_start)
1561 break;
1562 entries++;
1563 ring_buffer_read(buf_iter, NULL);
1564 }
1565
1566 tr->data[cpu]->skipped_entries = entries;
1567}
1568
1569/*
1570 * No locking is necessary here. The worst that can
1571 * happen is losing events consumed at the same time
1572 * by a trace_pipe reader.
1573 * Other than that, we don't risk crashing the ring buffer,
1574 * because it serializes the readers.
1575 *
1576 * The current tracer is copied to avoid taking a global
1577 * lock all around.
1578 */
1429static void *s_start(struct seq_file *m, loff_t *pos) 1579static void *s_start(struct seq_file *m, loff_t *pos)
1430{ 1580{
1431 struct trace_iterator *iter = m->private; 1581 struct trace_iterator *iter = m->private;
1582 static struct tracer *old_tracer;
1583 int cpu_file = iter->cpu_file;
1432 void *p = NULL; 1584 void *p = NULL;
1433 loff_t l = 0; 1585 loff_t l = 0;
1434 int cpu; 1586 int cpu;
1435 1587
1588 /* copy the tracer to avoid using a global lock all around */
1436 mutex_lock(&trace_types_lock); 1589 mutex_lock(&trace_types_lock);
1437
1438 if (!current_trace || current_trace != iter->trace) {
1439 mutex_unlock(&trace_types_lock);
1440 return NULL;
1590 if (unlikely(old_tracer != current_trace && current_trace)) {
1591 old_tracer = current_trace;
1592 *iter->trace = *current_trace;
1441 } 1593 }
1594 mutex_unlock(&trace_types_lock);
1442 1595
1443 atomic_inc(&trace_record_cmdline_disabled); 1596 atomic_inc(&trace_record_cmdline_disabled);
1444 1597
@@ -1449,9 +1602,11 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1449 1602
1450 ftrace_disable_cpu(); 1603 ftrace_disable_cpu();
1451 1604
1452 for_each_tracing_cpu(cpu) {
1453 ring_buffer_iter_reset(iter->buffer_iter[cpu]);
1454 }
1605 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1606 for_each_tracing_cpu(cpu)
1607 tracing_iter_reset(iter, cpu);
1608 } else
1609 tracing_iter_reset(iter, cpu_file);
1455 1610
1456 ftrace_enable_cpu(); 1611 ftrace_enable_cpu();
1457 1612
@@ -1463,161 +1618,14 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1463 p = s_next(m, p, &l); 1618 p = s_next(m, p, &l);
1464 } 1619 }
1465 1620
1621 trace_event_read_lock();
1466 return p; 1622 return p;
1467} 1623}
1468 1624
1469static void s_stop(struct seq_file *m, void *p) 1625static void s_stop(struct seq_file *m, void *p)
1470{ 1626{
1471 atomic_dec(&trace_record_cmdline_disabled); 1627 atomic_dec(&trace_record_cmdline_disabled);
1472 mutex_unlock(&trace_types_lock);
1628 trace_event_read_unlock();
1473}
1474
1475#ifdef CONFIG_KRETPROBES
1476static inline const char *kretprobed(const char *name)
1477{
1478 static const char tramp_name[] = "kretprobe_trampoline";
1479 int size = sizeof(tramp_name);
1480
1481 if (strncmp(tramp_name, name, size) == 0)
1482 return "[unknown/kretprobe'd]";
1483 return name;
1484}
1485#else
1486static inline const char *kretprobed(const char *name)
1487{
1488 return name;
1489}
1490#endif /* CONFIG_KRETPROBES */
1491
1492static int
1493seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
1494{
1495#ifdef CONFIG_KALLSYMS
1496 char str[KSYM_SYMBOL_LEN];
1497 const char *name;
1498
1499 kallsyms_lookup(address, NULL, NULL, NULL, str);
1500
1501 name = kretprobed(str);
1502
1503 return trace_seq_printf(s, fmt, name);
1504#endif
1505 return 1;
1506}
1507
1508static int
1509seq_print_sym_offset(struct trace_seq *s, const char *fmt,
1510 unsigned long address)
1511{
1512#ifdef CONFIG_KALLSYMS
1513 char str[KSYM_SYMBOL_LEN];
1514 const char *name;
1515
1516 sprint_symbol(str, address);
1517 name = kretprobed(str);
1518
1519 return trace_seq_printf(s, fmt, name);
1520#endif
1521 return 1;
1522}
1523
1524#ifndef CONFIG_64BIT
1525# define IP_FMT "%08lx"
1526#else
1527# define IP_FMT "%016lx"
1528#endif
1529
1530int
1531seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1532{
1533 int ret;
1534
1535 if (!ip)
1536 return trace_seq_printf(s, "0");
1537
1538 if (sym_flags & TRACE_ITER_SYM_OFFSET)
1539 ret = seq_print_sym_offset(s, "%s", ip);
1540 else
1541 ret = seq_print_sym_short(s, "%s", ip);
1542
1543 if (!ret)
1544 return 0;
1545
1546 if (sym_flags & TRACE_ITER_SYM_ADDR)
1547 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1548 return ret;
1549}
1550
1551static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
1552 unsigned long ip, unsigned long sym_flags)
1553{
1554 struct file *file = NULL;
1555 unsigned long vmstart = 0;
1556 int ret = 1;
1557
1558 if (mm) {
1559 const struct vm_area_struct *vma;
1560
1561 down_read(&mm->mmap_sem);
1562 vma = find_vma(mm, ip);
1563 if (vma) {
1564 file = vma->vm_file;
1565 vmstart = vma->vm_start;
1566 }
1567 if (file) {
1568 ret = trace_seq_path(s, &file->f_path);
1569 if (ret)
1570 ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
1571 }
1572 up_read(&mm->mmap_sem);
1573 }
1574 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
1575 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1576 return ret;
1577}
1578
1579static int
1580seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
1581 unsigned long sym_flags)
1582{
1583 struct mm_struct *mm = NULL;
1584 int ret = 1;
1585 unsigned int i;
1586
1587 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
1588 struct task_struct *task;
1589 /*
1590 * we do the lookup on the thread group leader,
1591 * since individual threads might have already quit!
1592 */
1593 rcu_read_lock();
1594 task = find_task_by_vpid(entry->ent.tgid);
1595 if (task)
1596 mm = get_task_mm(task);
1597 rcu_read_unlock();
1598 }
1599
1600 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1601 unsigned long ip = entry->caller[i];
1602
1603 if (ip == ULONG_MAX || !ret)
1604 break;
1605 if (i && ret)
1606 ret = trace_seq_puts(s, " <- ");
1607 if (!ip) {
1608 if (ret)
1609 ret = trace_seq_puts(s, "??");
1610 continue;
1611 }
1612 if (!ret)
1613 break;
1614 if (ret)
1615 ret = seq_print_user_ip(s, mm, ip, sym_flags);
1616 }
1617
1618 if (mm)
1619 mmput(mm);
1620 return ret;
1621} 1629}
1622 1630
1623static void print_lat_help_header(struct seq_file *m) 1631static void print_lat_help_header(struct seq_file *m)
@@ -1627,10 +1635,10 @@ static void print_lat_help_header(struct seq_file *m)
1627 seq_puts(m, "# | / _----=> need-resched \n"); 1635 seq_puts(m, "# | / _----=> need-resched \n");
1628 seq_puts(m, "# || / _---=> hardirq/softirq \n"); 1636 seq_puts(m, "# || / _---=> hardirq/softirq \n");
1629 seq_puts(m, "# ||| / _--=> preempt-depth \n"); 1637 seq_puts(m, "# ||| / _--=> preempt-depth \n");
1630 seq_puts(m, "# |||| / \n");
1631 seq_puts(m, "# ||||| delay \n");
1632 seq_puts(m, "# cmd pid ||||| time | caller \n");
1633 seq_puts(m, "# \\ / ||||| \\ | / \n");
1638 seq_puts(m, "# |||| /_--=> lock-depth \n");
1639 seq_puts(m, "# |||||/ delay \n");
1640 seq_puts(m, "# cmd pid |||||| time | caller \n");
1641 seq_puts(m, "# \\ / |||||| \\ | / \n");
1634} 1642}
1635 1643
1636static void print_func_help_header(struct seq_file *m) 1644static void print_func_help_header(struct seq_file *m)
@@ -1647,22 +1655,38 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1647 struct trace_array *tr = iter->tr; 1655 struct trace_array *tr = iter->tr;
1648 struct trace_array_cpu *data = tr->data[tr->cpu]; 1656 struct trace_array_cpu *data = tr->data[tr->cpu];
1649 struct tracer *type = current_trace; 1657 struct tracer *type = current_trace;
1650 unsigned long total;
1651 unsigned long entries;
1658 unsigned long entries = 0;
1659 unsigned long total = 0;
1660 unsigned long count;
1652 const char *name = "preemption"; 1661 const char *name = "preemption";
1662 int cpu;
1653 1663
1654 if (type) 1664 if (type)
1655 name = type->name; 1665 name = type->name;
1656 1666
1657 entries = ring_buffer_entries(iter->tr->buffer);
1658 total = entries +
1659 ring_buffer_overruns(iter->tr->buffer);
1660 1667
1661 seq_printf(m, "%s latency trace v1.1.5 on %s\n",
1668 for_each_tracing_cpu(cpu) {
1669 count = ring_buffer_entries_cpu(tr->buffer, cpu);
1670 /*
1671 * If this buffer has skipped entries, then we hold all
1672 * entries for the trace and we need to ignore the
1673 * ones before the time stamp.
1674 */
1675 if (tr->data[cpu]->skipped_entries) {
1676 count -= tr->data[cpu]->skipped_entries;
1677 /* total is the same as the entries */
1678 total += count;
1679 } else
1680 total += count +
1681 ring_buffer_overrun_cpu(tr->buffer, cpu);
1682 entries += count;
1683 }
1684
1685 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
1662 name, UTS_RELEASE); 1686 name, UTS_RELEASE);
1663 seq_puts(m, "-----------------------------------"
1687 seq_puts(m, "# -----------------------------------"
1664 "---------------------------------\n"); 1688 "---------------------------------\n");
1665 seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
1689 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
1666 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 1690 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
1667 nsecs_to_usecs(data->saved_latency), 1691 nsecs_to_usecs(data->saved_latency),
1668 entries, 1692 entries,
@@ -1684,121 +1708,24 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1684#else 1708#else
1685 seq_puts(m, ")\n"); 1709 seq_puts(m, ")\n");
1686#endif 1710#endif
1687 seq_puts(m, " -----------------\n");
1688 seq_printf(m, " | task: %.16s-%d "
1711 seq_puts(m, "# -----------------\n");
1712 seq_printf(m, "# | task: %.16s-%d "
1689 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 1713 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
1690 data->comm, data->pid, data->uid, data->nice, 1714 data->comm, data->pid, data->uid, data->nice,
1691 data->policy, data->rt_priority); 1715 data->policy, data->rt_priority);
1692 seq_puts(m, " -----------------\n");
1716 seq_puts(m, "# -----------------\n");
1693 1717
1694 if (data->critical_start) { 1718 if (data->critical_start) {
1695 seq_puts(m, " => started at: ");
1719 seq_puts(m, "# => started at: ");
1696 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 1720 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
1697 trace_print_seq(m, &iter->seq); 1721 trace_print_seq(m, &iter->seq);
1698 seq_puts(m, "\n => ended at: ");
1722 seq_puts(m, "\n# => ended at: ");
1699 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 1723 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
1700 trace_print_seq(m, &iter->seq); 1724 trace_print_seq(m, &iter->seq);
1701 seq_puts(m, "\n");
1725 seq_puts(m, "\n#\n");
1702 } 1726 }
1703 1727
1704 seq_puts(m, "\n");
1728 seq_puts(m, "#\n");
1705}
1706
1707static void
1708lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1709{
1710 int hardirq, softirq;
1711 char *comm;
1712
1713 comm = trace_find_cmdline(entry->pid);
1714
1715 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
1716 trace_seq_printf(s, "%3d", cpu);
1717 trace_seq_printf(s, "%c%c",
1718 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
1719 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
1720 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
1721
1722 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
1723 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
1724 if (hardirq && softirq) {
1725 trace_seq_putc(s, 'H');
1726 } else {
1727 if (hardirq) {
1728 trace_seq_putc(s, 'h');
1729 } else {
1730 if (softirq)
1731 trace_seq_putc(s, 's');
1732 else
1733 trace_seq_putc(s, '.');
1734 }
1735 }
1736
1737 if (entry->preempt_count)
1738 trace_seq_printf(s, "%x", entry->preempt_count);
1739 else
1740 trace_seq_puts(s, ".");
1741}
1742
1743unsigned long preempt_mark_thresh = 100;
1744
1745static void
1746lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
1747 unsigned long rel_usecs)
1748{
1749 trace_seq_printf(s, " %4lldus", abs_usecs);
1750 if (rel_usecs > preempt_mark_thresh)
1751 trace_seq_puts(s, "!: ");
1752 else if (rel_usecs > 1)
1753 trace_seq_puts(s, "+: ");
1754 else
1755 trace_seq_puts(s, " : ");
1756}
1757
1758static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1759
1760static int task_state_char(unsigned long state)
1761{
1762 int bit = state ? __ffs(state) + 1 : 0;
1763
1764 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
1765}
1766
1767/*
1768 * The message is supposed to contain an ending newline.
1769 * If the printing stops prematurely, try to add a newline of our own.
1770 */
1771void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
1772{
1773 struct trace_entry *ent;
1774 struct trace_field_cont *cont;
1775 bool ok = true;
1776
1777 ent = peek_next_entry(iter, iter->cpu, NULL);
1778 if (!ent || ent->type != TRACE_CONT) {
1779 trace_seq_putc(s, '\n');
1780 return;
1781 }
1782
1783 do {
1784 cont = (struct trace_field_cont *)ent;
1785 if (ok)
1786 ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
1787
1788 ftrace_disable_cpu();
1789
1790 if (iter->buffer_iter[iter->cpu])
1791 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1792 else
1793 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
1794
1795 ftrace_enable_cpu();
1796
1797 ent = peek_next_entry(iter, iter->cpu, NULL);
1798 } while (ent && ent->type == TRACE_CONT);
1799
1800 if (!ok)
1801 trace_seq_putc(s, '\n');
1802} 1729}
1803 1730
1804static void test_cpu_buff_start(struct trace_iterator *iter) 1731static void test_cpu_buff_start(struct trace_iterator *iter)
@@ -1814,142 +1741,15 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
1814 if (cpumask_test_cpu(iter->cpu, iter->started)) 1741 if (cpumask_test_cpu(iter->cpu, iter->started))
1815 return; 1742 return;
1816 1743
1817 cpumask_set_cpu(iter->cpu, iter->started);
1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
1744 if (iter->tr->data[iter->cpu]->skipped_entries)
1745 return;
1819}
1820
1821static enum print_line_t
1822print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1823{
1824 struct trace_seq *s = &iter->seq;
1825 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1826 struct trace_entry *next_entry;
1827 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
1828 struct trace_entry *entry = iter->ent;
1829 unsigned long abs_usecs;
1830 unsigned long rel_usecs;
1831 u64 next_ts;
1832 char *comm;
1833 int S, T;
1834 int i;
1835
1836 if (entry->type == TRACE_CONT)
1837 return TRACE_TYPE_HANDLED;
1838
1839 test_cpu_buff_start(iter);
1840
1841 next_entry = find_next_entry(iter, NULL, &next_ts);
1842 if (!next_entry)
1843 next_ts = iter->ts;
1844 rel_usecs = ns2usecs(next_ts - iter->ts);
1845 abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
1846
1847 if (verbose) {
1848 comm = trace_find_cmdline(entry->pid);
1849 trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
1850 " %ld.%03ldms (+%ld.%03ldms): ",
1851 comm,
1852 entry->pid, cpu, entry->flags,
1853 entry->preempt_count, trace_idx,
1854 ns2usecs(iter->ts),
1855 abs_usecs/1000,
1856 abs_usecs % 1000, rel_usecs/1000,
1857 rel_usecs % 1000);
1858 } else {
1859 lat_print_generic(s, entry, cpu);
1860 lat_print_timestamp(s, abs_usecs, rel_usecs);
1861 }
1862 switch (entry->type) {
1863 case TRACE_FN: {
1864 struct ftrace_entry *field;
1865
1866 trace_assign_type(field, entry);
1867
1868 seq_print_ip_sym(s, field->ip, sym_flags);
1869 trace_seq_puts(s, " (");
1870 seq_print_ip_sym(s, field->parent_ip, sym_flags);
1871 trace_seq_puts(s, ")\n");
1872 break;
1873 }
1874 case TRACE_CTX:
1875 case TRACE_WAKE: {
1876 struct ctx_switch_entry *field;
1877
1878 trace_assign_type(field, entry);
1879
1880 T = task_state_char(field->next_state);
1881 S = task_state_char(field->prev_state);
1882 comm = trace_find_cmdline(field->next_pid);
1883 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1884 field->prev_pid,
1885 field->prev_prio,
1886 S, entry->type == TRACE_CTX ? "==>" : " +",
1887 field->next_cpu,
1888 field->next_pid,
1889 field->next_prio,
1890 T, comm);
1891 break;
1892 }
1893 case TRACE_SPECIAL: {
1894 struct special_entry *field;
1895
1896 trace_assign_type(field, entry);
1897
1898 trace_seq_printf(s, "# %ld %ld %ld\n",
1899 field->arg1,
1900 field->arg2,
1901 field->arg3);
1902 break;
1903 }
1904 case TRACE_STACK: {
1905 struct stack_entry *field;
1906
1907 trace_assign_type(field, entry);
1908
1909 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1910 if (i)
1911 trace_seq_puts(s, " <= ");
1912 seq_print_ip_sym(s, field->caller[i], sym_flags);
1913 }
1914 trace_seq_puts(s, "\n");
1915 break;
1916 }
1917 case TRACE_PRINT: {
1918 struct print_entry *field;
1919
1920 trace_assign_type(field, entry);
1921
1922 seq_print_ip_sym(s, field->ip, sym_flags);
1923 trace_seq_printf(s, ": %s", field->buf);
1924 if (entry->flags & TRACE_FLAG_CONT)
1925 trace_seq_print_cont(s, iter);
1926 break;
1927 }
1928 case TRACE_BRANCH: {
1929 struct trace_branch *field;
1930
1931 trace_assign_type(field, entry);
1932
1933 trace_seq_printf(s, "[%s] %s:%s:%d\n",
1934 field->correct ? " ok " : " MISS ",
1935 field->func,
1936 field->file,
1937 field->line);
1938 break;
1939 }
1940 case TRACE_USER_STACK: {
1941 struct userstack_entry *field;
1942 1746
1943 trace_assign_type(field, entry);
1944
1945 seq_print_userip_objs(field, s, sym_flags);
1946 trace_seq_putc(s, '\n');
1947 break;
1948 }
1747 cpumask_set_cpu(iter->cpu, iter->started);
1748
1749 /* Don't print started cpu buffer for the first entry of the trace */
1750 if (iter->idx > 1)
1751 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
1752 iter->cpu);
1949 default:
1950 trace_seq_printf(s, "Unknown type %d\n", entry->type);
1951 }
1952 return TRACE_TYPE_HANDLED;
1953} 1753}
1954 1754
1955static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 1755static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
@@ -1957,333 +1757,84 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1957 struct trace_seq *s = &iter->seq; 1757 struct trace_seq *s = &iter->seq;
1958 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1758 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1959 struct trace_entry *entry; 1759 struct trace_entry *entry;
1960 unsigned long usec_rem;
1760 struct trace_event *event;
1961 unsigned long long t;
1962 unsigned long secs;
1963 char *comm;
1964 int ret;
1965 int S, T;
1966 int i;
1967 1761
1968 entry = iter->ent; 1762 entry = iter->ent;
1969 1763
1970 if (entry->type == TRACE_CONT)
1971 return TRACE_TYPE_HANDLED;
1972
1973 test_cpu_buff_start(iter); 1764 test_cpu_buff_start(iter);
1974 1765
1975 comm = trace_find_cmdline(iter->ent->pid);
1766 event = ftrace_find_event(entry->type);
1976
1977 t = ns2usecs(iter->ts);
1978 usec_rem = do_div(t, 1000000ULL);
1979 secs = (unsigned long)t;
1980 1767
1981 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
1982 if (!ret)
1983 return TRACE_TYPE_PARTIAL_LINE;
1984 ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
1985 if (!ret)
1986 return TRACE_TYPE_PARTIAL_LINE;
1987 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
1768 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1769 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1770 if (!trace_print_lat_context(iter))
1771 goto partial;
1772 } else {
1773 if (!trace_print_context(iter))
1774 goto partial;
1988 if (!ret)
1989 return TRACE_TYPE_PARTIAL_LINE;
1990
1991 switch (entry->type) {
1992 case TRACE_FN: {
1993 struct ftrace_entry *field;
1994
1995 trace_assign_type(field, entry);
1996
1997 ret = seq_print_ip_sym(s, field->ip, sym_flags);
1998 if (!ret)
1999 return TRACE_TYPE_PARTIAL_LINE;
2000 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
2001 field->parent_ip) {
2002 ret = trace_seq_printf(s, " <-");
2003 if (!ret)
2004 return TRACE_TYPE_PARTIAL_LINE;
2005 ret = seq_print_ip_sym(s,
2006 field->parent_ip,
2007 sym_flags);
2008 if (!ret)
2009 return TRACE_TYPE_PARTIAL_LINE;
2010 }
2011 ret = trace_seq_printf(s, "\n");
2012 if (!ret)
2013 return TRACE_TYPE_PARTIAL_LINE;
2014 break;
2015 }
2016 case TRACE_CTX:
2017 case TRACE_WAKE: {
2018 struct ctx_switch_entry *field;
2019
2020 trace_assign_type(field, entry);
2021
2022 T = task_state_char(field->next_state);
2023 S = task_state_char(field->prev_state);
2024 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
2025 field->prev_pid,
2026 field->prev_prio,
2027 S,
2028 entry->type == TRACE_CTX ? "==>" : " +",
2029 field->next_cpu,
2030 field->next_pid,
2031 field->next_prio,
2032 T);
2033 if (!ret)
2034 return TRACE_TYPE_PARTIAL_LINE;
2035 break;
2036 }
2037 case TRACE_SPECIAL: {
2038 struct special_entry *field;
2039
2040 trace_assign_type(field, entry);
2041
2042 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
2043 field->arg1,
2044 field->arg2,
2045 field->arg3);
2046 if (!ret)
2047 return TRACE_TYPE_PARTIAL_LINE;
2048 break;
2049 }
2050 case TRACE_STACK: {
2051 struct stack_entry *field;
2052
2053 trace_assign_type(field, entry);
2054
2055 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
2056 if (i) {
2057 ret = trace_seq_puts(s, " <= ");
2058 if (!ret)
2059 return TRACE_TYPE_PARTIAL_LINE;
2060 }
2061 ret = seq_print_ip_sym(s, field->caller[i],
2062 sym_flags);
2063 if (!ret)
2064 return TRACE_TYPE_PARTIAL_LINE;
2065 } 1775 }
2066 ret = trace_seq_puts(s, "\n");
2067 if (!ret)
2068 return TRACE_TYPE_PARTIAL_LINE;
2069 break;
2070 } 1776 }
2071 case TRACE_PRINT: {
2072 struct print_entry *field;
2073 1777
2074 trace_assign_type(field, entry);
2075
2076 seq_print_ip_sym(s, field->ip, sym_flags);
2077 trace_seq_printf(s, ": %s", field->buf);
1778 if (event)
1779 return event->trace(iter, sym_flags);
1780
1781 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
1782 goto partial;
2078 if (entry->flags & TRACE_FLAG_CONT)
2079 trace_seq_print_cont(s, iter);
2080 break;
2081 }
2082 case TRACE_GRAPH_RET: {
2083 return print_graph_function(iter);
2084 }
2085 case TRACE_GRAPH_ENT: {
2086 return print_graph_function(iter);
2087 }
2088 case TRACE_BRANCH: {
2089 struct trace_branch *field;
2090
2091 trace_assign_type(field, entry);
2092
2093 trace_seq_printf(s, "[%s] %s:%s:%d\n",
2094 field->correct ? " ok " : " MISS ",
2095 field->func,
2096 field->file,
2097 field->line);
2098 break;
2099 }
2100 case TRACE_USER_STACK: {
2101 struct userstack_entry *field;
2102 1783
2103 trace_assign_type(field, entry);
2104
2105 ret = seq_print_userip_objs(field, s, sym_flags);
2106 if (!ret)
2107 return TRACE_TYPE_PARTIAL_LINE;
2108 ret = trace_seq_putc(s, '\n');
2109 if (!ret)
2110 return TRACE_TYPE_PARTIAL_LINE;
2111 break;
2112 }
2113 }
2114 return TRACE_TYPE_HANDLED; 1784 return TRACE_TYPE_HANDLED;
1785partial:
1786 return TRACE_TYPE_PARTIAL_LINE;
2115} 1787}
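The big per-type switch that used to live here is replaced by a lookup through ftrace_find_event(): each entry type registers a struct trace_event whose handlers render one entry per output mode. A sketch of such a handler follows; it is illustrative only (the real handlers live in trace_output.c, and the struct layout is assumed from trace_output.h of this era):

static enum print_line_t example_fn_trace(struct trace_iterator *iter,
					  int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);
	if (!trace_seq_printf(&iter->seq, "%lx <- %lx\n",
			      field->ip, field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct trace_event example_fn_event = {
	.type	= TRACE_FN,
	.trace	= example_fn_trace,	/* plus .raw, .hex, .binary */
};
/* registered once via register_ftrace_event(&example_fn_event) */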
2116 1788
2117static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 1789static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2118{ 1790{
2119 struct trace_seq *s = &iter->seq; 1791 struct trace_seq *s = &iter->seq;
2120 struct trace_entry *entry; 1792 struct trace_entry *entry;
2121 int ret;
1793 struct trace_event *event;
2122 int S, T;
2123 1794
2124 entry = iter->ent; 1795 entry = iter->ent;
2125 1796
2126 if (entry->type == TRACE_CONT)
2127 return TRACE_TYPE_HANDLED;
2128
2129 ret = trace_seq_printf(s, "%d %d %llu ",
1797 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1798 if (!trace_seq_printf(s, "%d %d %llu ",
1799 entry->pid, iter->cpu, iter->ts))
1800 goto partial;
2130 entry->pid, iter->cpu, iter->ts);
2131 if (!ret)
2132 return TRACE_TYPE_PARTIAL_LINE;
2133
2134 switch (entry->type) {
2135 case TRACE_FN: {
2136 struct ftrace_entry *field;
2137
2138 trace_assign_type(field, entry);
2139
2140 ret = trace_seq_printf(s, "%x %x\n",
2141 field->ip,
2142 field->parent_ip);
2143 if (!ret)
2144 return TRACE_TYPE_PARTIAL_LINE;
2145 break;
2146 } 1801 }
2147 case TRACE_CTX:
2148 case TRACE_WAKE: {
2149 struct ctx_switch_entry *field;
2150
2151 trace_assign_type(field, entry);
2152
2153 T = task_state_char(field->next_state);
2154 S = entry->type == TRACE_WAKE ? '+' :
2155 task_state_char(field->prev_state);
2156 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
2157 field->prev_pid,
2158 field->prev_prio,
2159 S,
2160 field->next_cpu,
2161 field->next_pid,
2162 field->next_prio,
2163 T);
2164 if (!ret)
2165 return TRACE_TYPE_PARTIAL_LINE;
2166 break;
2167 }
2168 case TRACE_SPECIAL:
2169 case TRACE_USER_STACK:
2170 case TRACE_STACK: {
2171 struct special_entry *field;
2172
2173 trace_assign_type(field, entry);
2174 1802
2175 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
2176 field->arg1,
2177 field->arg2,
2178 field->arg3);
2179 if (!ret)
2180 return TRACE_TYPE_PARTIAL_LINE;
2181 break;
2182 }
1803 event = ftrace_find_event(entry->type);
1804 if (event)
1805 return event->raw(iter, 0);
2183 case TRACE_PRINT: {
2184 struct print_entry *field;
2185 1806
2186 trace_assign_type(field, entry);
1807 if (!trace_seq_printf(s, "%d ?\n", entry->type))
1808 goto partial;
2187 1809
2188 trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
2189 if (entry->flags & TRACE_FLAG_CONT)
2190 trace_seq_print_cont(s, iter);
2191 break;
2192 }
2193 }
2194 return TRACE_TYPE_HANDLED; 1810 return TRACE_TYPE_HANDLED;
1811partial:
1812 return TRACE_TYPE_PARTIAL_LINE;
2195} 1813}
2196 1814
2197#define SEQ_PUT_FIELD_RET(s, x) \
2198do { \
2199 if (!trace_seq_putmem(s, &(x), sizeof(x))) \
2200 return 0; \
2201} while (0)
2202
2203#define SEQ_PUT_HEX_FIELD_RET(s, x) \
2204do { \
2205 BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
2206 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
2207 return 0; \
2208} while (0)
2209
2210static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 1815static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2211{ 1816{
2212 struct trace_seq *s = &iter->seq; 1817 struct trace_seq *s = &iter->seq;
2213 unsigned char newline = '\n'; 1818 unsigned char newline = '\n';
2214 struct trace_entry *entry; 1819 struct trace_entry *entry;
2215 int S, T;
1820 struct trace_event *event;
2216 1821
2217 entry = iter->ent; 1822 entry = iter->ent;
2218 1823
2219 if (entry->type == TRACE_CONT)
2220 return TRACE_TYPE_HANDLED;
2221
2222 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1824 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1825 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1826 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1827 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2223 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2224 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2225
2226 switch (entry->type) {
2227 case TRACE_FN: {
2228 struct ftrace_entry *field;
2229
2230 trace_assign_type(field, entry);
2231
2232 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
2233 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
2234 break;
2235 } 1828 }
2236 case TRACE_CTX:
2237 case TRACE_WAKE: {
2238 struct ctx_switch_entry *field;
2239
2240 trace_assign_type(field, entry);
2241
2242 T = task_state_char(field->next_state);
2243 S = entry->type == TRACE_WAKE ? '+' :
2244 task_state_char(field->prev_state);
2245 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
2246 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
2247 SEQ_PUT_HEX_FIELD_RET(s, S);
2248 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
2249 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
2250 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
2251 SEQ_PUT_HEX_FIELD_RET(s, T);
2252 break;
2253 }
2254 case TRACE_SPECIAL:
2255 case TRACE_USER_STACK:
2256 case TRACE_STACK: {
2257 struct special_entry *field;
2258
2259 trace_assign_type(field, entry);
2260 1829
2261 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
2262 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
2263 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
2264 break;
2265 }
1830 event = ftrace_find_event(entry->type);
1831 if (event) {
1832 enum print_line_t ret = event->hex(iter, 0);
1833 if (ret != TRACE_TYPE_HANDLED)
1834 return ret;
2266 } 1835 }
2267 SEQ_PUT_FIELD_RET(s, newline);
2268
2269 return TRACE_TYPE_HANDLED;
2270}
2271
2272static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
2273{
2274 struct trace_seq *s = &iter->seq;
2275 struct trace_entry *entry = iter->ent;
2276 struct print_entry *field;
2277 int ret;
2278
2279 trace_assign_type(field, entry);
2280 1836
2281 ret = trace_seq_printf(s, field->buf);
1837 SEQ_PUT_FIELD_RET(s, newline);
2282 if (!ret)
2283 return TRACE_TYPE_PARTIAL_LINE;
2284
2285 if (entry->flags & TRACE_FLAG_CONT)
2286 trace_seq_print_cont(s, iter);
2287 1838
2288 return TRACE_TYPE_HANDLED; 1839 return TRACE_TYPE_HANDLED;
2289} 1840}
@@ -2292,59 +1843,37 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2292{ 1843{
2293 struct trace_seq *s = &iter->seq; 1844 struct trace_seq *s = &iter->seq;
2294 struct trace_entry *entry; 1845 struct trace_entry *entry;
1846 struct trace_event *event;
2295 1847
2296 entry = iter->ent; 1848 entry = iter->ent;
2297 1849
2298 if (entry->type == TRACE_CONT)
2299 return TRACE_TYPE_HANDLED;
2300
2301 SEQ_PUT_FIELD_RET(s, entry->pid);
1850 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1851 SEQ_PUT_FIELD_RET(s, entry->pid);
1852 SEQ_PUT_FIELD_RET(s, iter->cpu);
1853 SEQ_PUT_FIELD_RET(s, iter->ts);
2302 SEQ_PUT_FIELD_RET(s, entry->cpu);
2303 SEQ_PUT_FIELD_RET(s, iter->ts);
2304
2305 switch (entry->type) {
2306 case TRACE_FN: {
2307 struct ftrace_entry *field;
2308
2309 trace_assign_type(field, entry);
2310
2311 SEQ_PUT_FIELD_RET(s, field->ip);
2312 SEQ_PUT_FIELD_RET(s, field->parent_ip);
2313 break;
2314 }
2315 case TRACE_CTX: {
2316 struct ctx_switch_entry *field;
2317
2318 trace_assign_type(field, entry);
2319
2320 SEQ_PUT_FIELD_RET(s, field->prev_pid);
2321 SEQ_PUT_FIELD_RET(s, field->prev_prio);
2322 SEQ_PUT_FIELD_RET(s, field->prev_state);
2323 SEQ_PUT_FIELD_RET(s, field->next_pid);
2324 SEQ_PUT_FIELD_RET(s, field->next_prio);
2325 SEQ_PUT_FIELD_RET(s, field->next_state);
2326 break;
2327 } 1854 }
2328 case TRACE_SPECIAL:
2329 case TRACE_USER_STACK:
2330 case TRACE_STACK: {
2331 struct special_entry *field;
2332
2333 trace_assign_type(field, entry);
2334 1855
2335 SEQ_PUT_FIELD_RET(s, field->arg1);
2336 SEQ_PUT_FIELD_RET(s, field->arg2);
2337 SEQ_PUT_FIELD_RET(s, field->arg3);
1856 event = ftrace_find_event(entry->type);
1857 return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
2338 break;
2339 }
2340 }
2341 return 1;
2342} 1858}
2343 1859
2344static int trace_empty(struct trace_iterator *iter) 1860static int trace_empty(struct trace_iterator *iter)
2345{ 1861{
2346 int cpu; 1862 int cpu;
2347 1863
1864 /* If we are looking at one CPU buffer, only check that one */
1865 if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
1866 cpu = iter->cpu_file;
1867 if (iter->buffer_iter[cpu]) {
1868 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1869 return 0;
1870 } else {
1871 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1872 return 0;
1873 }
1874 return 1;
1875 }
1876
2348 for_each_tracing_cpu(cpu) { 1877 for_each_tracing_cpu(cpu) {
2349 if (iter->buffer_iter[cpu]) { 1878 if (iter->buffer_iter[cpu]) {
2350 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) 1879 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
@@ -2358,6 +1887,7 @@ static int trace_empty(struct trace_iterator *iter)
2358 return 1; 1887 return 1;
2359} 1888}
2360 1889
1890/* Called with trace_event_read_lock() held. */
2361static enum print_line_t print_trace_line(struct trace_iterator *iter) 1891static enum print_line_t print_trace_line(struct trace_iterator *iter)
2362{ 1892{
2363 enum print_line_t ret; 1893 enum print_line_t ret;
@@ -2368,10 +1898,15 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
2368 return ret; 1898 return ret;
2369 } 1899 }
2370 1900
1901 if (iter->ent->type == TRACE_BPRINT &&
1902 trace_flags & TRACE_ITER_PRINTK &&
1903 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
1904 return trace_print_bprintk_msg_only(iter);
1905
2371 if (iter->ent->type == TRACE_PRINT && 1906 if (iter->ent->type == TRACE_PRINT &&
2372 trace_flags & TRACE_ITER_PRINTK && 1907 trace_flags & TRACE_ITER_PRINTK &&
2373 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 1908 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2374 return print_printk_msg_only(iter);
1909 return trace_print_printk_msg_only(iter);
2375 1910
2376 if (trace_flags & TRACE_ITER_BIN) 1911 if (trace_flags & TRACE_ITER_BIN)
2377 return print_bin_fmt(iter); 1912 return print_bin_fmt(iter);
@@ -2382,9 +1917,6 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
2382 if (trace_flags & TRACE_ITER_RAW) 1917 if (trace_flags & TRACE_ITER_RAW)
2383 return print_raw_fmt(iter); 1918 return print_raw_fmt(iter);
2384 1919
2385 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2386 return print_lat_fmt(iter, iter->idx, iter->cpu);
2387
2388 return print_trace_fmt(iter); 1920 return print_trace_fmt(iter);
2389} 1921}
2390 1922
@@ -2418,7 +1950,7 @@ static int s_show(struct seq_file *m, void *v)
2418 return 0; 1950 return 0;
2419} 1951}
2420 1952
2421static struct seq_operations tracer_seq_ops = {
1953static const struct seq_operations tracer_seq_ops = {
2422 .start = s_start, 1954 .start = s_start,
2423 .next = s_next, 1955 .next = s_next,
2424 .stop = s_stop, 1956 .stop = s_stop,
@@ -2426,30 +1958,43 @@ static struct seq_operations tracer_seq_ops = {
2426}; 1958};
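For orientation, the seq_file core drives these four operations in a fixed pattern; the function below is a simplified sketch of what fs/seq_file.c does with tracer_seq_ops, not code from this patch (error handling from ->show() is elided):

static void example_seq_walk(struct seq_file *m,
			     const struct seq_operations *ops)
{
	loff_t pos = 0;
	void *v;

	/* s_start() positions the iterator and takes the event read lock */
	for (v = ops->start(m, &pos); v; v = ops->next(m, v, &pos))
		ops->show(m, v);	/* s_show() renders one entry */
	ops->stop(m, v);		/* s_stop() releases the lock */
}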
2427 1959
2428static struct trace_iterator * 1960static struct trace_iterator *
2429__tracing_open(struct inode *inode, struct file *file, int *ret)
1961__tracing_open(struct inode *inode, struct file *file)
2430{ 1962{
1963 long cpu_file = (long) inode->i_private;
1964 void *fail_ret = ERR_PTR(-ENOMEM);
2431 struct trace_iterator *iter; 1965 struct trace_iterator *iter;
2432 struct seq_file *m; 1966 struct seq_file *m;
2433 int cpu;
1967 int cpu, ret;
2434 1968
2435 if (tracing_disabled) {
2436 *ret = -ENODEV;
2437 return NULL;
2438 }
1969 if (tracing_disabled)
1970 return ERR_PTR(-ENODEV);
2439 1971
2440 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 1972 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2441 if (!iter) {
2442 *ret = -ENOMEM;
2443 goto out;
2444 }
1973 if (!iter)
1974 return ERR_PTR(-ENOMEM);
2445 1975
1976 /*
1977 * We make a copy of the current tracer to avoid concurrent
1978 * changes on it while we are reading.
1979 */
2446 mutex_lock(&trace_types_lock); 1980 mutex_lock(&trace_types_lock);
1981 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
1982 if (!iter->trace)
1983 goto fail;
1984
1985 if (current_trace)
1986 *iter->trace = *current_trace;
1987
1988 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
1989 goto fail;
1990
2447 if (current_trace && current_trace->print_max) 1991 if (current_trace && current_trace->print_max)
2448 iter->tr = &max_tr; 1992 iter->tr = &max_tr;
2449 else 1993 else
2450 iter->tr = inode->i_private;
1994 iter->tr = &global_trace;
2451 iter->trace = current_trace;
2452 iter->pos = -1; 1995 iter->pos = -1;
1996 mutex_init(&iter->mutex);
1997 iter->cpu_file = cpu_file;
2453 1998
2454 /* Notify the tracer early; before we stop tracing. */ 1999 /* Notify the tracer early; before we stop tracing. */
2455 if (iter->trace && iter->trace->open) 2000 if (iter->trace && iter->trace->open)
@@ -2459,30 +2004,34 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
2459 if (ring_buffer_overruns(iter->tr->buffer)) 2004 if (ring_buffer_overruns(iter->tr->buffer))
2460 iter->iter_flags |= TRACE_FILE_ANNOTATE; 2005 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2461 2006
2007 /* stop the trace while dumping */
2008 tracing_stop();
2462 2009
2463 for_each_tracing_cpu(cpu) {
2010 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2011 for_each_tracing_cpu(cpu) {
2464 2012
2013 iter->buffer_iter[cpu] =
2014 ring_buffer_read_start(iter->tr->buffer, cpu);
2015 tracing_iter_reset(iter, cpu);
2016 }
2017 } else {
2018 cpu = iter->cpu_file;
2465 iter->buffer_iter[cpu] = 2019 iter->buffer_iter[cpu] =
2466 ring_buffer_read_start(iter->tr->buffer, cpu); 2020 ring_buffer_read_start(iter->tr->buffer, cpu);
2467
2021 tracing_iter_reset(iter, cpu);
2468 if (!iter->buffer_iter[cpu])
2469 goto fail_buffer;
2470 } 2022 }
2471 2023
2472 /* TODO stop tracer */
2473 *ret = seq_open(file, &tracer_seq_ops);
2474 if (*ret)
2024 ret = seq_open(file, &tracer_seq_ops);
2025 if (ret < 0) {
2026 fail_ret = ERR_PTR(ret);
2475 goto fail_buffer; 2027 goto fail_buffer;
2028 }
2476 2029
2477 m = file->private_data; 2030 m = file->private_data;
2478 m->private = iter; 2031 m->private = iter;
2479 2032
2480 /* stop the trace while dumping */
2481 tracing_stop();
2482
2483 mutex_unlock(&trace_types_lock); 2033 mutex_unlock(&trace_types_lock);
2484 2034
2485 out:
2486 return iter; 2035 return iter;
2487 2036
2488 fail_buffer: 2037 fail_buffer:
@@ -2490,10 +2039,14 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
2490 if (iter->buffer_iter[cpu]) 2039 if (iter->buffer_iter[cpu])
2491 ring_buffer_read_finish(iter->buffer_iter[cpu]); 2040 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2492 } 2041 }
2042 free_cpumask_var(iter->started);
2043 tracing_start();
2044 fail:
2493 mutex_unlock(&trace_types_lock); 2045 mutex_unlock(&trace_types_lock);
2046 kfree(iter->trace);
2494 kfree(iter); 2047 kfree(iter);
2495 2048
2496 return ERR_PTR(-ENOMEM);
2049 return fail_ret;
2497} 2050}
2498 2051
2499int tracing_open_generic(struct inode *inode, struct file *filp) 2052int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2505,12 +2058,17 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
2505 return 0; 2058 return 0;
2506} 2059}
2507 2060
2508int tracing_release(struct inode *inode, struct file *file)
2061static int tracing_release(struct inode *inode, struct file *file)
2509{ 2062{
2510 struct seq_file *m = (struct seq_file *)file->private_data; 2063 struct seq_file *m = (struct seq_file *)file->private_data;
2511 struct trace_iterator *iter = m->private;
2064 struct trace_iterator *iter;
2512 int cpu; 2065 int cpu;
2513 2066
2067 if (!(file->f_mode & FMODE_READ))
2068 return 0;
2069
2070 iter = m->private;
2071
2514 mutex_lock(&trace_types_lock); 2072 mutex_lock(&trace_types_lock);
2515 for_each_tracing_cpu(cpu) { 2073 for_each_tracing_cpu(cpu) {
2516 if (iter->buffer_iter[cpu]) 2074 if (iter->buffer_iter[cpu])
@@ -2525,55 +2083,59 @@ int tracing_release(struct inode *inode, struct file *file)
2525 mutex_unlock(&trace_types_lock); 2083 mutex_unlock(&trace_types_lock);
2526 2084
2527 seq_release(inode, file); 2085 seq_release(inode, file);
2086 mutex_destroy(&iter->mutex);
2087 free_cpumask_var(iter->started);
2088 kfree(iter->trace);
2528 kfree(iter); 2089 kfree(iter);
2529 return 0; 2090 return 0;
2530} 2091}
2531 2092
2532static int tracing_open(struct inode *inode, struct file *file) 2093static int tracing_open(struct inode *inode, struct file *file)
2533{ 2094{
2534 int ret;
2535
2536 __tracing_open(inode, file, &ret);
2537
2538 return ret;
2539}
2540
2541static int tracing_lt_open(struct inode *inode, struct file *file)
2542{
2543 struct trace_iterator *iter; 2095 struct trace_iterator *iter;
2544 int ret;
2096 int ret = 0;
2545 2097
2546 iter = __tracing_open(inode, file, &ret);
2098 /* If this file was open for write, then erase contents */
2099 if ((file->f_mode & FMODE_WRITE) &&
2100 (file->f_flags & O_TRUNC)) {
2101 long cpu = (long) inode->i_private;
2547 2102
2548 if (!ret)
2549 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2103 if (cpu == TRACE_PIPE_ALL_CPU)
2104 tracing_reset_online_cpus(&global_trace);
2105 else
2106 tracing_reset(&global_trace, cpu);
2107 }
2550 2108
2109 if (file->f_mode & FMODE_READ) {
2110 iter = __tracing_open(inode, file);
2111 if (IS_ERR(iter))
2112 ret = PTR_ERR(iter);
2113 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2114 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2115 }
2551 return ret; 2116 return ret;
2552} 2117}
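With the O_TRUNC handling above, a truncating open of the trace file now clears the buffer before any read, so the write path itself can remain a no-op (tracing_write_stub, below). Illustrative shell usage, assuming debugfs is mounted at /sys/kernel/debug:

# echo > /sys/kernel/debug/tracing/trace
# cat /sys/kernel/debug/tracing/trace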
2553 2118
2554
2555static void * 2119static void *
2556t_next(struct seq_file *m, void *v, loff_t *pos) 2120t_next(struct seq_file *m, void *v, loff_t *pos)
2557{ 2121{
2558 struct tracer *t = m->private;
2122 struct tracer *t = v;
2559 2123
2560 (*pos)++; 2124 (*pos)++;
2561 2125
2562 if (t) 2126 if (t)
2563 t = t->next; 2127 t = t->next;
2564 2128
2565 m->private = t;
2566
2567 return t; 2129 return t;
2568} 2130}
2569 2131
2570static void *t_start(struct seq_file *m, loff_t *pos) 2132static void *t_start(struct seq_file *m, loff_t *pos)
2571{ 2133{
2572 struct tracer *t = m->private;
2134 struct tracer *t;
2573 loff_t l = 0; 2135 loff_t l = 0;
2574 2136
2575 mutex_lock(&trace_types_lock); 2137 mutex_lock(&trace_types_lock);
2576 for (; t && l < *pos; t = t_next(m, t, &l))
2138 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2577 ; 2139 ;
2578 2140
2579 return t; 2141 return t;
@@ -2600,7 +2162,7 @@ static int t_show(struct seq_file *m, void *v)
2600 return 0; 2162 return 0;
2601} 2163}
2602 2164
2603static struct seq_operations show_traces_seq_ops = { 2165static const struct seq_operations show_traces_seq_ops = {
2604 .start = t_start, 2166 .start = t_start,
2605 .next = t_next, 2167 .next = t_next,
2606 .stop = t_stop, 2168 .stop = t_stop,
@@ -2609,35 +2171,28 @@ static struct seq_operations show_traces_seq_ops = {
2609 2171
2610static int show_traces_open(struct inode *inode, struct file *file) 2172static int show_traces_open(struct inode *inode, struct file *file)
2611{ 2173{
2612 int ret;
2613
2614 if (tracing_disabled) 2174 if (tracing_disabled)
2615 return -ENODEV; 2175 return -ENODEV;
2616 2176
2617 ret = seq_open(file, &show_traces_seq_ops);
2618 if (!ret) {
2619 struct seq_file *m = file->private_data;
2620 m->private = trace_types;
2621 }
2177 return seq_open(file, &show_traces_seq_ops);
2178}
2622 2179
2623 return ret;
2180static ssize_t
2181tracing_write_stub(struct file *filp, const char __user *ubuf,
2182 size_t count, loff_t *ppos)
2183{
2184 return count;
2624} 2185}
2625 2186
2626static struct file_operations tracing_fops = {
2187static const struct file_operations tracing_fops = {
2627 .open = tracing_open, 2188 .open = tracing_open,
2628 .read = seq_read, 2189 .read = seq_read,
2190 .write = tracing_write_stub,
2629 .llseek = seq_lseek, 2191 .llseek = seq_lseek,
2630 .release = tracing_release, 2192 .release = tracing_release,
2631}; 2193};
2632 2194
2633static struct file_operations tracing_lt_fops = {
2634 .open = tracing_lt_open,
2635 .read = seq_read,
2636 .llseek = seq_lseek,
2637 .release = tracing_release,
2638};
2639
2640static struct file_operations show_traces_fops = {
2195static const struct file_operations show_traces_fops = {
2641 .open = show_traces_open, 2196 .open = show_traces_open,
2642 .read = seq_read, 2197 .read = seq_read,
2643 .release = seq_release, 2198 .release = seq_release,
@@ -2692,11 +2247,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2692 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 2247 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2693 return -ENOMEM; 2248 return -ENOMEM;
2694 2249
2695 mutex_lock(&tracing_cpumask_update_lock);
2696 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 2250 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2697 if (err) 2251 if (err)
2698 goto err_unlock; 2252 goto err_unlock;
2699 2253
2254 mutex_lock(&tracing_cpumask_update_lock);
2255
2700 local_irq_disable(); 2256 local_irq_disable();
2701 __raw_spin_lock(&ftrace_max_lock); 2257 __raw_spin_lock(&ftrace_max_lock);
2702 for_each_tracing_cpu(cpu) { 2258 for_each_tracing_cpu(cpu) {
@@ -2724,13 +2280,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2724 return count; 2280 return count;
2725 2281
2726err_unlock: 2282err_unlock:
2727 mutex_unlock(&tracing_cpumask_update_lock); 2283 free_cpumask_var(tracing_cpumask_new);
2728 free_cpumask_var(tracing_cpumask);
2729 2284
2730 return err; 2285 return err;
2731} 2286}
2732 2287
2733static struct file_operations tracing_cpumask_fops = {
2288static const struct file_operations tracing_cpumask_fops = {
2734 .open = tracing_open_generic, 2289 .open = tracing_open_generic,
2735 .read = tracing_cpumask_read, 2290 .read = tracing_cpumask_read,
2736 .write = tracing_cpumask_write, 2291 .write = tracing_cpumask_write,
@@ -2740,80 +2295,85 @@ static ssize_t
2740tracing_trace_options_read(struct file *filp, char __user *ubuf, 2295tracing_trace_options_read(struct file *filp, char __user *ubuf,
2741 size_t cnt, loff_t *ppos) 2296 size_t cnt, loff_t *ppos)
2742{ 2297{
2743 int i;
2298 struct tracer_opt *trace_opts;
2299 u32 tracer_flags;
2300 int len = 0;
2744 char *buf; 2301 char *buf;
2745 int r = 0; 2302 int r = 0;
2746 int len = 0;
2747 u32 tracer_flags = current_trace->flags->val;
2748 struct tracer_opt *trace_opts = current_trace->flags->opts;
2303 int i;
2749 2304
2750 2305
2751 /* calulate max size */
2306 /* calculate max size */
2752 for (i = 0; trace_options[i]; i++) { 2307 for (i = 0; trace_options[i]; i++) {
2753 len += strlen(trace_options[i]); 2308 len += strlen(trace_options[i]);
2754 len += 3; /* "no" and space */
2309 len += 3; /* "no" and newline */
2755 } 2310 }
2756 2311
2312 mutex_lock(&trace_types_lock);
2313 tracer_flags = current_trace->flags->val;
2314 trace_opts = current_trace->flags->opts;
2315
2757 /* 2316 /*
2758 * Increase the size with names of options specific 2317 * Increase the size with names of options specific
2759 * of the current tracer. 2318 * of the current tracer.
2760 */ 2319 */
2761 for (i = 0; trace_opts[i].name; i++) { 2320 for (i = 0; trace_opts[i].name; i++) {
2762 len += strlen(trace_opts[i].name); 2321 len += strlen(trace_opts[i].name);
2763 len += 3; /* "no" and space */
2322 len += 3; /* "no" and newline */
2764 } 2323 }
2765 2324
2766 /* +2 for \n and \0 */
2767 buf = kmalloc(len + 2, GFP_KERNEL);
2768 if (!buf)
2325 /* +1 for \0 */
2326 buf = kmalloc(len + 1, GFP_KERNEL);
2327 if (!buf) {
2328 mutex_unlock(&trace_types_lock);
2769 return -ENOMEM; 2329 return -ENOMEM;
2330 }
2770 2331
2771 for (i = 0; trace_options[i]; i++) { 2332 for (i = 0; trace_options[i]; i++) {
2772 if (trace_flags & (1 << i)) 2333 if (trace_flags & (1 << i))
2773 r += sprintf(buf + r, "%s ", trace_options[i]);
2334 r += sprintf(buf + r, "%s\n", trace_options[i]);
2774 else 2335 else
2775 r += sprintf(buf + r, "no%s ", trace_options[i]);
2336 r += sprintf(buf + r, "no%s\n", trace_options[i]);
2776 } 2337 }
2777 2338
2778 for (i = 0; trace_opts[i].name; i++) { 2339 for (i = 0; trace_opts[i].name; i++) {
2779 if (tracer_flags & trace_opts[i].bit) 2340 if (tracer_flags & trace_opts[i].bit)
2780 r += sprintf(buf + r, "%s ",
2341 r += sprintf(buf + r, "%s\n",
2781 trace_opts[i].name); 2342 trace_opts[i].name);
2782 else 2343 else
2783 r += sprintf(buf + r, "no%s ",
2344 r += sprintf(buf + r, "no%s\n",
2784 trace_opts[i].name); 2345 trace_opts[i].name);
2785 } 2346 }
2347 mutex_unlock(&trace_types_lock);
2786 2348
2787 r += sprintf(buf + r, "\n");
2788 WARN_ON(r >= len + 2);
2349 WARN_ON(r >= len + 1);
2789 2350
2790 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2351 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2791 2352
2792 kfree(buf); 2353 kfree(buf);
2793
2794 return r; 2354 return r;
2795} 2355}
2796 2356
2797/* Try to assign a tracer specific option */ 2357/* Try to assign a tracer specific option */
2798static int set_tracer_option(struct tracer *trace, char *cmp, int neg) 2358static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2799{ 2359{
2800 struct tracer_flags *trace_flags = trace->flags;
2360 struct tracer_flags *tracer_flags = trace->flags;
2801 struct tracer_opt *opts = NULL; 2361 struct tracer_opt *opts = NULL;
2802 int ret = 0, i = 0; 2362 int ret = 0, i = 0;
2803 int len; 2363 int len;
2804 2364
2805 for (i = 0; trace_flags->opts[i].name; i++) {
2806 opts = &trace_flags->opts[i];
2365 for (i = 0; tracer_flags->opts[i].name; i++) {
2366 opts = &tracer_flags->opts[i];
2807 len = strlen(opts->name); 2367 len = strlen(opts->name);
2808 2368
2809 if (strncmp(cmp, opts->name, len) == 0) { 2369 if (strncmp(cmp, opts->name, len) == 0) {
2810 ret = trace->set_flag(trace_flags->val,
2370 ret = trace->set_flag(tracer_flags->val,
2811 opts->bit, !neg); 2371 opts->bit, !neg);
2812 break; 2372 break;
2813 } 2373 }
2814 } 2374 }
2815 /* Not found */ 2375 /* Not found */
2816 if (!trace_flags->opts[i].name)
2376 if (!tracer_flags->opts[i].name)
2817 return -EINVAL; 2377 return -EINVAL;
2818 2378
2819 /* Refused to handle */ 2379 /* Refused to handle */
@@ -2821,13 +2381,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2821 return ret; 2381 return ret;
2822 2382
2823 if (neg) 2383 if (neg)
2824 trace_flags->val &= ~opts->bit;
2384 tracer_flags->val &= ~opts->bit;
2825 else 2385 else
2826 trace_flags->val |= opts->bit;
2386 tracer_flags->val |= opts->bit;
2827 2387
2828 return 0; 2388 return 0;
2829} 2389}
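To make the option plumbing concrete, a tracer exposes the private options that set_tracer_option() toggles roughly as sketched below. The tracer and option names are hypothetical; TRACER_OPT is the helper macro from trace.h:

static struct tracer_opt example_opts[] = {
	/* toggled via: echo example_opt > trace_options */
	{ TRACER_OPT(example_opt, 0x1) },
	{ }	/* terminator: set_tracer_option() stops at a NULL name */
};

static struct tracer_flags example_flags = {
	.val  = 0,
	.opts = example_opts,
};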
2830 2390
2391static void set_tracer_flags(unsigned int mask, int enabled)
2392{
2393 /* do nothing if flag is already set */
2394 if (!!(trace_flags & mask) == !!enabled)
2395 return;
2396
2397 if (enabled)
2398 trace_flags |= mask;
2399 else
2400 trace_flags &= ~mask;
2401}
2402
2831static ssize_t 2403static ssize_t
2832tracing_trace_options_write(struct file *filp, const char __user *ubuf, 2404tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2833 size_t cnt, loff_t *ppos) 2405 size_t cnt, loff_t *ppos)
@@ -2855,27 +2427,26 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2855 int len = strlen(trace_options[i]); 2427 int len = strlen(trace_options[i]);
2856 2428
2857 if (strncmp(cmp, trace_options[i], len) == 0) { 2429 if (strncmp(cmp, trace_options[i], len) == 0) {
2858 if (neg)
2859 trace_flags &= ~(1 << i);
2860 else
2861 trace_flags |= (1 << i);
2430 set_tracer_flags(1 << i, !neg);
2862 break; 2431 break;
2863 } 2432 }
2864 } 2433 }
2865 2434
2866 /* If no option could be set, test the specific tracer options */ 2435 /* If no option could be set, test the specific tracer options */
2867 if (!trace_options[i]) { 2436 if (!trace_options[i]) {
2437 mutex_lock(&trace_types_lock);
2868 ret = set_tracer_option(current_trace, cmp, neg); 2438 ret = set_tracer_option(current_trace, cmp, neg);
2439 mutex_unlock(&trace_types_lock);
2869 if (ret) 2440 if (ret)
2870 return ret; 2441 return ret;
2871 } 2442 }
2872 2443
2873 filp->f_pos += cnt;
2444 *ppos += cnt;
2874 2445
2875 return cnt; 2446 return cnt;
2876} 2447}
2877 2448
2878static struct file_operations tracing_iter_fops = {
2449static const struct file_operations tracing_iter_fops = {
2879 .open = tracing_open_generic, 2450 .open = tracing_open_generic,
2880 .read = tracing_trace_options_read, 2451 .read = tracing_trace_options_read,
2881 .write = tracing_trace_options_write, 2452 .write = tracing_trace_options_write,
@@ -2883,21 +2454,20 @@ static struct file_operations tracing_iter_fops = {
2883 2454
2884static const char readme_msg[] = 2455static const char readme_msg[] =
2885 "tracing mini-HOWTO:\n\n" 2456 "tracing mini-HOWTO:\n\n"
2886 "# mkdir /debug\n"
2887 "# mount -t debugfs nodev /debug\n\n"
2888 "# cat /debug/tracing/available_tracers\n"
2889 "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
2890 "# cat /debug/tracing/current_tracer\n"
2891 "none\n"
2892 "# echo sched_switch > /debug/tracing/current_tracer\n"
2893 "# cat /debug/tracing/current_tracer\n"
2457 "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2458 "# cat /sys/kernel/debug/tracing/available_tracers\n"
2459 "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
2460 "# cat /sys/kernel/debug/tracing/current_tracer\n"
2461 "nop\n"
2462 "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
2463 "# cat /sys/kernel/debug/tracing/current_tracer\n"
2894 "sched_switch\n" 2464 "sched_switch\n"
2895 "# cat /debug/tracing/trace_options\n"
2465 "# cat /sys/kernel/debug/tracing/trace_options\n"
2896 "noprint-parent nosym-offset nosym-addr noverbose\n" 2466 "noprint-parent nosym-offset nosym-addr noverbose\n"
2897 "# echo print-parent > /debug/tracing/trace_options\n" 2467 "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2898 "# echo 1 > /debug/tracing/tracing_enabled\n" 2468 "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
2899 "# cat /debug/tracing/trace > /tmp/trace.txt\n" 2469 "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2900 "echo 0 > /debug/tracing/tracing_enabled\n" 2470 "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
2901; 2471;
2902 2472
2903static ssize_t 2473static ssize_t
@@ -2908,12 +2478,62 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
2908 readme_msg, strlen(readme_msg)); 2478 readme_msg, strlen(readme_msg));
2909} 2479}
2910 2480
2911static struct file_operations tracing_readme_fops = { 2481static const struct file_operations tracing_readme_fops = {
2912 .open = tracing_open_generic, 2482 .open = tracing_open_generic,
2913 .read = tracing_readme_read, 2483 .read = tracing_readme_read,
2914}; 2484};
2915 2485
2916static ssize_t 2486static ssize_t
2487tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2488 size_t cnt, loff_t *ppos)
2489{
2490 char *buf_comm;
2491 char *file_buf;
2492 char *buf;
2493 int len = 0;
2494 int pid;
2495 int i;
2496
2497 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2498 if (!file_buf)
2499 return -ENOMEM;
2500
2501 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2502 if (!buf_comm) {
2503 kfree(file_buf);
2504 return -ENOMEM;
2505 }
2506
2507 buf = file_buf;
2508
2509 for (i = 0; i < SAVED_CMDLINES; i++) {
2510 int r;
2511
2512 pid = map_cmdline_to_pid[i];
2513 if (pid == -1 || pid == NO_CMDLINE_MAP)
2514 continue;
2515
2516 trace_find_cmdline(pid, buf_comm);
2517 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2518 buf += r;
2519 len += r;
2520 }
2521
2522 len = simple_read_from_buffer(ubuf, cnt, ppos,
2523 file_buf, len);
2524
2525 kfree(file_buf);
2526 kfree(buf_comm);
2527
2528 return len;
2529}
2530
2531static const struct file_operations tracing_saved_cmdlines_fops = {
2532 .open = tracing_open_generic,
2533 .read = tracing_saved_cmdlines_read,
2534};
2535
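A quick userspace sketch of consuming the new saved_cmdlines file (same debugfs mount point assumed); the handler above emits one "pid comm" pair per line:

#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/kernel/debug/tracing/saved_cmdlines", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))	/* e.g. "1234 bash" */
		fputs(line, stdout);
	fclose(f);
	return 0;
}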
2536static ssize_t
2917tracing_ctrl_read(struct file *filp, char __user *ubuf, 2537tracing_ctrl_read(struct file *filp, char __user *ubuf,
2918 size_t cnt, loff_t *ppos) 2538 size_t cnt, loff_t *ppos)
2919{ 2539{
@@ -2930,7 +2550,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2930{ 2550{
2931 struct trace_array *tr = filp->private_data; 2551 struct trace_array *tr = filp->private_data;
2932 char buf[64]; 2552 char buf[64];
2933 long val; 2553 unsigned long val;
2934 int ret; 2554 int ret;
2935 2555
2936 if (cnt >= sizeof(buf)) 2556 if (cnt >= sizeof(buf))
@@ -2963,7 +2583,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2963 } 2583 }
2964 mutex_unlock(&trace_types_lock); 2584 mutex_unlock(&trace_types_lock);
2965 2585
2966 filp->f_pos += cnt; 2586 *ppos += cnt;
2967 2587
2968 return cnt; 2588 return cnt;
2969} 2589}
@@ -2972,7 +2592,7 @@ static ssize_t
2972tracing_set_trace_read(struct file *filp, char __user *ubuf, 2592tracing_set_trace_read(struct file *filp, char __user *ubuf,
2973 size_t cnt, loff_t *ppos) 2593 size_t cnt, loff_t *ppos)
2974{ 2594{
2975 char buf[max_tracer_type_len+2]; 2595 char buf[MAX_TRACER_SIZE+2];
2976 int r; 2596 int r;
2977 2597
2978 mutex_lock(&trace_types_lock); 2598 mutex_lock(&trace_types_lock);
@@ -2985,13 +2605,105 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
2985 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2605 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2986} 2606}
2987 2607
2988static int tracing_set_tracer(char *buf) 2608int tracer_init(struct tracer *t, struct trace_array *tr)
2609{
2610 tracing_reset_online_cpus(tr);
2611 return t->init(tr);
2612}
2613
2614static int tracing_resize_ring_buffer(unsigned long size)
2615{
2616 int ret;
2617
2618 /*
2619 * If kernel or user changes the size of the ring buffer
2620 * we use the size that was given, and we can forget about
2621 * expanding it later.
2622 */
2623 ring_buffer_expanded = 1;
2624
2625 ret = ring_buffer_resize(global_trace.buffer, size);
2626 if (ret < 0)
2627 return ret;
2628
2629 ret = ring_buffer_resize(max_tr.buffer, size);
2630 if (ret < 0) {
2631 int r;
2632
2633 r = ring_buffer_resize(global_trace.buffer,
2634 global_trace.entries);
2635 if (r < 0) {
2636 /*
2637 * AARGH! We are left with different
2638 * size max buffer!!!!
2639 * The max buffer is our "snapshot" buffer.
2640 * When a tracer needs a snapshot (one of the
2641 * latency tracers), it swaps the max buffer
 2642	 * with the saved snapshot. We succeeded in
 2643	 * updating the size of the main buffer, but failed to
2644 * update the size of the max buffer. But when we tried
2645 * to reset the main buffer to the original size, we
2646 * failed there too. This is very unlikely to
2647 * happen, but if it does, warn and kill all
2648 * tracing.
2649 */
2650 WARN_ON(1);
2651 tracing_disabled = 1;
2652 }
2653 return ret;
2654 }
2655
2656 global_trace.entries = size;
2657
2658 return ret;
2659}
2660
2661/**
2662 * tracing_update_buffers - used by tracing facility to expand ring buffers
2663 *
 2664	 * To save memory when tracing is never used on a system that has it
 2665	 * configured in, the ring buffers are set to a minimum size. Once
 2666	 * a user starts to use the tracing facility, they need to grow
 2667	 * to their default size.
2668 *
2669 * This function is to be called when a tracer is about to be used.
2670 */
2671int tracing_update_buffers(void)
2672{
2673 int ret = 0;
2674
2675 mutex_lock(&trace_types_lock);
2676 if (!ring_buffer_expanded)
2677 ret = tracing_resize_ring_buffer(trace_buf_size);
2678 mutex_unlock(&trace_types_lock);
2679
2680 return ret;
2681}
2682
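A minimal sketch of the intended call pattern, in kernel context; the caller below is hypothetical and stands in for the real call sites elsewhere in the tracing code:

static int my_tracing_feature_enable(void)
{
	int ret;

	/*
	 * Grow the ring buffers to trace_buf_size if they are
	 * still at their boot-time minimum.
	 */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	/* ... only now switch the feature on ... */
	return 0;
}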
2683struct trace_option_dentry;
2684
2685static struct trace_option_dentry *
2686create_trace_option_files(struct tracer *tracer);
2687
2688static void
2689destroy_trace_option_files(struct trace_option_dentry *topts);
2690
2691static int tracing_set_tracer(const char *buf)
2989{ 2692{
2693 static struct trace_option_dentry *topts;
2990 struct trace_array *tr = &global_trace; 2694 struct trace_array *tr = &global_trace;
2991 struct tracer *t; 2695 struct tracer *t;
2992 int ret = 0; 2696 int ret = 0;
2993 2697
2994 mutex_lock(&trace_types_lock); 2698 mutex_lock(&trace_types_lock);
2699
2700 if (!ring_buffer_expanded) {
2701 ret = tracing_resize_ring_buffer(trace_buf_size);
2702 if (ret < 0)
2703 goto out;
2704 ret = 0;
2705 }
2706
2995 for (t = trace_types; t; t = t->next) { 2707 for (t = trace_types; t; t = t->next) {
2996 if (strcmp(t->name, buf) == 0) 2708 if (strcmp(t->name, buf) == 0)
2997 break; 2709 break;
@@ -3007,9 +2719,14 @@ static int tracing_set_tracer(char *buf)
3007 if (current_trace && current_trace->reset) 2719 if (current_trace && current_trace->reset)
3008 current_trace->reset(tr); 2720 current_trace->reset(tr);
3009 2721
2722 destroy_trace_option_files(topts);
2723
3010 current_trace = t; 2724 current_trace = t;
2725
2726 topts = create_trace_option_files(current_trace);
2727
3011 if (t->init) { 2728 if (t->init) {
3012 ret = t->init(tr); 2729 ret = tracer_init(t, tr);
3013 if (ret) 2730 if (ret)
3014 goto out; 2731 goto out;
3015 } 2732 }
@@ -3025,15 +2742,15 @@ static ssize_t
3025tracing_set_trace_write(struct file *filp, const char __user *ubuf, 2742tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3026 size_t cnt, loff_t *ppos) 2743 size_t cnt, loff_t *ppos)
3027{ 2744{
3028 char buf[max_tracer_type_len+1]; 2745 char buf[MAX_TRACER_SIZE+1];
3029 int i; 2746 int i;
3030 size_t ret; 2747 size_t ret;
3031 int err; 2748 int err;
3032 2749
3033 ret = cnt; 2750 ret = cnt;
3034 2751
3035 if (cnt > max_tracer_type_len) 2752 if (cnt > MAX_TRACER_SIZE)
3036 cnt = max_tracer_type_len; 2753 cnt = MAX_TRACER_SIZE;
3037 2754
3038 if (copy_from_user(&buf, ubuf, cnt)) 2755 if (copy_from_user(&buf, ubuf, cnt))
3039 return -EFAULT; 2756 return -EFAULT;
@@ -3048,7 +2765,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3048 if (err) 2765 if (err)
3049 return err; 2766 return err;
3050 2767
3051 filp->f_pos += ret; 2768 *ppos += ret;
3052 2769
3053 return ret; 2770 return ret;
3054} 2771}
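From userspace the whole sequence is a single write to current_tracer; a hedged sketch, with the tracer name taken from the mini-HOWTO above:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *tracer = "sched_switch";	/* any name from available_tracers */
	int fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);
	int ret;

	if (fd < 0)
		return 1;
	ret = write(fd, tracer, strlen(tracer));
	close(fd);
	return ret < 0;
}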
@@ -3072,9 +2789,9 @@ static ssize_t
3072tracing_max_lat_write(struct file *filp, const char __user *ubuf, 2789tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3073 size_t cnt, loff_t *ppos) 2790 size_t cnt, loff_t *ppos)
3074{ 2791{
3075 long *ptr = filp->private_data; 2792 unsigned long *ptr = filp->private_data;
3076 char buf[64]; 2793 char buf[64];
3077 long val; 2794 unsigned long val;
3078 int ret; 2795 int ret;
3079 2796
3080 if (cnt >= sizeof(buf)) 2797 if (cnt >= sizeof(buf))
@@ -3094,54 +2811,99 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3094 return cnt; 2811 return cnt;
3095} 2812}
3096 2813
3097static atomic_t tracing_reader;
3098
3099static int tracing_open_pipe(struct inode *inode, struct file *filp) 2814static int tracing_open_pipe(struct inode *inode, struct file *filp)
3100{ 2815{
2816 long cpu_file = (long) inode->i_private;
3101 struct trace_iterator *iter; 2817 struct trace_iterator *iter;
2818 int ret = 0;
3102 2819
3103 if (tracing_disabled) 2820 if (tracing_disabled)
3104 return -ENODEV; 2821 return -ENODEV;
3105 2822
 3106	 /* We only allow one reader of the pipe */ 2823	 mutex_lock(&trace_types_lock);
3107 if (atomic_inc_return(&tracing_reader) != 1) { 2824
3108 atomic_dec(&tracing_reader); 2825 /* We only allow one reader per cpu */
3109 return -EBUSY; 2826 if (cpu_file == TRACE_PIPE_ALL_CPU) {
2827 if (!cpumask_empty(tracing_reader_cpumask)) {
2828 ret = -EBUSY;
2829 goto out;
2830 }
2831 cpumask_setall(tracing_reader_cpumask);
2832 } else {
2833 if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
2834 cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
2835 else {
2836 ret = -EBUSY;
2837 goto out;
2838 }
3110 } 2839 }
3111 2840
3112 /* create a buffer to store the information to pass to userspace */ 2841 /* create a buffer to store the information to pass to userspace */
3113 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 2842 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3114 if (!iter) 2843 if (!iter) {
3115 return -ENOMEM; 2844 ret = -ENOMEM;
2845 goto out;
2846 }
3116 2847
3117 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 2848 /*
3118 kfree(iter); 2849 * We make a copy of the current tracer to avoid concurrent
 3119	 return -ENOMEM; 2850	 * changes to it while we are reading.
2851 */
2852 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
2853 if (!iter->trace) {
2854 ret = -ENOMEM;
2855 goto fail;
3120 } 2856 }
2857 if (current_trace)
2858 *iter->trace = *current_trace;
3121 2859
3122 mutex_lock(&trace_types_lock); 2860 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
2861 ret = -ENOMEM;
2862 goto fail;
2863 }
3123 2864
3124 /* trace pipe does not show start of buffer */ 2865 /* trace pipe does not show start of buffer */
3125 cpumask_setall(iter->started); 2866 cpumask_setall(iter->started);
3126 2867
2868 if (trace_flags & TRACE_ITER_LATENCY_FMT)
2869 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2870
2871 iter->cpu_file = cpu_file;
3127 iter->tr = &global_trace; 2872 iter->tr = &global_trace;
3128 iter->trace = current_trace; 2873 mutex_init(&iter->mutex);
3129 filp->private_data = iter; 2874 filp->private_data = iter;
3130 2875
3131 if (iter->trace->pipe_open) 2876 if (iter->trace->pipe_open)
3132 iter->trace->pipe_open(iter); 2877 iter->trace->pipe_open(iter);
2878
2879out:
3133 mutex_unlock(&trace_types_lock); 2880 mutex_unlock(&trace_types_lock);
2881 return ret;
3134 2882
3135 return 0; 2883fail:
2884 kfree(iter->trace);
2885 kfree(iter);
2886 mutex_unlock(&trace_types_lock);
2887 return ret;
3136} 2888}
3137 2889
3138static int tracing_release_pipe(struct inode *inode, struct file *file) 2890static int tracing_release_pipe(struct inode *inode, struct file *file)
3139{ 2891{
3140 struct trace_iterator *iter = file->private_data; 2892 struct trace_iterator *iter = file->private_data;
3141 2893
2894 mutex_lock(&trace_types_lock);
2895
2896 if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
2897 cpumask_clear(tracing_reader_cpumask);
2898 else
2899 cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
2900
2901 mutex_unlock(&trace_types_lock);
2902
3142 free_cpumask_var(iter->started); 2903 free_cpumask_var(iter->started);
2904 mutex_destroy(&iter->mutex);
2905 kfree(iter->trace);
3143 kfree(iter); 2906 kfree(iter);
3144 atomic_dec(&tracing_reader);
3145 2907
3146 return 0; 2908 return 0;
3147} 2909}
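The one-reader-per-cpu bookkeeping above reduces to a shared busy bitmap; a standalone sketch follows. The kernel uses cpumask_* helpers under trace_types_lock; the flat 64-cpu bitmap here is only an illustrative simplification.

#include <errno.h>
#include <stdint.h>

static uint64_t reader_busy;	/* bit N set: cpu N's pipe is open */

/* cpu == -1 requests the all-cpus pipe, like TRACE_PIPE_ALL_CPU above */
static int claim_pipe_reader(long cpu, long ncpus)
{
	if (cpu == -1) {
		if (reader_busy)	/* any per-cpu reader already open? */
			return -EBUSY;
		reader_busy = (ncpus >= 64) ? ~0ULL : (1ULL << ncpus) - 1;
		return 0;
	}
	if (reader_busy & (1ULL << cpu))
		return -EBUSY;
	reader_busy |= 1ULL << cpu;
	return 0;
}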
@@ -3167,67 +2929,57 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3167 } 2929 }
3168} 2930}
3169 2931
3170/* 2932
3171 * Consumer reader. 2933void default_wait_pipe(struct trace_iterator *iter)
3172 */
3173static ssize_t
3174tracing_read_pipe(struct file *filp, char __user *ubuf,
3175 size_t cnt, loff_t *ppos)
3176{ 2934{
3177 struct trace_iterator *iter = filp->private_data; 2935 DEFINE_WAIT(wait);
3178 ssize_t sret;
3179 2936
3180 /* return any leftover data */ 2937 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3181 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3182 if (sret != -EBUSY)
3183 return sret;
3184 2938
3185 trace_seq_reset(&iter->seq); 2939 if (trace_empty(iter))
2940 schedule();
3186 2941
3187 mutex_lock(&trace_types_lock); 2942 finish_wait(&trace_wait, &wait);
3188 if (iter->trace->read) { 2943}
3189 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 2944
3190 if (sret) 2945/*
 3191	 goto out; 2946	 * This is a makeshift waitqueue.
 3192	 } 2947	 * A tracer might use this callback in some rare cases:
 2948	 *
 2949	 * 1) the current tracer might hold the runqueue lock when it wakes up
 2950	 * a reader, hence a deadlock (sched, function, and function graph tracers)
 2951	 * 2) the function tracers trace all functions; we don't want
 2952	 * the overhead of calling wake_up and friends
 2953	 * (and tracing them too)
 2954	 *
 2955	 * Anyway, this really is a very primitive wakeup.
2956 */
2957void poll_wait_pipe(struct trace_iterator *iter)
2958{
2959 set_current_state(TASK_INTERRUPTIBLE);
2960 /* sleep for 100 msecs, and try again. */
2961 schedule_timeout(HZ / 10);
2962}
2963
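For example, a tracer whose wakeup path cannot use a real waitqueue would opt into the polling wait through its struct tracer. The tracer below is hypothetical and shows only the fields relevant here:

static int my_tracer_init(struct trace_array *tr)
{
	return 0;	/* nothing to set up in this sketch */
}

static struct tracer my_tracer __read_mostly = {
	.name		= "my_tracer",
	.init		= my_tracer_init,
	.wait_pipe	= poll_wait_pipe,	/* no wake_up from tracer context */
};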
2964/* Must be called with trace_types_lock mutex held. */
2965static int tracing_wait_pipe(struct file *filp)
2966{
2967 struct trace_iterator *iter = filp->private_data;
3193 2968
3194waitagain:
3195 sret = 0;
3196 while (trace_empty(iter)) { 2969 while (trace_empty(iter)) {
3197 2970
3198 if ((filp->f_flags & O_NONBLOCK)) { 2971 if ((filp->f_flags & O_NONBLOCK)) {
3199 sret = -EAGAIN; 2972 return -EAGAIN;
3200 goto out;
3201 } 2973 }
3202 2974
3203 /* 2975 mutex_unlock(&iter->mutex);
 3204	 * This is a makeshift waitqueue. The reason we don't use
3205 * an actual wait queue is because:
3206 * 1) we only ever have one waiter
 3207	 * 2) the tracing traces all functions; we don't want
3208 * the overhead of calling wake_up and friends
3209 * (and tracing them too)
3210 * Anyway, this is really very primitive wakeup.
3211 */
3212 set_current_state(TASK_INTERRUPTIBLE);
3213 iter->tr->waiter = current;
3214
3215 mutex_unlock(&trace_types_lock);
3216
3217 /* sleep for 100 msecs, and try again. */
3218 schedule_timeout(HZ/10);
3219
3220 mutex_lock(&trace_types_lock);
3221 2976
3222 iter->tr->waiter = NULL; 2977 iter->trace->wait_pipe(iter);
3223 2978
3224 if (signal_pending(current)) { 2979 mutex_lock(&iter->mutex);
3225 sret = -EINTR;
3226 goto out;
3227 }
3228 2980
3229 if (iter->trace != current_trace) 2981 if (signal_pending(current))
3230 goto out; 2982 return -EINTR;
3231 2983
3232 /* 2984 /*
3233 * We block until we read something and tracing is disabled. 2985 * We block until we read something and tracing is disabled.
@@ -3240,13 +2992,59 @@ waitagain:
3240 */ 2992 */
3241 if (!tracer_enabled && iter->pos) 2993 if (!tracer_enabled && iter->pos)
3242 break; 2994 break;
2995 }
2996
2997 return 1;
2998}
2999
3000/*
3001 * Consumer reader.
3002 */
3003static ssize_t
3004tracing_read_pipe(struct file *filp, char __user *ubuf,
3005 size_t cnt, loff_t *ppos)
3006{
3007 struct trace_iterator *iter = filp->private_data;
3008 static struct tracer *old_tracer;
3009 ssize_t sret;
3010
3011 /* return any leftover data */
3012 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3013 if (sret != -EBUSY)
3014 return sret;
3015
3016 trace_seq_init(&iter->seq);
3243 3017
3244 continue; 3018 /* copy the tracer to avoid using a global lock all around */
3019 mutex_lock(&trace_types_lock);
3020 if (unlikely(old_tracer != current_trace && current_trace)) {
3021 old_tracer = current_trace;
3022 *iter->trace = *current_trace;
3245 } 3023 }
3024 mutex_unlock(&trace_types_lock);
3025
3026 /*
 3027	 * Avoid more than one consumer on a single file descriptor.
 3028	 * This is just a matter of trace coherency; the ring buffer itself
 3029	 * is protected.
3030 */
3031 mutex_lock(&iter->mutex);
3032 if (iter->trace->read) {
3033 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3034 if (sret)
3035 goto out;
3036 }
3037
3038waitagain:
3039 sret = tracing_wait_pipe(filp);
3040 if (sret <= 0)
3041 goto out;
3246 3042
3247 /* stop when tracing is finished */ 3043 /* stop when tracing is finished */
3248 if (trace_empty(iter)) 3044 if (trace_empty(iter)) {
3045 sret = 0;
3249 goto out; 3046 goto out;
3047 }
3250 3048
3251 if (cnt >= PAGE_SIZE) 3049 if (cnt >= PAGE_SIZE)
3252 cnt = PAGE_SIZE - 1; 3050 cnt = PAGE_SIZE - 1;
@@ -3257,6 +3055,7 @@ waitagain:
3257 offsetof(struct trace_iterator, seq)); 3055 offsetof(struct trace_iterator, seq));
3258 iter->pos = -1; 3056 iter->pos = -1;
3259 3057
3058 trace_event_read_lock();
3260 while (find_next_entry_inc(iter) != NULL) { 3059 while (find_next_entry_inc(iter) != NULL) {
3261 enum print_line_t ret; 3060 enum print_line_t ret;
3262 int len = iter->seq.len; 3061 int len = iter->seq.len;
@@ -3267,17 +3066,18 @@ waitagain:
3267 iter->seq.len = len; 3066 iter->seq.len = len;
3268 break; 3067 break;
3269 } 3068 }
3270 3069 if (ret != TRACE_TYPE_NO_CONSUME)
3271 trace_consume(iter); 3070 trace_consume(iter);
3272 3071
3273 if (iter->seq.len >= cnt) 3072 if (iter->seq.len >= cnt)
3274 break; 3073 break;
3275 } 3074 }
3075 trace_event_read_unlock();
3276 3076
3277 /* Now copy what we have to the user */ 3077 /* Now copy what we have to the user */
3278 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 3078 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3279 if (iter->seq.readpos >= iter->seq.len) 3079 if (iter->seq.readpos >= iter->seq.len)
3280 trace_seq_reset(&iter->seq); 3080 trace_seq_init(&iter->seq);
3281 3081
3282 /* 3082 /*
 3283	 * If there was nothing to send to user, in spite of consuming trace 3083	 * If there was nothing to send to user, in spite of consuming trace
@@ -3287,20 +3087,169 @@ waitagain:
3287 goto waitagain; 3087 goto waitagain;
3288 3088
3289out: 3089out:
3290 mutex_unlock(&trace_types_lock); 3090 mutex_unlock(&iter->mutex);
3291 3091
3292 return sret; 3092 return sret;
3293} 3093}
3294 3094
3095static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3096 struct pipe_buffer *buf)
3097{
3098 __free_page(buf->page);
3099}
3100
3101static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3102 unsigned int idx)
3103{
3104 __free_page(spd->pages[idx]);
3105}
3106
3107static struct pipe_buf_operations tracing_pipe_buf_ops = {
3108 .can_merge = 0,
3109 .map = generic_pipe_buf_map,
3110 .unmap = generic_pipe_buf_unmap,
3111 .confirm = generic_pipe_buf_confirm,
3112 .release = tracing_pipe_buf_release,
3113 .steal = generic_pipe_buf_steal,
3114 .get = generic_pipe_buf_get,
3115};
3116
3117static size_t
3118tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3119{
3120 size_t count;
3121 int ret;
3122
3123 /* Seq buffer is page-sized, exactly what we need. */
3124 for (;;) {
3125 count = iter->seq.len;
3126 ret = print_trace_line(iter);
3127 count = iter->seq.len - count;
3128 if (rem < count) {
3129 rem = 0;
3130 iter->seq.len -= count;
3131 break;
3132 }
3133 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3134 iter->seq.len -= count;
3135 break;
3136 }
3137
3138 if (ret != TRACE_TYPE_NO_CONSUME)
3139 trace_consume(iter);
3140 rem -= count;
3141 if (!find_next_entry_inc(iter)) {
3142 rem = 0;
3143 iter->ent = NULL;
3144 break;
3145 }
3146 }
3147
3148 return rem;
3149}
3150
3151static ssize_t tracing_splice_read_pipe(struct file *filp,
3152 loff_t *ppos,
3153 struct pipe_inode_info *pipe,
3154 size_t len,
3155 unsigned int flags)
3156{
3157 struct page *pages[PIPE_BUFFERS];
3158 struct partial_page partial[PIPE_BUFFERS];
3159 struct trace_iterator *iter = filp->private_data;
3160 struct splice_pipe_desc spd = {
3161 .pages = pages,
3162 .partial = partial,
3163 .nr_pages = 0, /* This gets updated below. */
3164 .flags = flags,
3165 .ops = &tracing_pipe_buf_ops,
3166 .spd_release = tracing_spd_release_pipe,
3167 };
3168 static struct tracer *old_tracer;
3169 ssize_t ret;
3170 size_t rem;
3171 unsigned int i;
3172
3173 /* copy the tracer to avoid using a global lock all around */
3174 mutex_lock(&trace_types_lock);
3175 if (unlikely(old_tracer != current_trace && current_trace)) {
3176 old_tracer = current_trace;
3177 *iter->trace = *current_trace;
3178 }
3179 mutex_unlock(&trace_types_lock);
3180
3181 mutex_lock(&iter->mutex);
3182
3183 if (iter->trace->splice_read) {
3184 ret = iter->trace->splice_read(iter, filp,
3185 ppos, pipe, len, flags);
3186 if (ret)
3187 goto out_err;
3188 }
3189
3190 ret = tracing_wait_pipe(filp);
3191 if (ret <= 0)
3192 goto out_err;
3193
3194 if (!iter->ent && !find_next_entry_inc(iter)) {
3195 ret = -EFAULT;
3196 goto out_err;
3197 }
3198
3199 trace_event_read_lock();
3200
3201 /* Fill as many pages as possible. */
3202 for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
3203 pages[i] = alloc_page(GFP_KERNEL);
3204 if (!pages[i])
3205 break;
3206
3207 rem = tracing_fill_pipe_page(rem, iter);
3208
3209 /* Copy the data into the page, so we can start over. */
3210 ret = trace_seq_to_buffer(&iter->seq,
3211 page_address(pages[i]),
3212 iter->seq.len);
3213 if (ret < 0) {
3214 __free_page(pages[i]);
3215 break;
3216 }
3217 partial[i].offset = 0;
3218 partial[i].len = iter->seq.len;
3219
3220 trace_seq_init(&iter->seq);
3221 }
3222
3223 trace_event_read_unlock();
3224 mutex_unlock(&iter->mutex);
3225
3226 spd.nr_pages = i;
3227
3228 return splice_to_pipe(pipe, &spd);
3229
3230out_err:
3231 mutex_unlock(&iter->mutex);
3232
3233 return ret;
3234}
3235
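A hedged userspace sketch of the new splice path: trace data moves from trace_pipe through a pipe to a file with no intermediate user buffer. Paths are assumed as elsewhere; the trace fd is opened non-blocking so the loop ends once the buffer drains.

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	int trace = open("/sys/kernel/debug/tracing/trace_pipe",
			 O_RDONLY | O_NONBLOCK);
	int out = open("/tmp/trace.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t n;

	if (trace < 0 || out < 0 || pipe(fds) < 0)
		return 1;

	/* move up to a page per iteration until the pipe reports no data */
	while ((n = splice(trace, NULL, fds[1], NULL, 4096, 0)) > 0)
		splice(fds[0], NULL, out, NULL, n, 0);

	return 0;
}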
3295static ssize_t 3236static ssize_t
3296tracing_entries_read(struct file *filp, char __user *ubuf, 3237tracing_entries_read(struct file *filp, char __user *ubuf,
3297 size_t cnt, loff_t *ppos) 3238 size_t cnt, loff_t *ppos)
3298{ 3239{
3299 struct trace_array *tr = filp->private_data; 3240 struct trace_array *tr = filp->private_data;
3300 char buf[64]; 3241 char buf[96];
3301 int r; 3242 int r;
3302 3243
3303 r = sprintf(buf, "%lu\n", tr->entries >> 10); 3244 mutex_lock(&trace_types_lock);
3245 if (!ring_buffer_expanded)
3246 r = sprintf(buf, "%lu (expanded: %lu)\n",
3247 tr->entries >> 10,
3248 trace_buf_size >> 10);
3249 else
3250 r = sprintf(buf, "%lu\n", tr->entries >> 10);
3251 mutex_unlock(&trace_types_lock);
3252
3304 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3253 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3305} 3254}
3306 3255
@@ -3344,31 +3293,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3344 val <<= 10; 3293 val <<= 10;
3345 3294
3346 if (val != global_trace.entries) { 3295 if (val != global_trace.entries) {
3347 ret = ring_buffer_resize(global_trace.buffer, val); 3296 ret = tracing_resize_ring_buffer(val);
3348 if (ret < 0) { 3297 if (ret < 0) {
3349 cnt = ret; 3298 cnt = ret;
3350 goto out; 3299 goto out;
3351 } 3300 }
3352
3353 ret = ring_buffer_resize(max_tr.buffer, val);
3354 if (ret < 0) {
3355 int r;
3356 cnt = ret;
3357 r = ring_buffer_resize(global_trace.buffer,
3358 global_trace.entries);
3359 if (r < 0) {
3360 /* AARGH! We are left with different
3361 * size max buffer!!!! */
3362 WARN_ON(1);
3363 tracing_disabled = 1;
3364 }
3365 goto out;
3366 }
3367
3368 global_trace.entries = val;
3369 } 3301 }
3370 3302
3371 filp->f_pos += cnt; 3303 *ppos += cnt;
3372 3304
3373 /* If check pages failed, return ENOMEM */ 3305 /* If check pages failed, return ENOMEM */
3374 if (tracing_disabled) 3306 if (tracing_disabled)
@@ -3388,22 +3320,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3388 return cnt; 3320 return cnt;
3389} 3321}
3390 3322
3391static int mark_printk(const char *fmt, ...)
3392{
3393 int ret;
3394 va_list args;
3395 va_start(args, fmt);
3396 ret = trace_vprintk(0, -1, fmt, args);
3397 va_end(args);
3398 return ret;
3399}
3400
3401static ssize_t 3323static ssize_t
3402tracing_mark_write(struct file *filp, const char __user *ubuf, 3324tracing_mark_write(struct file *filp, const char __user *ubuf,
3403 size_t cnt, loff_t *fpos) 3325 size_t cnt, loff_t *fpos)
3404{ 3326{
3405 char *buf; 3327 char *buf;
3406 char *end;
3407 3328
3408 if (tracing_disabled) 3329 if (tracing_disabled)
3409 return -EINVAL; 3330 return -EINVAL;
@@ -3411,7 +3332,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3411 if (cnt > TRACE_BUF_SIZE) 3332 if (cnt > TRACE_BUF_SIZE)
3412 cnt = TRACE_BUF_SIZE; 3333 cnt = TRACE_BUF_SIZE;
3413 3334
3414 buf = kmalloc(cnt + 1, GFP_KERNEL); 3335 buf = kmalloc(cnt + 2, GFP_KERNEL);
3415 if (buf == NULL) 3336 if (buf == NULL)
3416 return -ENOMEM; 3337 return -ENOMEM;
3417 3338
@@ -3419,56 +3340,410 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3419 kfree(buf); 3340 kfree(buf);
3420 return -EFAULT; 3341 return -EFAULT;
3421 } 3342 }
3343 if (buf[cnt-1] != '\n') {
3344 buf[cnt] = '\n';
3345 buf[cnt+1] = '\0';
3346 } else
3347 buf[cnt] = '\0';
3422 3348
3423 /* Cut from the first nil or newline. */ 3349 cnt = trace_vprintk(0, buf, NULL);
3424 buf[cnt] = '\0';
3425 end = strchr(buf, '\n');
3426 if (end)
3427 *end = '\0';
3428
3429 cnt = mark_printk("%s\n", buf);
3430 kfree(buf); 3350 kfree(buf);
3431 *fpos += cnt; 3351 *fpos += cnt;
3432 3352
3433 return cnt; 3353 return cnt;
3434} 3354}
3435 3355
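Userspace usage, sketched with a hypothetical helper; note the handler above now appends the trailing newline when the writer omits it:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int trace_mark(const char *msg)
{
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = write(fd, msg, strlen(msg));	/* '\n' added by the kernel if missing */
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	return trace_mark("checkpoint A");
}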
3436static struct file_operations tracing_max_lat_fops = { 3356static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
3357 size_t cnt, loff_t *ppos)
3358{
3359 char buf[64];
3360 int bufiter = 0;
3361 int i;
3362
3363 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3364 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
3365 "%s%s%s%s", i ? " " : "",
3366 i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3367 i == trace_clock_id ? "]" : "");
3368 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
3369
3370 return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
3371}
3372
3373static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3374 size_t cnt, loff_t *fpos)
3375{
3376 char buf[64];
3377 const char *clockstr;
3378 int i;
3379
3380 if (cnt >= sizeof(buf))
3381 return -EINVAL;
3382
3383 if (copy_from_user(&buf, ubuf, cnt))
3384 return -EFAULT;
3385
3386 buf[cnt] = 0;
3387
3388 clockstr = strstrip(buf);
3389
3390 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3391 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3392 break;
3393 }
3394 if (i == ARRAY_SIZE(trace_clocks))
3395 return -EINVAL;
3396
3397 trace_clock_id = i;
3398
3399 mutex_lock(&trace_types_lock);
3400
3401 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3402 if (max_tr.buffer)
3403 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3404
3405 mutex_unlock(&trace_types_lock);
3406
3407 *fpos += cnt;
3408
3409 return cnt;
3410}
3411
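Switching clocks from userspace is again a single write; a sketch, where "global" is assumed to be one of the names carried in trace_clocks[]:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *clock = "global";	/* assumed trace_clocks[] entry */
	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);
	int ret;

	if (fd < 0)
		return 1;
	ret = write(fd, clock, strlen(clock));
	close(fd);
	return ret < 0;
}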
3412static const struct file_operations tracing_max_lat_fops = {
3437 .open = tracing_open_generic, 3413 .open = tracing_open_generic,
3438 .read = tracing_max_lat_read, 3414 .read = tracing_max_lat_read,
3439 .write = tracing_max_lat_write, 3415 .write = tracing_max_lat_write,
3440}; 3416};
3441 3417
3442static struct file_operations tracing_ctrl_fops = { 3418static const struct file_operations tracing_ctrl_fops = {
3443 .open = tracing_open_generic, 3419 .open = tracing_open_generic,
3444 .read = tracing_ctrl_read, 3420 .read = tracing_ctrl_read,
3445 .write = tracing_ctrl_write, 3421 .write = tracing_ctrl_write,
3446}; 3422};
3447 3423
3448static struct file_operations set_tracer_fops = { 3424static const struct file_operations set_tracer_fops = {
3449 .open = tracing_open_generic, 3425 .open = tracing_open_generic,
3450 .read = tracing_set_trace_read, 3426 .read = tracing_set_trace_read,
3451 .write = tracing_set_trace_write, 3427 .write = tracing_set_trace_write,
3452}; 3428};
3453 3429
3454static struct file_operations tracing_pipe_fops = { 3430static const struct file_operations tracing_pipe_fops = {
3455 .open = tracing_open_pipe, 3431 .open = tracing_open_pipe,
3456 .poll = tracing_poll_pipe, 3432 .poll = tracing_poll_pipe,
3457 .read = tracing_read_pipe, 3433 .read = tracing_read_pipe,
3434 .splice_read = tracing_splice_read_pipe,
3458 .release = tracing_release_pipe, 3435 .release = tracing_release_pipe,
3459}; 3436};
3460 3437
3461static struct file_operations tracing_entries_fops = { 3438static const struct file_operations tracing_entries_fops = {
3462 .open = tracing_open_generic, 3439 .open = tracing_open_generic,
3463 .read = tracing_entries_read, 3440 .read = tracing_entries_read,
3464 .write = tracing_entries_write, 3441 .write = tracing_entries_write,
3465}; 3442};
3466 3443
3467static struct file_operations tracing_mark_fops = { 3444static const struct file_operations tracing_mark_fops = {
3468 .open = tracing_open_generic, 3445 .open = tracing_open_generic,
3469 .write = tracing_mark_write, 3446 .write = tracing_mark_write,
3470}; 3447};
3471 3448
3449static const struct file_operations trace_clock_fops = {
3450 .open = tracing_open_generic,
3451 .read = tracing_clock_read,
3452 .write = tracing_clock_write,
3453};
3454
3455struct ftrace_buffer_info {
3456 struct trace_array *tr;
3457 void *spare;
3458 int cpu;
3459 unsigned int read;
3460};
3461
3462static int tracing_buffers_open(struct inode *inode, struct file *filp)
3463{
3464 int cpu = (int)(long)inode->i_private;
3465 struct ftrace_buffer_info *info;
3466
3467 if (tracing_disabled)
3468 return -ENODEV;
3469
3470 info = kzalloc(sizeof(*info), GFP_KERNEL);
3471 if (!info)
3472 return -ENOMEM;
3473
3474 info->tr = &global_trace;
3475 info->cpu = cpu;
3476 info->spare = NULL;
3477 /* Force reading ring buffer for first read */
3478 info->read = (unsigned int)-1;
3479
3480 filp->private_data = info;
3481
3482 return nonseekable_open(inode, filp);
3483}
3484
3485static ssize_t
3486tracing_buffers_read(struct file *filp, char __user *ubuf,
3487 size_t count, loff_t *ppos)
3488{
3489 struct ftrace_buffer_info *info = filp->private_data;
3490 unsigned int pos;
3491 ssize_t ret;
3492 size_t size;
3493
3494 if (!count)
3495 return 0;
3496
3497 if (!info->spare)
3498 info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
3499 if (!info->spare)
3500 return -ENOMEM;
3501
3502 /* Do we have previous read data to read? */
3503 if (info->read < PAGE_SIZE)
3504 goto read;
3505
3506 info->read = 0;
3507
3508 ret = ring_buffer_read_page(info->tr->buffer,
3509 &info->spare,
3510 count,
3511 info->cpu, 0);
3512 if (ret < 0)
3513 return 0;
3514
3515 pos = ring_buffer_page_len(info->spare);
3516
3517 if (pos < PAGE_SIZE)
3518 memset(info->spare + pos, 0, PAGE_SIZE - pos);
3519
3520read:
3521 size = PAGE_SIZE - info->read;
3522 if (size > count)
3523 size = count;
3524
3525 ret = copy_to_user(ubuf, info->spare + info->read, size);
3526 if (ret == size)
3527 return -EFAULT;
3528 size -= ret;
3529
3530 *ppos += size;
3531 info->read += size;
3532
3533 return size;
3534}
3535
3536static int tracing_buffers_release(struct inode *inode, struct file *file)
3537{
3538 struct ftrace_buffer_info *info = file->private_data;
3539
3540 if (info->spare)
3541 ring_buffer_free_read_page(info->tr->buffer, info->spare);
3542 kfree(info);
3543
3544 return 0;
3545}
3546
3547struct buffer_ref {
3548 struct ring_buffer *buffer;
3549 void *page;
3550 int ref;
3551};
3552
3553static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
3554 struct pipe_buffer *buf)
3555{
3556 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3557
3558 if (--ref->ref)
3559 return;
3560
3561 ring_buffer_free_read_page(ref->buffer, ref->page);
3562 kfree(ref);
3563 buf->private = 0;
3564}
3565
3566static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
3567 struct pipe_buffer *buf)
3568{
3569 return 1;
3570}
3571
3572static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
3573 struct pipe_buffer *buf)
3574{
3575 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3576
3577 ref->ref++;
3578}
3579
3580/* Pipe buffer operations for a buffer. */
3581static struct pipe_buf_operations buffer_pipe_buf_ops = {
3582 .can_merge = 0,
3583 .map = generic_pipe_buf_map,
3584 .unmap = generic_pipe_buf_unmap,
3585 .confirm = generic_pipe_buf_confirm,
3586 .release = buffer_pipe_buf_release,
3587 .steal = buffer_pipe_buf_steal,
3588 .get = buffer_pipe_buf_get,
3589};
3590
3591/*
 3592 * Callback from splice_to_pipe(), used if we need to release some pages
 3593 * at the end of the spd in case we errored out while filling the pipe.
3594 */
3595static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
3596{
3597 struct buffer_ref *ref =
3598 (struct buffer_ref *)spd->partial[i].private;
3599
3600 if (--ref->ref)
3601 return;
3602
3603 ring_buffer_free_read_page(ref->buffer, ref->page);
3604 kfree(ref);
3605 spd->partial[i].private = 0;
3606}
3607
3608static ssize_t
3609tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3610 struct pipe_inode_info *pipe, size_t len,
3611 unsigned int flags)
3612{
3613 struct ftrace_buffer_info *info = file->private_data;
3614 struct partial_page partial[PIPE_BUFFERS];
3615 struct page *pages[PIPE_BUFFERS];
3616 struct splice_pipe_desc spd = {
3617 .pages = pages,
3618 .partial = partial,
3619 .flags = flags,
3620 .ops = &buffer_pipe_buf_ops,
3621 .spd_release = buffer_spd_release,
3622 };
3623 struct buffer_ref *ref;
3624 int entries, size, i;
3625 size_t ret;
3626
3627 if (*ppos & (PAGE_SIZE - 1)) {
3628 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
3629 return -EINVAL;
3630 }
3631
3632 if (len & (PAGE_SIZE - 1)) {
3633 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
3634 if (len < PAGE_SIZE)
3635 return -EINVAL;
3636 len &= PAGE_MASK;
3637 }
3638
3639 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3640
3641 for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
3642 struct page *page;
3643 int r;
3644
3645 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
3646 if (!ref)
3647 break;
3648
3649 ref->ref = 1;
3650 ref->buffer = info->tr->buffer;
3651 ref->page = ring_buffer_alloc_read_page(ref->buffer);
3652 if (!ref->page) {
3653 kfree(ref);
3654 break;
3655 }
3656
3657 r = ring_buffer_read_page(ref->buffer, &ref->page,
3658 len, info->cpu, 1);
3659 if (r < 0) {
3660 ring_buffer_free_read_page(ref->buffer,
3661 ref->page);
3662 kfree(ref);
3663 break;
3664 }
3665
3666 /*
 3667	 * zero out any leftover data; this is going to
3668 * user land.
3669 */
3670 size = ring_buffer_page_len(ref->page);
3671 if (size < PAGE_SIZE)
3672 memset(ref->page + size, 0, PAGE_SIZE - size);
3673
3674 page = virt_to_page(ref->page);
3675
3676 spd.pages[i] = page;
3677 spd.partial[i].len = PAGE_SIZE;
3678 spd.partial[i].offset = 0;
3679 spd.partial[i].private = (unsigned long)ref;
3680 spd.nr_pages++;
3681 *ppos += PAGE_SIZE;
3682
3683 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3684 }
3685
3686 spd.nr_pages = i;
3687
3688 /* did we read anything? */
3689 if (!spd.nr_pages) {
3690 if (flags & SPLICE_F_NONBLOCK)
3691 ret = -EAGAIN;
3692 else
3693 ret = 0;
3694 /* TODO: block */
3695 return ret;
3696 }
3697
3698 ret = splice_to_pipe(pipe, &spd);
3699
3700 return ret;
3701}
3702
3703static const struct file_operations tracing_buffers_fops = {
3704 .open = tracing_buffers_open,
3705 .read = tracing_buffers_read,
3706 .release = tracing_buffers_release,
3707 .splice_read = tracing_buffers_splice_read,
3708 .llseek = no_llseek,
3709};
3710
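A sketch of a raw consumer (paths assumed as elsewhere): plain read() hands back at most one ring-buffer page per call, and the handler above zero-fills the tail of a partial page:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char page[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, page, sizeof(page))) > 0)
		;	/* hand the raw page to a decoder of choice */
	close(fd);
	return 0;
}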
3711static ssize_t
3712tracing_stats_read(struct file *filp, char __user *ubuf,
3713 size_t count, loff_t *ppos)
3714{
3715 unsigned long cpu = (unsigned long)filp->private_data;
3716 struct trace_array *tr = &global_trace;
3717 struct trace_seq *s;
3718 unsigned long cnt;
3719
3720 s = kmalloc(sizeof(*s), GFP_KERNEL);
3721 if (!s)
3722 return -ENOMEM;
3723
3724 trace_seq_init(s);
3725
3726 cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
3727 trace_seq_printf(s, "entries: %ld\n", cnt);
3728
3729 cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
3730 trace_seq_printf(s, "overrun: %ld\n", cnt);
3731
3732 cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
3733 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
3734
3735 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
3736
3737 kfree(s);
3738
3739 return count;
3740}
3741
3742static const struct file_operations tracing_stats_fops = {
3743 .open = tracing_open_generic,
3744 .read = tracing_stats_read,
3745};
3746
3472#ifdef CONFIG_DYNAMIC_FTRACE 3747#ifdef CONFIG_DYNAMIC_FTRACE
3473 3748
3474int __weak ftrace_arch_read_dyn_info(char *buf, int size) 3749int __weak ftrace_arch_read_dyn_info(char *buf, int size)
@@ -3500,7 +3775,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf,
3500 return r; 3775 return r;
3501} 3776}
3502 3777
3503static struct file_operations tracing_dyn_info_fops = { 3778static const struct file_operations tracing_dyn_info_fops = {
3504 .open = tracing_open_generic, 3779 .open = tracing_open_generic,
3505 .read = tracing_read_dyn_info, 3780 .read = tracing_read_dyn_info,
3506}; 3781};
@@ -3515,6 +3790,9 @@ struct dentry *tracing_init_dentry(void)
3515 if (d_tracer) 3790 if (d_tracer)
3516 return d_tracer; 3791 return d_tracer;
3517 3792
3793 if (!debugfs_initialized())
3794 return NULL;
3795
3518 d_tracer = debugfs_create_dir("tracing", NULL); 3796 d_tracer = debugfs_create_dir("tracing", NULL);
3519 3797
3520 if (!d_tracer && !once) { 3798 if (!d_tracer && !once) {
@@ -3526,170 +3804,405 @@ struct dentry *tracing_init_dentry(void)
3526 return d_tracer; 3804 return d_tracer;
3527} 3805}
3528 3806
3807static struct dentry *d_percpu;
3808
3809struct dentry *tracing_dentry_percpu(void)
3810{
3811 static int once;
3812 struct dentry *d_tracer;
3813
3814 if (d_percpu)
3815 return d_percpu;
3816
3817 d_tracer = tracing_init_dentry();
3818
3819 if (!d_tracer)
3820 return NULL;
3821
3822 d_percpu = debugfs_create_dir("per_cpu", d_tracer);
3823
3824 if (!d_percpu && !once) {
3825 once = 1;
3826 pr_warning("Could not create debugfs directory 'per_cpu'\n");
3827 return NULL;
3828 }
3829
3830 return d_percpu;
3831}
3832
3833static void tracing_init_debugfs_percpu(long cpu)
3834{
3835 struct dentry *d_percpu = tracing_dentry_percpu();
3836 struct dentry *d_cpu;
 3837	 /* strlen("cpu") + MAX(log10(cpu)) + '\0' */
3838 char cpu_dir[7];
3839
3840 if (cpu > 999 || cpu < 0)
3841 return;
3842
3843 sprintf(cpu_dir, "cpu%ld", cpu);
3844 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
3845 if (!d_cpu) {
3846 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
3847 return;
3848 }
3849
3850 /* per cpu trace_pipe */
3851 trace_create_file("trace_pipe", 0444, d_cpu,
3852 (void *) cpu, &tracing_pipe_fops);
3853
3854 /* per cpu trace */
3855 trace_create_file("trace", 0644, d_cpu,
3856 (void *) cpu, &tracing_fops);
3857
3858 trace_create_file("trace_pipe_raw", 0444, d_cpu,
3859 (void *) cpu, &tracing_buffers_fops);
3860
3861 trace_create_file("stats", 0444, d_cpu,
3862 (void *) cpu, &tracing_stats_fops);
3863}
3864
3529#ifdef CONFIG_FTRACE_SELFTEST 3865#ifdef CONFIG_FTRACE_SELFTEST
3530/* Let selftest have access to static functions in this file */ 3866/* Let selftest have access to static functions in this file */
3531#include "trace_selftest.c" 3867#include "trace_selftest.c"
3532#endif 3868#endif
3533 3869
3534static __init int tracer_init_debugfs(void) 3870struct trace_option_dentry {
3871 struct tracer_opt *opt;
3872 struct tracer_flags *flags;
3873 struct dentry *entry;
3874};
3875
3876static ssize_t
3877trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
3878 loff_t *ppos)
3879{
3880 struct trace_option_dentry *topt = filp->private_data;
3881 char *buf;
3882
3883 if (topt->flags->val & topt->opt->bit)
3884 buf = "1\n";
3885 else
3886 buf = "0\n";
3887
3888 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
3889}
3890
3891static ssize_t
3892trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
3893 loff_t *ppos)
3894{
3895 struct trace_option_dentry *topt = filp->private_data;
3896 unsigned long val;
3897 char buf[64];
3898 int ret;
3899
3900 if (cnt >= sizeof(buf))
3901 return -EINVAL;
3902
3903 if (copy_from_user(&buf, ubuf, cnt))
3904 return -EFAULT;
3905
3906 buf[cnt] = 0;
3907
3908 ret = strict_strtoul(buf, 10, &val);
3909 if (ret < 0)
3910 return ret;
3911
3912 ret = 0;
3913 switch (val) {
3914 case 0:
3915 /* do nothing if already cleared */
3916 if (!(topt->flags->val & topt->opt->bit))
3917 break;
3918
3919 mutex_lock(&trace_types_lock);
3920 if (current_trace->set_flag)
3921 ret = current_trace->set_flag(topt->flags->val,
3922 topt->opt->bit, 0);
3923 mutex_unlock(&trace_types_lock);
3924 if (ret)
3925 return ret;
3926 topt->flags->val &= ~topt->opt->bit;
3927 break;
3928 case 1:
3929 /* do nothing if already set */
3930 if (topt->flags->val & topt->opt->bit)
3931 break;
3932
3933 mutex_lock(&trace_types_lock);
3934 if (current_trace->set_flag)
3935 ret = current_trace->set_flag(topt->flags->val,
3936 topt->opt->bit, 1);
3937 mutex_unlock(&trace_types_lock);
3938 if (ret)
3939 return ret;
3940 topt->flags->val |= topt->opt->bit;
3941 break;
3942
3943 default:
3944 return -EINVAL;
3945 }
3946
3947 *ppos += cnt;
3948
3949 return cnt;
3950}
3951
3952
3953static const struct file_operations trace_options_fops = {
3954 .open = tracing_open_generic,
3955 .read = trace_options_read,
3956 .write = trace_options_write,
3957};
3958
3959static ssize_t
3960trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
3961 loff_t *ppos)
3962{
3963 long index = (long)filp->private_data;
3964 char *buf;
3965
3966 if (trace_flags & (1 << index))
3967 buf = "1\n";
3968 else
3969 buf = "0\n";
3970
3971 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
3972}
3973
3974static ssize_t
3975trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
3976 loff_t *ppos)
3977{
3978 long index = (long)filp->private_data;
3979 char buf[64];
3980 unsigned long val;
3981 int ret;
3982
3983 if (cnt >= sizeof(buf))
3984 return -EINVAL;
3985
3986 if (copy_from_user(&buf, ubuf, cnt))
3987 return -EFAULT;
3988
3989 buf[cnt] = 0;
3990
3991 ret = strict_strtoul(buf, 10, &val);
3992 if (ret < 0)
3993 return ret;
3994
3995 if (val != 0 && val != 1)
3996 return -EINVAL;
3997 set_tracer_flags(1 << index, val);
3998
3999 *ppos += cnt;
4000
4001 return cnt;
4002}
4003
4004static const struct file_operations trace_options_core_fops = {
4005 .open = tracing_open_generic,
4006 .read = trace_options_core_read,
4007 .write = trace_options_core_write,
4008};
4009
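Each core flag thus becomes its own 0/1 file under options/; a hedged sketch, with the file name assumed from the trace_options list:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/options/print-parent",
		      O_WRONLY);
	int ret;

	if (fd < 0)
		return 1;
	ret = write(fd, "1", 1);
	close(fd);
	return ret < 0;
}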
4010struct dentry *trace_create_file(const char *name,
4011 mode_t mode,
4012 struct dentry *parent,
4013 void *data,
4014 const struct file_operations *fops)
4015{
4016 struct dentry *ret;
4017
4018 ret = debugfs_create_file(name, mode, parent, data, fops);
4019 if (!ret)
4020 pr_warning("Could not create debugfs '%s' entry\n", name);
4021
4022 return ret;
4023}
4024
4025
4026static struct dentry *trace_options_init_dentry(void)
3535{ 4027{
3536 struct dentry *d_tracer; 4028 struct dentry *d_tracer;
3537 struct dentry *entry; 4029 static struct dentry *t_options;
4030
4031 if (t_options)
4032 return t_options;
3538 4033
3539 d_tracer = tracing_init_dentry(); 4034 d_tracer = tracing_init_dentry();
4035 if (!d_tracer)
4036 return NULL;
3540 4037
3541 entry = debugfs_create_file("tracing_enabled", 0644, d_tracer, 4038 t_options = debugfs_create_dir("options", d_tracer);
3542 &global_trace, &tracing_ctrl_fops); 4039 if (!t_options) {
3543 if (!entry) 4040 pr_warning("Could not create debugfs directory 'options'\n");
3544 pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); 4041 return NULL;
3545 4042 }
3546 entry = debugfs_create_file("trace_options", 0644, d_tracer,
3547 NULL, &tracing_iter_fops);
3548 if (!entry)
3549 pr_warning("Could not create debugfs 'trace_options' entry\n");
3550
3551 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
3552 NULL, &tracing_cpumask_fops);
3553 if (!entry)
3554 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
3555
3556 entry = debugfs_create_file("latency_trace", 0444, d_tracer,
3557 &global_trace, &tracing_lt_fops);
3558 if (!entry)
3559 pr_warning("Could not create debugfs 'latency_trace' entry\n");
3560
3561 entry = debugfs_create_file("trace", 0444, d_tracer,
3562 &global_trace, &tracing_fops);
3563 if (!entry)
3564 pr_warning("Could not create debugfs 'trace' entry\n");
3565
3566 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
3567 &global_trace, &show_traces_fops);
3568 if (!entry)
3569 pr_warning("Could not create debugfs 'available_tracers' entry\n");
3570
3571 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
3572 &global_trace, &set_tracer_fops);
3573 if (!entry)
3574 pr_warning("Could not create debugfs 'current_tracer' entry\n");
3575
3576 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
3577 &tracing_max_latency,
3578 &tracing_max_lat_fops);
3579 if (!entry)
3580 pr_warning("Could not create debugfs "
3581 "'tracing_max_latency' entry\n");
3582
3583 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
3584 &tracing_thresh, &tracing_max_lat_fops);
3585 if (!entry)
3586 pr_warning("Could not create debugfs "
3587 "'tracing_thresh' entry\n");
3588 entry = debugfs_create_file("README", 0644, d_tracer,
3589 NULL, &tracing_readme_fops);
3590 if (!entry)
3591 pr_warning("Could not create debugfs 'README' entry\n");
3592
3593 entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
3594 NULL, &tracing_pipe_fops);
3595 if (!entry)
3596 pr_warning("Could not create debugfs "
3597 "'trace_pipe' entry\n");
3598
3599 entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
3600 &global_trace, &tracing_entries_fops);
3601 if (!entry)
3602 pr_warning("Could not create debugfs "
3603 "'buffer_size_kb' entry\n");
3604
3605 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
3606 NULL, &tracing_mark_fops);
3607 if (!entry)
3608 pr_warning("Could not create debugfs "
3609 "'trace_marker' entry\n");
3610 4043
3611#ifdef CONFIG_DYNAMIC_FTRACE 4044 return t_options;
3612 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
3613 &ftrace_update_tot_cnt,
3614 &tracing_dyn_info_fops);
3615 if (!entry)
3616 pr_warning("Could not create debugfs "
3617 "'dyn_ftrace_total_info' entry\n");
3618#endif
3619#ifdef CONFIG_SYSPROF_TRACER
3620 init_tracer_sysprof_debugfs(d_tracer);
3621#endif
3622 return 0;
3623} 4045}
3624 4046
3625int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) 4047static void
4048create_trace_option_file(struct trace_option_dentry *topt,
4049 struct tracer_flags *flags,
4050 struct tracer_opt *opt)
3626{ 4051{
3627 static DEFINE_SPINLOCK(trace_buf_lock); 4052 struct dentry *t_options;
3628 static char trace_buf[TRACE_BUF_SIZE];
3629 4053
3630 struct ring_buffer_event *event; 4054 t_options = trace_options_init_dentry();
3631 struct trace_array *tr = &global_trace; 4055 if (!t_options)
3632 struct trace_array_cpu *data; 4056 return;
3633 int cpu, len = 0, size, pc;
3634 struct print_entry *entry;
3635 unsigned long irq_flags;
3636 4057
3637 if (tracing_disabled || tracing_selftest_running) 4058 topt->flags = flags;
3638 return 0; 4059 topt->opt = opt;
3639 4060
3640 pc = preempt_count(); 4061 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
3641 preempt_disable_notrace(); 4062 &trace_options_fops);
3642 cpu = raw_smp_processor_id();
3643 data = tr->data[cpu];
3644 4063
3645 if (unlikely(atomic_read(&data->disabled))) 4064}
3646 goto out;
3647 4065
3648 pause_graph_tracing(); 4066static struct trace_option_dentry *
3649 spin_lock_irqsave(&trace_buf_lock, irq_flags); 4067create_trace_option_files(struct tracer *tracer)
3650 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 4068{
4069 struct trace_option_dentry *topts;
4070 struct tracer_flags *flags;
4071 struct tracer_opt *opts;
4072 int cnt;
3651 4073
3652 len = min(len, TRACE_BUF_SIZE-1); 4074 if (!tracer)
3653 trace_buf[len] = 0; 4075 return NULL;
3654 4076
3655 size = sizeof(*entry) + len + 1; 4077 flags = tracer->flags;
3656 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
3657 if (!event)
3658 goto out_unlock;
3659 entry = ring_buffer_event_data(event);
3660 tracing_generic_entry_update(&entry->ent, irq_flags, pc);
3661 entry->ent.type = TRACE_PRINT;
3662 entry->ip = ip;
3663 entry->depth = depth;
3664 4078
3665 memcpy(&entry->buf, trace_buf, len); 4079 if (!flags || !flags->opts)
3666 entry->buf[len] = 0; 4080 return NULL;
3667 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3668 4081
3669 out_unlock: 4082 opts = flags->opts;
3670 spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
3671 unpause_graph_tracing();
3672 out:
3673 preempt_enable_notrace();
3674 4083
3675 return len; 4084 for (cnt = 0; opts[cnt].name; cnt++)
4085 ;
4086
4087 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4088 if (!topts)
4089 return NULL;
4090
4091 for (cnt = 0; opts[cnt].name; cnt++)
4092 create_trace_option_file(&topts[cnt], flags,
4093 &opts[cnt]);
4094
4095 return topts;
3676} 4096}
3677EXPORT_SYMBOL_GPL(trace_vprintk);
3678 4097
3679int __ftrace_printk(unsigned long ip, const char *fmt, ...) 4098static void
4099destroy_trace_option_files(struct trace_option_dentry *topts)
3680{ 4100{
3681 int ret; 4101 int cnt;
3682 va_list ap;
3683 4102
3684 if (!(trace_flags & TRACE_ITER_PRINTK)) 4103 if (!topts)
3685 return 0; 4104 return;
3686 4105
3687 va_start(ap, fmt); 4106 for (cnt = 0; topts[cnt].opt; cnt++) {
3688 ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); 4107 if (topts[cnt].entry)
3689 va_end(ap); 4108 debugfs_remove(topts[cnt].entry);
3690 return ret; 4109 }
4110
4111 kfree(topts);
4112}
4113
4114static struct dentry *
4115create_trace_option_core_file(const char *option, long index)
4116{
4117 struct dentry *t_options;
4118
4119 t_options = trace_options_init_dentry();
4120 if (!t_options)
4121 return NULL;
4122
4123 return trace_create_file(option, 0644, t_options, (void *)index,
4124 &trace_options_core_fops);
4125}
4126
4127static __init void create_trace_options_dir(void)
4128{
4129 struct dentry *t_options;
4130 int i;
4131
4132 t_options = trace_options_init_dentry();
4133 if (!t_options)
4134 return;
4135
4136 for (i = 0; trace_options[i]; i++)
4137 create_trace_option_core_file(trace_options[i], i);
4138}
4139
4140static __init int tracer_init_debugfs(void)
4141{
4142 struct dentry *d_tracer;
4143 int cpu;
4144
4145 d_tracer = tracing_init_dentry();
4146
4147 trace_create_file("tracing_enabled", 0644, d_tracer,
4148 &global_trace, &tracing_ctrl_fops);
4149
4150 trace_create_file("trace_options", 0644, d_tracer,
4151 NULL, &tracing_iter_fops);
4152
4153 trace_create_file("tracing_cpumask", 0644, d_tracer,
4154 NULL, &tracing_cpumask_fops);
4155
4156 trace_create_file("trace", 0644, d_tracer,
4157 (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4158
4159 trace_create_file("available_tracers", 0444, d_tracer,
4160 &global_trace, &show_traces_fops);
4161
4162 trace_create_file("current_tracer", 0644, d_tracer,
4163 &global_trace, &set_tracer_fops);
4164
4165#ifdef CONFIG_TRACER_MAX_TRACE
4166 trace_create_file("tracing_max_latency", 0644, d_tracer,
4167 &tracing_max_latency, &tracing_max_lat_fops);
4168
4169 trace_create_file("tracing_thresh", 0644, d_tracer,
4170 &tracing_thresh, &tracing_max_lat_fops);
4171#endif
4172
4173 trace_create_file("README", 0444, d_tracer,
4174 NULL, &tracing_readme_fops);
4175
4176 trace_create_file("trace_pipe", 0444, d_tracer,
4177 (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4178
4179 trace_create_file("buffer_size_kb", 0644, d_tracer,
4180 &global_trace, &tracing_entries_fops);
4181
4182 trace_create_file("trace_marker", 0220, d_tracer,
4183 NULL, &tracing_mark_fops);
4184
4185 trace_create_file("saved_cmdlines", 0444, d_tracer,
4186 NULL, &tracing_saved_cmdlines_fops);
4187
4188 trace_create_file("trace_clock", 0644, d_tracer, NULL,
4189 &trace_clock_fops);
4190
4191#ifdef CONFIG_DYNAMIC_FTRACE
4192 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4193 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4194#endif
4195#ifdef CONFIG_SYSPROF_TRACER
4196 init_tracer_sysprof_debugfs(d_tracer);
4197#endif
4198
4199 create_trace_options_dir();
4200
4201 for_each_tracing_cpu(cpu)
4202 tracing_init_debugfs_percpu(cpu);
4203
4204 return 0;
3691} 4205}
3692EXPORT_SYMBOL_GPL(__ftrace_printk);
3693 4206
3694static int trace_panic_handler(struct notifier_block *this, 4207static int trace_panic_handler(struct notifier_block *this,
3695 unsigned long event, void *unused) 4208 unsigned long event, void *unused)
@@ -3750,40 +4263,48 @@ trace_printk_seq(struct trace_seq *s)
3750 4263
3751 printk(KERN_TRACE "%s", s->buffer); 4264 printk(KERN_TRACE "%s", s->buffer);
3752 4265
3753 trace_seq_reset(s); 4266 trace_seq_init(s);
3754} 4267}
3755 4268
3756void ftrace_dump(void) 4269static void __ftrace_dump(bool disable_tracing)
3757{ 4270{
3758 static DEFINE_SPINLOCK(ftrace_dump_lock); 4271 static raw_spinlock_t ftrace_dump_lock =
4272 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3759 /* use static because iter can be a bit big for the stack */ 4273 /* use static because iter can be a bit big for the stack */
3760 static struct trace_iterator iter; 4274 static struct trace_iterator iter;
4275 unsigned int old_userobj;
3761 static int dump_ran; 4276 static int dump_ran;
3762 unsigned long flags; 4277 unsigned long flags;
3763 int cnt = 0, cpu; 4278 int cnt = 0, cpu;
3764 4279
3765 /* only one dump */ 4280 /* only one dump */
3766 spin_lock_irqsave(&ftrace_dump_lock, flags); 4281 local_irq_save(flags);
4282 __raw_spin_lock(&ftrace_dump_lock);
3767 if (dump_ran) 4283 if (dump_ran)
3768 goto out; 4284 goto out;
3769 4285
3770 dump_ran = 1; 4286 dump_ran = 1;
3771 4287
3772 /* No turning back! */
3773 tracing_off(); 4288 tracing_off();
3774 ftrace_kill(); 4289
4290 if (disable_tracing)
4291 ftrace_kill();
3775 4292
3776 for_each_tracing_cpu(cpu) { 4293 for_each_tracing_cpu(cpu) {
3777 atomic_inc(&global_trace.data[cpu]->disabled); 4294 atomic_inc(&global_trace.data[cpu]->disabled);
3778 } 4295 }
3779 4296
4297 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
4298
3780 /* don't look at user memory in panic mode */ 4299 /* don't look at user memory in panic mode */
3781 trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 4300 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
3782 4301
3783 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 4302 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3784 4303
4304 /* Simulate the iterator */
3785 iter.tr = &global_trace; 4305 iter.tr = &global_trace;
3786 iter.trace = current_trace; 4306 iter.trace = current_trace;
4307 iter.cpu_file = TRACE_PIPE_ALL_CPU;
3787 4308
3788 /* 4309 /*
3789 * We need to stop all tracing on all CPUS to read the 4310 * We need to stop all tracing on all CPUS to read the
@@ -3807,8 +4328,11 @@ void ftrace_dump(void)
3807 iter.pos = -1; 4328 iter.pos = -1;
3808 4329
3809 if (find_next_entry_inc(&iter) != NULL) { 4330 if (find_next_entry_inc(&iter) != NULL) {
3810 print_trace_line(&iter); 4331 int ret;
3811 trace_consume(&iter); 4332
4333 ret = print_trace_line(&iter);
4334 if (ret != TRACE_TYPE_NO_CONSUME)
4335 trace_consume(&iter);
3812 } 4336 }
3813 4337
3814 trace_printk_seq(&iter.seq); 4338 trace_printk_seq(&iter.seq);
@@ -3819,13 +4343,30 @@ void ftrace_dump(void)
3819 else 4343 else
3820 printk(KERN_TRACE "---------------------------------\n"); 4344 printk(KERN_TRACE "---------------------------------\n");
3821 4345
4346 /* Re-enable tracing if requested */
4347 if (!disable_tracing) {
4348 trace_flags |= old_userobj;
4349
4350 for_each_tracing_cpu(cpu) {
4351 atomic_dec(&global_trace.data[cpu]->disabled);
4352 }
4353 tracing_on();
4354 }
4355
3822 out: 4356 out:
3823 spin_unlock_irqrestore(&ftrace_dump_lock, flags); 4357 __raw_spin_unlock(&ftrace_dump_lock);
4358 local_irq_restore(flags);
4359}
4360
4361/* By default, disable tracing after the dump */
4362void ftrace_dump(void)
4363{
4364 __ftrace_dump(true);
3824} 4365}
3825 4366
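Taken together, these hunks split the old one-shot ftrace_dump() into __ftrace_dump(bool disable_tracing): the flag chooses between the historical behaviour, where tracing is killed for good, and a recoverable dump that restores every piece of state it touched. Condensed sketch assembled from the hunks above (not a literal excerpt):

	static void __ftrace_dump(bool disable_tracing)
	{
		unsigned int old_userobj;
		int cpu;

		tracing_off();
		if (disable_tracing)
			ftrace_kill();		/* no turning back */

		for_each_tracing_cpu(cpu)
			atomic_inc(&global_trace.data[cpu]->disabled);

		/* don't look at user memory in panic mode */
		old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
		trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

		/* ... walk the ring buffer and print every entry ... */

		if (!disable_tracing) {
			/* undo everything changed above */
			trace_flags |= old_userobj;
			for_each_tracing_cpu(cpu)
				atomic_dec(&global_trace.data[cpu]->disabled);
			tracing_on();
		}
	}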
3826__init static int tracer_alloc_buffers(void) 4367__init static int tracer_alloc_buffers(void)
3827{ 4368{
3828 struct trace_array_cpu *data; 4369 int ring_buf_size;
3829 int i; 4370 int i;
3830 int ret = -ENOMEM; 4371 int ret = -ENOMEM;
3831 4372
@@ -3835,11 +4376,20 @@ __init static int tracer_alloc_buffers(void)
3835 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) 4376 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
3836 goto out_free_buffer_mask; 4377 goto out_free_buffer_mask;
3837 4378
4379 if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
4380 goto out_free_tracing_cpumask;
4381
4382	/* To save memory, keep the ring buffer at its minimum size */
4383 if (ring_buffer_expanded)
4384 ring_buf_size = trace_buf_size;
4385 else
4386 ring_buf_size = 1;
4387
3838 cpumask_copy(tracing_buffer_mask, cpu_possible_mask); 4388 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
3839 cpumask_copy(tracing_cpumask, cpu_all_mask); 4389 cpumask_copy(tracing_cpumask, cpu_all_mask);
3840 4390
3841 /* TODO: make the number of buffers hot pluggable with CPUS */ 4391 /* TODO: make the number of buffers hot pluggable with CPUS */
3842 global_trace.buffer = ring_buffer_alloc(trace_buf_size, 4392 global_trace.buffer = ring_buffer_alloc(ring_buf_size,
3843 TRACE_BUFFER_FLAGS); 4393 TRACE_BUFFER_FLAGS);
3844 if (!global_trace.buffer) { 4394 if (!global_trace.buffer) {
3845 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 4395 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
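The sizing logic above is the other half of the ring_buffer_expanded policy declared near the top of the file: unless tracing was already requested (for instance from the kernel command line), boot-time allocation asks for a nominal size of 1, which the ring buffer implementation rounds up to its internal minimum page count, and the full trace_buf_size is only committed later, when tracing is first used (handled elsewhere in this patch). Annotated sketch of the choice:

	int ring_buf_size;

	/* commit trace_buf_size only once tracing is in use */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;	/* token allocation, grown later */

	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
						TRACE_BUFFER_FLAGS);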
@@ -3850,7 +4400,7 @@ __init static int tracer_alloc_buffers(void)
3850 4400
3851 4401
3852#ifdef CONFIG_TRACER_MAX_TRACE 4402#ifdef CONFIG_TRACER_MAX_TRACE
3853 max_tr.buffer = ring_buffer_alloc(trace_buf_size, 4403 max_tr.buffer = ring_buffer_alloc(ring_buf_size,
3854 TRACE_BUFFER_FLAGS); 4404 TRACE_BUFFER_FLAGS);
3855 if (!max_tr.buffer) { 4405 if (!max_tr.buffer) {
3856 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); 4406 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
@@ -3864,21 +4414,17 @@ __init static int tracer_alloc_buffers(void)
3864 4414
3865 /* Allocate the first page for all buffers */ 4415 /* Allocate the first page for all buffers */
3866 for_each_tracing_cpu(i) { 4416 for_each_tracing_cpu(i) {
3867 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i); 4417 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3868 max_tr.data[i] = &per_cpu(max_data, i); 4418 max_tr.data[i] = &per_cpu(max_data, i);
3869 } 4419 }
3870 4420
3871 trace_init_cmdlines(); 4421 trace_init_cmdlines();
3872 4422
3873 register_tracer(&nop_trace); 4423 register_tracer(&nop_trace);
4424 current_trace = &nop_trace;
3874#ifdef CONFIG_BOOT_TRACER 4425#ifdef CONFIG_BOOT_TRACER
3875 register_tracer(&boot_tracer); 4426 register_tracer(&boot_tracer);
3876 current_trace = &boot_tracer;
3877 current_trace->init(&global_trace);
3878#else
3879 current_trace = &nop_trace;
3880#endif 4427#endif
3881
3882 /* All seems OK, enable tracing */ 4428 /* All seems OK, enable tracing */
3883 tracing_disabled = 0; 4429 tracing_disabled = 0;
3884 4430
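The default-tracer wiring changes as well: current_trace now always starts out as nop_trace, and CONFIG_BOOT_TRACER merely registers boot_tracer rather than force-starting it. Selecting a bootup tracer is instead driven by default_bootup_tracer, which the registration path is expected to match by name, roughly along these lines (assumed shape based on how this file uses default_bootup_tracer and tracing_selftest_disabled; not a literal excerpt):

	/* in register_tracer(), after 'type' registered successfully */
	if (default_bootup_tracer &&
	    !strcmp(default_bootup_tracer, type->name)) {
		printk(KERN_INFO "Starting tracer '%s'\n", type->name);
		tracing_set_tracer(type->name);	/* make it current */
		default_bootup_tracer = NULL;	/* nothing left for
						 * clear_boot_tracer() */
		/* a selftest would clobber the live boot trace */
		tracing_selftest_disabled = 1;
	}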
@@ -3886,14 +4432,38 @@ __init static int tracer_alloc_buffers(void)
3886 &trace_panic_notifier); 4432 &trace_panic_notifier);
3887 4433
3888 register_die_notifier(&trace_die_notifier); 4434 register_die_notifier(&trace_die_notifier);
3889 ret = 0; 4435
4436 return 0;
3890 4437
3891out_free_cpumask: 4438out_free_cpumask:
4439 free_cpumask_var(tracing_reader_cpumask);
4440out_free_tracing_cpumask:
3892 free_cpumask_var(tracing_cpumask); 4441 free_cpumask_var(tracing_cpumask);
3893out_free_buffer_mask: 4442out_free_buffer_mask:
3894 free_cpumask_var(tracing_buffer_mask); 4443 free_cpumask_var(tracing_buffer_mask);
3895out: 4444out:
3896 return ret; 4445 return ret;
3897} 4446}
4447
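The error path above follows the kernel's reverse-order unwind idiom: each allocation that can fail jumps to the label that frees exactly what was allocated before it, so no failure leaks and nothing is freed twice. The shape, condensed from this function:

	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;			/* nothing to undo yet */
	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;	/* undo one allocation */
	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
		goto out_free_tracing_cpumask;	/* undo two */

	/* later failures (ring buffers) jump to out_free_cpumask */
	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_reader_cpumask);
out_free_tracing_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;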
4448__init static int clear_boot_tracer(void)
4449{
4450 /*
4451	 * The default bootup tracer is named in a buffer that lives
4452	 * in an init section. This function runs at late_initcall
4453	 * time: if the requested boot tracer was never registered,
4454	 * clear the pointer now, so that a later registration cannot
4455	 * access the buffer that is about to be freed.
4456 */
4457 if (!default_bootup_tracer)
4458 return 0;
4459
4460 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
4461 default_bootup_tracer);
4462 default_bootup_tracer = NULL;
4463
4464 return 0;
4465}
4466
3898early_initcall(tracer_alloc_buffers); 4467early_initcall(tracer_alloc_buffers);
3899fs_initcall(tracer_init_debugfs); 4468fs_initcall(tracer_init_debugfs);
4469late_initcall(clear_boot_tracer);
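clear_boot_tracer() closes a use-after-free window: default_bootup_tracer points into init memory, which the kernel reclaims right after the late initcalls run. If the named tracer registered in time, the pointer was already cleared; the late_initcall catches the case where it never was. A hedged sketch of where that pointer comes from (the parser lives elsewhere in this patch; its name and the buffer size here are assumptions for illustration):

	#define BOOTUP_TRACER_SIZE	100	/* assumed for the sketch */
	static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
	static char *default_bootup_tracer;	/* NULL, or -> init memory */

	static int __init set_ftrace(char *str)
	{
		/* remember the requested tracer until it registers */
		strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
		default_bootup_tracer = bootup_tracer_buf;
		/* tracing was asked for, so allocate the full buffer */
		ring_buffer_expanded = 1;
		return 1;
	}
	__setup("ftrace=", set_ftrace);

The three initcall levels then order the pieces: early_initcall() allocates the buffers before anything can trace, fs_initcall() adds the debugfs files once the filesystem layer is up, and late_initcall() runs clear_boot_tracer() after every built-in tracer has had its chance to register, just before init memory is freed.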