Diffstat (limited to 'kernel/trace/trace.c')
 -rw-r--r--  kernel/trace/trace.c | 3034
 1 file changed, 1689 insertions(+), 1345 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 17bb88d86ac2..a0174a40c563 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -11,32 +11,33 @@
  * Copyright (C) 2004-2006 Ingo Molnar
  * Copyright (C) 2004 William Lee Irwin III
  */
+#include <linux/ring_buffer.h>
 #include <linux/utsrelease.h>
+#include <linux/stacktrace.h>
+#include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
+#include <linux/irqflags.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
 #include <linux/linkage.h>
 #include <linux/uaccess.h>
+#include <linux/kprobes.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/splice.h>
 #include <linux/kdebug.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
-#include <linux/kprobes.h>
-#include <linux/writeback.h>
-
-#include <linux/stacktrace.h>
-#include <linux/ring_buffer.h>
-#include <linux/irqflags.h>
 
 #include "trace.h"
+#include "trace_output.h"
 
 #define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)
 
@@ -44,14 +45,25 @@ unsigned long __read_mostly tracing_max_latency;
 unsigned long __read_mostly	tracing_thresh;
 
 /*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+static int ring_buffer_expanded;
+
+/*
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
  * entries inserted during the selftest although some concurrent
- * insertions into the ring-buffer such as ftrace_printk could occurred
+ * insertions into the ring-buffer such as trace_printk could occurred
  * at the same time, giving false positive or negative results.
  */
 static bool __read_mostly tracing_selftest_running;
 
+/*
+ * If a tracer is running, we do not want to run SELFTEST.
+ */
+static bool __read_mostly tracing_selftest_disabled;
+
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
 	{ }
@@ -73,7 +85,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  * of the tracer is successful. But that is the only place that sets
  * this back to zero.
  */
-int tracing_disabled = 1;
+static int tracing_disabled = 1;
 
 static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
@@ -91,6 +103,9 @@ static inline void ftrace_enable_cpu(void)
 
 static cpumask_var_t __read_mostly	tracing_buffer_mask;
 
+/* Define which cpu buffers are currently read in trace_pipe */
+static cpumask_var_t			tracing_reader_cpumask;
+
 #define for_each_tracing_cpu(cpu)	\
 	for_each_cpu(cpu, tracing_buffer_mask)
 
@@ -109,14 +124,21 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  */
 int ftrace_dump_on_oops;
 
-static int tracing_set_tracer(char *buf);
+static int tracing_set_tracer(const char *buf);
+
+#define BOOTUP_TRACER_SIZE		100
+static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+static char *default_bootup_tracer;
 
 static int __init set_ftrace(char *str)
 {
-	tracing_set_tracer(str);
+	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+	default_bootup_tracer = bootup_tracer_buf;
+	/* We are using ftrace early, expand it */
+	ring_buffer_expanded = 1;
 	return 1;
 }
-__setup("ftrace", set_ftrace);
+__setup("ftrace=", set_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
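For context, the new `ftrace=` handler only stashes the requested tracer name in bootup_tracer_buf; register_tracer() consumes it later, when the matching tracer registers itself (see the register_tracer() hunk further down). A hypothetical boot command line selecting a tracer at startup (which tracer names exist depends on the kernel configuration):

	linux ... ftrace=function_graph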
@@ -133,13 +155,6 @@ ns2usecs(cycle_t nsec)
 	return nsec;
 }
 
-cycle_t ftrace_now(int cpu)
-{
-	u64 ts = ring_buffer_time_stamp(cpu);
-	ring_buffer_normalize_time_stamp(cpu, &ts);
-	return ts;
-}
-
 /*
  * The global_trace is the descriptor that holds the tracing
  * buffers for the live tracing. For each CPU, it contains
@@ -156,6 +171,20 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+cycle_t ftrace_now(int cpu)
+{
+	u64 ts;
+
+	/* Early boot up does not have a buffer yet */
+	if (!global_trace.buffer)
+		return trace_clock_local();
+
+	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
+	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
+
+	return ts;
+}
+
 /*
  * The max_tr is used to snapshot the global_trace when a maximum
  * latency is reached. Some tracers will use this to store a maximum
@@ -186,9 +215,6 @@ int tracing_is_enabled(void)
 	return tracer_enabled;
 }
 
-/* function tracing enabled */
-int ftrace_function_enabled;
-
 /*
  * trace_buf_size is the size in bytes that is allocated
  * for a buffer. Note, the number of bytes is always rounded
@@ -229,7 +255,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-	TRACE_ITER_ANNOTATE;
+	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
 
 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -280,13 +306,17 @@ static const char *trace_options[] = {
 	"block",
 	"stacktrace",
 	"sched-tree",
-	"ftrace_printk",
+	"trace_printk",
 	"ftrace_preempt",
 	"branch",
 	"annotate",
 	"userstacktrace",
 	"sym-userobj",
 	"printk-msg-only",
+	"context-info",
+	"latency-format",
+	"global-clock",
+	"sleep-time",
 	NULL
 };
 
@@ -326,146 +356,37 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	data->rt_priority = tsk->rt_priority;
 
 	/* record this tasks comm */
-	tracing_record_cmdline(current);
+	tracing_record_cmdline(tsk);
 }
 
-/**
- * trace_seq_printf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
-	int len = (PAGE_SIZE - 1) - s->len;
-	va_list ap;
+	int len;
 	int ret;
 
-	if (!len)
-		return 0;
-
-	va_start(ap, fmt);
-	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
-	va_end(ap);
-
-	/* If we can't write it all, don't bother writing anything */
-	if (ret >= len)
-		return 0;
-
-	s->len += ret;
-
-	return len;
-}
-
-/**
- * trace_seq_puts - trace sequence printing of simple string
- * @s: trace sequence descriptor
- * @str: simple string to record
- *
- * The tracer may use either the sequence operations or its own
- * copy to user routines. This function records a simple string
- * into a special buffer (@s) for later retrieval by a sequencer
- * or other mechanism.
- */
-static int
-trace_seq_puts(struct trace_seq *s, const char *str)
-{
-	int len = strlen(str);
-
-	if (len > ((PAGE_SIZE - 1) - s->len))
-		return 0;
-
-	memcpy(s->buffer + s->len, str, len);
-	s->len += len;
-
-	return len;
-}
-
-static int
-trace_seq_putc(struct trace_seq *s, unsigned char c)
-{
-	if (s->len >= (PAGE_SIZE - 1))
-		return 0;
-
-	s->buffer[s->len++] = c;
-
-	return 1;
-}
-
-static int
-trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
-{
-	if (len > ((PAGE_SIZE - 1) - s->len))
+	if (!cnt)
 		return 0;
 
-	memcpy(s->buffer + s->len, mem, len);
-	s->len += len;
-
-	return len;
-}
-
-#define MAX_MEMHEX_BYTES	8
-#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
-
-static int
-trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
-{
-	unsigned char hex[HEX_CHARS];
-	unsigned char *data = mem;
-	int i, j;
-
-#ifdef __BIG_ENDIAN
-	for (i = 0, j = 0; i < len; i++) {
-#else
-	for (i = len-1, j = 0; i >= 0; i--) {
-#endif
-		hex[j++] = hex_asc_hi(data[i]);
-		hex[j++] = hex_asc_lo(data[i]);
-	}
-	hex[j++] = ' ';
-
-	return trace_seq_putmem(s, hex, j);
-}
-
-static int
-trace_seq_path(struct trace_seq *s, struct path *path)
-{
-	unsigned char *p;
+	if (s->len <= s->readpos)
+		return -EBUSY;
 
-	if (s->len >= (PAGE_SIZE - 1))
-		return 0;
-	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
-	if (!IS_ERR(p)) {
-		p = mangle_path(s->buffer + s->len, p, "\n");
-		if (p) {
-			s->len = p - s->buffer;
-			return 1;
-		}
-	} else {
-		s->buffer[s->len++] = '?';
-		return 1;
-	}
+	len = s->len - s->readpos;
+	if (cnt > len)
+		cnt = len;
+	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+	if (ret == cnt)
+		return -EFAULT;
 
-	return 0;
-}
+	cnt -= ret;
 
-static void
-trace_seq_reset(struct trace_seq *s)
-{
-	s->len = 0;
-	s->readpos = 0;
+	s->readpos += cnt;
+	return cnt;
 }
 
-ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
+static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
 	int len;
-	int ret;
+	void *ret;
 
 	if (s->len <= s->readpos)
 		return -EBUSY;
@@ -473,11 +394,11 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 	len = s->len - s->readpos;
 	if (cnt > len)
 		cnt = len;
-	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-	if (ret)
+	ret = memcpy(buf, s->buffer + s->readpos, cnt);
+	if (!ret)
 		return -EFAULT;
 
-	s->readpos += len;
+	s->readpos += cnt;
 	return cnt;
 }
 
@@ -489,7 +410,7 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
 	s->buffer[len] = 0;
 	seq_puts(m, s->buffer);
 
-	trace_seq_reset(s);
+	trace_seq_init(s);
 }
 
 /**
@@ -543,7 +464,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	ftrace_enable_cpu();
 
-	WARN_ON_ONCE(ret);
+	WARN_ON_ONCE(ret && ret != -EAGAIN);
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
@@ -556,6 +477,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
  * Register a new plugin tracer.
  */
 int register_tracer(struct tracer *type)
+__releases(kernel_lock)
+__acquires(kernel_lock)
 {
 	struct tracer *t;
 	int len;
@@ -594,9 +517,12 @@ int register_tracer(struct tracer *type)
 	else
 		if (!type->flags->opts)
 			type->flags->opts = dummy_tracer_opt;
+	if (!type->wait_pipe)
+		type->wait_pipe = default_wait_pipe;
+
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
-	if (type->selftest) {
+	if (type->selftest && !tracing_selftest_disabled) {
 		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
 		int i;
@@ -638,8 +564,26 @@ int register_tracer(struct tracer *type)
 out:
 	tracing_selftest_running = false;
 	mutex_unlock(&trace_types_lock);
-	lock_kernel();
 
+	if (ret || !default_bootup_tracer)
+		goto out_unlock;
+
+	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+		goto out_unlock;
+
+	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
+	/* Do we want this tracer to start on bootup? */
+	tracing_set_tracer(type->name);
+	default_bootup_tracer = NULL;
+	/* disable other selftests, since this will break it. */
+	tracing_selftest_disabled = 1;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
+	       type->name);
+#endif
+
+ out_unlock:
+	lock_kernel();
 	return ret;
 }
 
@@ -658,6 +602,15 @@ void unregister_tracer(struct tracer *type)
 
  found:
 	*t = (*t)->next;
+
+	if (type == current_trace && tracer_enabled) {
+		tracer_enabled = 0;
+		tracing_stop();
+		if (current_trace->stop)
+			current_trace->stop(&global_trace);
+		current_trace = &nop_trace;
+	}
+
 	if (strlen(type->name) != max_tracer_type_len)
 		goto out;
 
@@ -689,19 +642,20 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 }
 
 #define SAVED_CMDLINES 128
+#define NO_CMDLINE_MAP UINT_MAX
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
-atomic_t trace_record_cmdline_disabled __read_mostly;
+static atomic_t trace_record_cmdline_disabled __read_mostly;
 
 static void trace_init_cmdlines(void)
 {
-	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
-	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
+	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
+	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
 	cmdline_idx = 0;
 }
 
@@ -738,13 +692,12 @@ void tracing_start(void)
 		return;
 
 	spin_lock_irqsave(&tracing_start_lock, flags);
-	if (--trace_stop_count)
-		goto out;
-
-	if (trace_stop_count < 0) {
-		/* Someone screwed up their debugging */
-		WARN_ON_ONCE(1);
-		trace_stop_count = 0;
+	if (--trace_stop_count) {
+		if (trace_stop_count < 0) {
+			/* Someone screwed up their debugging */
+			WARN_ON_ONCE(1);
+			trace_stop_count = 0;
+		}
 		goto out;
 	}
 
@@ -794,8 +747,7 @@ void trace_stop_cmdline_recording(void);
 
 static void trace_save_cmdline(struct task_struct *tsk)
 {
-	unsigned map;
-	unsigned idx;
+	unsigned pid, idx;
 
 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
 		return;
@@ -806,17 +758,24 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!spin_trylock(&trace_cmdline_lock))
+	if (!__raw_spin_trylock(&trace_cmdline_lock))
		return;
 
 	idx = map_pid_to_cmdline[tsk->pid];
-	if (idx >= SAVED_CMDLINES) {
+	if (idx == NO_CMDLINE_MAP) {
 		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
 
-		map = map_cmdline_to_pid[idx];
-		if (map <= PID_MAX_DEFAULT)
-			map_pid_to_cmdline[map] = (unsigned)-1;
+		/*
+		 * Check whether the cmdline buffer at idx has a pid
+		 * mapped. We are going to overwrite that entry so we
+		 * need to clear the map_pid_to_cmdline. Otherwise we
+		 * would read the new comm for the old pid.
+		 */
+		pid = map_cmdline_to_pid[idx];
+		if (pid != NO_CMDLINE_MAP)
+			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
 
+		map_cmdline_to_pid[idx] = tsk->pid;
 		map_pid_to_cmdline[tsk->pid] = idx;
 
 		cmdline_idx = idx;
@@ -824,33 +783,37 @@ static void trace_save_cmdline(struct task_struct *tsk)
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
-	spin_unlock(&trace_cmdline_lock);
+	__raw_spin_unlock(&trace_cmdline_lock);
 }
 
-char *trace_find_cmdline(int pid)
+void trace_find_cmdline(int pid, char comm[])
 {
-	char *cmdline = "<...>";
 	unsigned map;
 
-	if (!pid)
-		return "<idle>";
+	if (!pid) {
+		strcpy(comm, "<idle>");
+		return;
+	}
 
-	if (pid > PID_MAX_DEFAULT)
-		goto out;
+	if (pid > PID_MAX_DEFAULT) {
+		strcpy(comm, "<...>");
+		return;
+	}
 
+	__raw_spin_lock(&trace_cmdline_lock);
 	map = map_pid_to_cmdline[pid];
-	if (map >= SAVED_CMDLINES)
-		goto out;
-
-	cmdline = saved_cmdlines[map];
+	if (map != NO_CMDLINE_MAP)
+		strcpy(comm, saved_cmdlines[map]);
+	else
+		strcpy(comm, "<...>");
 
- out:
-	return cmdline;
+	__raw_spin_unlock(&trace_cmdline_lock);
 }
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-	if (atomic_read(&trace_record_cmdline_disabled))
+	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
+	    !tracing_is_on())
 		return;
 
 	trace_save_cmdline(tsk);
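With this change trace_find_cmdline() fills a caller-supplied buffer under trace_cmdline_lock instead of returning a pointer into the shared saved_cmdlines table. A minimal caller sketch (TASK_COMM_LEN comes from <linux/sched.h>; the `entry` variable is illustrative):

	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);
	/* comm now holds the saved task name, "<idle>" for pid 0,
	 * or "<...>" when no mapping is known */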
@@ -864,7 +827,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count		= pc & 0xff;
 	entry->pid			= (tsk) ? tsk->pid : 0;
-	entry->tgid               	= (tsk) ? tsk->tgid : 0;
+	entry->tgid			= (tsk) ? tsk->tgid : 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -876,78 +839,132 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
 
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+						    unsigned char type,
+						    unsigned long len,
+						    unsigned long flags, int pc)
+{
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_lock_reserve(tr->buffer, len);
+	if (event != NULL) {
+		struct trace_entry *ent = ring_buffer_event_data(event);
+
+		tracing_generic_entry_update(ent, flags, pc);
+		ent->type = type;
+	}
+
+	return event;
+}
+static void ftrace_trace_stack(struct trace_array *tr,
+			       unsigned long flags, int skip, int pc);
+static void ftrace_trace_userstack(struct trace_array *tr,
+				   unsigned long flags, int pc);
+
+static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
+					struct ring_buffer_event *event,
+					unsigned long flags, int pc,
+					int wake)
+{
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
+
+	if (wake)
+		trace_wake_up();
+}
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+					struct ring_buffer_event *event,
+					unsigned long flags, int pc)
+{
+	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+}
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+				  unsigned long flags, int pc)
+{
+	return trace_buffer_lock_reserve(&global_trace,
+					 type, len, flags, pc);
+}
+
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+					unsigned long flags, int pc)
+{
+	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+}
+
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+					unsigned long flags, int pc)
+{
+	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+}
+
 void
-trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
 	       int pc)
 {
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
-	unsigned long irq_flags;
 
 	/* If we are reading the ring buffer, don't trace */
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
-					 &irq_flags);
+	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
					  flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type			= TRACE_FN;
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
-	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_graph_entry(struct trace_array *tr,
-				struct trace_array_cpu *data,
+static int __trace_graph_entry(struct trace_array *tr,
 				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ent_entry *entry;
-	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return;
+		return 0;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
+	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
+					  sizeof(*entry), flags, pc);
 	if (!event)
-		return;
+		return 0;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type			= TRACE_GRAPH_ENT;
 	entry->graph_ent		= *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	ring_buffer_unlock_commit(global_trace.buffer, event);
+
+	return 1;
 }
 
 static void __trace_graph_return(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
 {
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *entry;
-	unsigned long irq_flags;
 
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
-					 &irq_flags);
+	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
-	entry->ent.type			= TRACE_GRAPH_RET;
 	entry->ret				= *trace;
-	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
 
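The pattern the hunk above factors out: every event writer used to open-code ring_buffer_lock_reserve() plus tracing_generic_entry_update() plus the type assignment. With the new helpers, a writer reduces to a reserve/fill/commit sequence. A sketch modeled on trace_function() above (tr, ip, parent_ip, flags and pc as in that function):

	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;		/* reservation failed; drop the event */
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	/* trace_buffer_unlock_commit() also records kernel/user stack
	 * traces when those trace options are set, and wakes readers */
	trace_buffer_unlock_commit(tr, event, flags, pc);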
| @@ -957,31 +974,23 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data, | |||
| 957 | int pc) | 974 | int pc) | 
| 958 | { | 975 | { | 
| 959 | if (likely(!atomic_read(&data->disabled))) | 976 | if (likely(!atomic_read(&data->disabled))) | 
| 960 | trace_function(tr, data, ip, parent_ip, flags, pc); | 977 | trace_function(tr, ip, parent_ip, flags, pc); | 
| 961 | } | 978 | } | 
| 962 | 979 | ||
| 963 | static void ftrace_trace_stack(struct trace_array *tr, | 980 | static void __ftrace_trace_stack(struct trace_array *tr, | 
| 964 | struct trace_array_cpu *data, | 981 | unsigned long flags, | 
| 965 | unsigned long flags, | 982 | int skip, int pc) | 
| 966 | int skip, int pc) | ||
| 967 | { | 983 | { | 
| 968 | #ifdef CONFIG_STACKTRACE | 984 | #ifdef CONFIG_STACKTRACE | 
| 969 | struct ring_buffer_event *event; | 985 | struct ring_buffer_event *event; | 
| 970 | struct stack_entry *entry; | 986 | struct stack_entry *entry; | 
| 971 | struct stack_trace trace; | 987 | struct stack_trace trace; | 
| 972 | unsigned long irq_flags; | ||
| 973 | 988 | ||
| 974 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | 989 | event = trace_buffer_lock_reserve(tr, TRACE_STACK, | 
| 975 | return; | 990 | sizeof(*entry), flags, pc); | 
| 976 | |||
| 977 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
| 978 | &irq_flags); | ||
| 979 | if (!event) | 991 | if (!event) | 
| 980 | return; | 992 | return; | 
| 981 | entry = ring_buffer_event_data(event); | 993 | entry = ring_buffer_event_data(event); | 
| 982 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
| 983 | entry->ent.type = TRACE_STACK; | ||
| 984 | |||
| 985 | memset(&entry->caller, 0, sizeof(entry->caller)); | 994 | memset(&entry->caller, 0, sizeof(entry->caller)); | 
| 986 | 995 | ||
| 987 | trace.nr_entries = 0; | 996 | trace.nr_entries = 0; | 
| @@ -990,38 +999,43 @@ static void ftrace_trace_stack(struct trace_array *tr, | |||
| 990 | trace.entries = entry->caller; | 999 | trace.entries = entry->caller; | 
| 991 | 1000 | ||
| 992 | save_stack_trace(&trace); | 1001 | save_stack_trace(&trace); | 
| 993 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1002 | ring_buffer_unlock_commit(tr->buffer, event); | 
| 994 | #endif | 1003 | #endif | 
| 995 | } | 1004 | } | 
| 996 | 1005 | ||
| 1006 | static void ftrace_trace_stack(struct trace_array *tr, | ||
| 1007 | unsigned long flags, | ||
| 1008 | int skip, int pc) | ||
| 1009 | { | ||
| 1010 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | ||
| 1011 | return; | ||
| 1012 | |||
| 1013 | __ftrace_trace_stack(tr, flags, skip, pc); | ||
| 1014 | } | ||
| 1015 | |||
| 997 | void __trace_stack(struct trace_array *tr, | 1016 | void __trace_stack(struct trace_array *tr, | 
| 998 | struct trace_array_cpu *data, | ||
| 999 | unsigned long flags, | 1017 | unsigned long flags, | 
| 1000 | int skip) | 1018 | int skip, int pc) | 
| 1001 | { | 1019 | { | 
| 1002 | ftrace_trace_stack(tr, data, flags, skip, preempt_count()); | 1020 | __ftrace_trace_stack(tr, flags, skip, pc); | 
| 1003 | } | 1021 | } | 
| 1004 | 1022 | ||
| 1005 | static void ftrace_trace_userstack(struct trace_array *tr, | 1023 | static void ftrace_trace_userstack(struct trace_array *tr, | 
| 1006 | struct trace_array_cpu *data, | 1024 | unsigned long flags, int pc) | 
| 1007 | unsigned long flags, int pc) | ||
| 1008 | { | 1025 | { | 
| 1009 | #ifdef CONFIG_STACKTRACE | 1026 | #ifdef CONFIG_STACKTRACE | 
| 1010 | struct ring_buffer_event *event; | 1027 | struct ring_buffer_event *event; | 
| 1011 | struct userstack_entry *entry; | 1028 | struct userstack_entry *entry; | 
| 1012 | struct stack_trace trace; | 1029 | struct stack_trace trace; | 
| 1013 | unsigned long irq_flags; | ||
| 1014 | 1030 | ||
| 1015 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | 1031 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | 
| 1016 | return; | 1032 | return; | 
| 1017 | 1033 | ||
| 1018 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 1034 | event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK, | 
| 1019 | &irq_flags); | 1035 | sizeof(*entry), flags, pc); | 
| 1020 | if (!event) | 1036 | if (!event) | 
| 1021 | return; | 1037 | return; | 
| 1022 | entry = ring_buffer_event_data(event); | 1038 | entry = ring_buffer_event_data(event); | 
| 1023 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
| 1024 | entry->ent.type = TRACE_USER_STACK; | ||
| 1025 | 1039 | ||
| 1026 | memset(&entry->caller, 0, sizeof(entry->caller)); | 1040 | memset(&entry->caller, 0, sizeof(entry->caller)); | 
| 1027 | 1041 | ||
| @@ -1031,70 +1045,58 @@ static void ftrace_trace_userstack(struct trace_array *tr, | |||
| 1031 | trace.entries = entry->caller; | 1045 | trace.entries = entry->caller; | 
| 1032 | 1046 | ||
| 1033 | save_stack_trace_user(&trace); | 1047 | save_stack_trace_user(&trace); | 
| 1034 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1048 | ring_buffer_unlock_commit(tr->buffer, event); | 
| 1035 | #endif | 1049 | #endif | 
| 1036 | } | 1050 | } | 
| 1037 | 1051 | ||
| 1038 | void __trace_userstack(struct trace_array *tr, | 1052 | #ifdef UNUSED | 
| 1039 | struct trace_array_cpu *data, | 1053 | static void __trace_userstack(struct trace_array *tr, unsigned long flags) | 
| 1040 | unsigned long flags) | ||
| 1041 | { | 1054 | { | 
| 1042 | ftrace_trace_userstack(tr, data, flags, preempt_count()); | 1055 | ftrace_trace_userstack(tr, flags, preempt_count()); | 
| 1043 | } | 1056 | } | 
| 1057 | #endif /* UNUSED */ | ||
| 1044 | 1058 | ||
| 1045 | static void | 1059 | static void | 
| 1046 | ftrace_trace_special(void *__tr, void *__data, | 1060 | ftrace_trace_special(void *__tr, | 
| 1047 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | 1061 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | 
| 1048 | int pc) | 1062 | int pc) | 
| 1049 | { | 1063 | { | 
| 1050 | struct ring_buffer_event *event; | 1064 | struct ring_buffer_event *event; | 
| 1051 | struct trace_array_cpu *data = __data; | ||
| 1052 | struct trace_array *tr = __tr; | 1065 | struct trace_array *tr = __tr; | 
| 1053 | struct special_entry *entry; | 1066 | struct special_entry *entry; | 
| 1054 | unsigned long irq_flags; | ||
| 1055 | 1067 | ||
| 1056 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 1068 | event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL, | 
| 1057 | &irq_flags); | 1069 | sizeof(*entry), 0, pc); | 
| 1058 | if (!event) | 1070 | if (!event) | 
| 1059 | return; | 1071 | return; | 
| 1060 | entry = ring_buffer_event_data(event); | 1072 | entry = ring_buffer_event_data(event); | 
| 1061 | tracing_generic_entry_update(&entry->ent, 0, pc); | ||
| 1062 | entry->ent.type = TRACE_SPECIAL; | ||
| 1063 | entry->arg1 = arg1; | 1073 | entry->arg1 = arg1; | 
| 1064 | entry->arg2 = arg2; | 1074 | entry->arg2 = arg2; | 
| 1065 | entry->arg3 = arg3; | 1075 | entry->arg3 = arg3; | 
| 1066 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1076 | trace_buffer_unlock_commit(tr, event, 0, pc); | 
| 1067 | ftrace_trace_stack(tr, data, irq_flags, 4, pc); | ||
| 1068 | ftrace_trace_userstack(tr, data, irq_flags, pc); | ||
| 1069 | |||
| 1070 | trace_wake_up(); | ||
| 1071 | } | 1077 | } | 
| 1072 | 1078 | ||
| 1073 | void | 1079 | void | 
| 1074 | __trace_special(void *__tr, void *__data, | 1080 | __trace_special(void *__tr, void *__data, | 
| 1075 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | 1081 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | 
| 1076 | { | 1082 | { | 
| 1077 | ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count()); | 1083 | ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count()); | 
| 1078 | } | 1084 | } | 
| 1079 | 1085 | ||
| 1080 | void | 1086 | void | 
| 1081 | tracing_sched_switch_trace(struct trace_array *tr, | 1087 | tracing_sched_switch_trace(struct trace_array *tr, | 
| 1082 | struct trace_array_cpu *data, | ||
| 1083 | struct task_struct *prev, | 1088 | struct task_struct *prev, | 
| 1084 | struct task_struct *next, | 1089 | struct task_struct *next, | 
| 1085 | unsigned long flags, int pc) | 1090 | unsigned long flags, int pc) | 
| 1086 | { | 1091 | { | 
| 1087 | struct ring_buffer_event *event; | 1092 | struct ring_buffer_event *event; | 
| 1088 | struct ctx_switch_entry *entry; | 1093 | struct ctx_switch_entry *entry; | 
| 1089 | unsigned long irq_flags; | ||
| 1090 | 1094 | ||
| 1091 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 1095 | event = trace_buffer_lock_reserve(tr, TRACE_CTX, | 
| 1092 | &irq_flags); | 1096 | sizeof(*entry), flags, pc); | 
| 1093 | if (!event) | 1097 | if (!event) | 
| 1094 | return; | 1098 | return; | 
| 1095 | entry = ring_buffer_event_data(event); | 1099 | entry = ring_buffer_event_data(event); | 
| 1096 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
| 1097 | entry->ent.type = TRACE_CTX; | ||
| 1098 | entry->prev_pid = prev->pid; | 1100 | entry->prev_pid = prev->pid; | 
| 1099 | entry->prev_prio = prev->prio; | 1101 | entry->prev_prio = prev->prio; | 
| 1100 | entry->prev_state = prev->state; | 1102 | entry->prev_state = prev->state; | 
| @@ -1102,29 +1104,23 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
| 1102 | entry->next_prio = next->prio; | 1104 | entry->next_prio = next->prio; | 
| 1103 | entry->next_state = next->state; | 1105 | entry->next_state = next->state; | 
| 1104 | entry->next_cpu = task_cpu(next); | 1106 | entry->next_cpu = task_cpu(next); | 
| 1105 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1107 | trace_buffer_unlock_commit(tr, event, flags, pc); | 
| 1106 | ftrace_trace_stack(tr, data, flags, 5, pc); | ||
| 1107 | ftrace_trace_userstack(tr, data, flags, pc); | ||
| 1108 | } | 1108 | } | 
| 1109 | 1109 | ||
| 1110 | void | 1110 | void | 
| 1111 | tracing_sched_wakeup_trace(struct trace_array *tr, | 1111 | tracing_sched_wakeup_trace(struct trace_array *tr, | 
| 1112 | struct trace_array_cpu *data, | ||
| 1113 | struct task_struct *wakee, | 1112 | struct task_struct *wakee, | 
| 1114 | struct task_struct *curr, | 1113 | struct task_struct *curr, | 
| 1115 | unsigned long flags, int pc) | 1114 | unsigned long flags, int pc) | 
| 1116 | { | 1115 | { | 
| 1117 | struct ring_buffer_event *event; | 1116 | struct ring_buffer_event *event; | 
| 1118 | struct ctx_switch_entry *entry; | 1117 | struct ctx_switch_entry *entry; | 
| 1119 | unsigned long irq_flags; | ||
| 1120 | 1118 | ||
| 1121 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 1119 | event = trace_buffer_lock_reserve(tr, TRACE_WAKE, | 
| 1122 | &irq_flags); | 1120 | sizeof(*entry), flags, pc); | 
| 1123 | if (!event) | 1121 | if (!event) | 
| 1124 | return; | 1122 | return; | 
| 1125 | entry = ring_buffer_event_data(event); | 1123 | entry = ring_buffer_event_data(event); | 
| 1126 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
| 1127 | entry->ent.type = TRACE_WAKE; | ||
| 1128 | entry->prev_pid = curr->pid; | 1124 | entry->prev_pid = curr->pid; | 
| 1129 | entry->prev_prio = curr->prio; | 1125 | entry->prev_prio = curr->prio; | 
| 1130 | entry->prev_state = curr->state; | 1126 | entry->prev_state = curr->state; | 
| @@ -1132,11 +1128,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
| 1132 | entry->next_prio = wakee->prio; | 1128 | entry->next_prio = wakee->prio; | 
| 1133 | entry->next_state = wakee->state; | 1129 | entry->next_state = wakee->state; | 
| 1134 | entry->next_cpu = task_cpu(wakee); | 1130 | entry->next_cpu = task_cpu(wakee); | 
| 1135 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
| 1136 | ftrace_trace_stack(tr, data, flags, 6, pc); | ||
| 1137 | ftrace_trace_userstack(tr, data, flags, pc); | ||
| 1138 | 1131 | ||
| 1139 | trace_wake_up(); | 1132 | ring_buffer_unlock_commit(tr->buffer, event); | 
| 1133 | ftrace_trace_stack(tr, flags, 6, pc); | ||
| 1134 | ftrace_trace_userstack(tr, flags, pc); | ||
| 1140 | } | 1135 | } | 
| 1141 | 1136 | ||
| 1142 | void | 1137 | void | 
| @@ -1157,66 +1152,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
| 1157 | data = tr->data[cpu]; | 1152 | data = tr->data[cpu]; | 
| 1158 | 1153 | ||
| 1159 | if (likely(atomic_inc_return(&data->disabled) == 1)) | 1154 | if (likely(atomic_inc_return(&data->disabled) == 1)) | 
| 1160 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); | 1155 | ftrace_trace_special(tr, arg1, arg2, arg3, pc); | 
| 1161 | |||
| 1162 | atomic_dec(&data->disabled); | ||
| 1163 | local_irq_restore(flags); | ||
| 1164 | } | ||
| 1165 | |||
| 1166 | #ifdef CONFIG_FUNCTION_TRACER | ||
| 1167 | static void | ||
| 1168 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | ||
| 1169 | { | ||
| 1170 | struct trace_array *tr = &global_trace; | ||
| 1171 | struct trace_array_cpu *data; | ||
| 1172 | unsigned long flags; | ||
| 1173 | long disabled; | ||
| 1174 | int cpu, resched; | ||
| 1175 | int pc; | ||
| 1176 | |||
| 1177 | if (unlikely(!ftrace_function_enabled)) | ||
| 1178 | return; | ||
| 1179 | |||
| 1180 | pc = preempt_count(); | ||
| 1181 | resched = ftrace_preempt_disable(); | ||
| 1182 | local_save_flags(flags); | ||
| 1183 | cpu = raw_smp_processor_id(); | ||
| 1184 | data = tr->data[cpu]; | ||
| 1185 | disabled = atomic_inc_return(&data->disabled); | ||
| 1186 | |||
| 1187 | if (likely(disabled == 1)) | ||
| 1188 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
| 1189 | |||
| 1190 | atomic_dec(&data->disabled); | ||
| 1191 | ftrace_preempt_enable(resched); | ||
| 1192 | } | ||
| 1193 | |||
| 1194 | static void | ||
| 1195 | function_trace_call(unsigned long ip, unsigned long parent_ip) | ||
| 1196 | { | ||
| 1197 | struct trace_array *tr = &global_trace; | ||
| 1198 | struct trace_array_cpu *data; | ||
| 1199 | unsigned long flags; | ||
| 1200 | long disabled; | ||
| 1201 | int cpu; | ||
| 1202 | int pc; | ||
| 1203 | |||
| 1204 | if (unlikely(!ftrace_function_enabled)) | ||
| 1205 | return; | ||
| 1206 | |||
| 1207 | /* | ||
| 1208 | * Need to use raw, since this must be called before the | ||
| 1209 | * recursive protection is performed. | ||
| 1210 | */ | ||
| 1211 | local_irq_save(flags); | ||
| 1212 | cpu = raw_smp_processor_id(); | ||
| 1213 | data = tr->data[cpu]; | ||
| 1214 | disabled = atomic_inc_return(&data->disabled); | ||
| 1215 | |||
| 1216 | if (likely(disabled == 1)) { | ||
| 1217 | pc = preempt_count(); | ||
| 1218 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
| 1219 | } | ||
| 1220 | 1156 | ||
| 1221 | atomic_dec(&data->disabled); | 1157 | atomic_dec(&data->disabled); | 
| 1222 | local_irq_restore(flags); | 1158 | local_irq_restore(flags); | 
| @@ -1229,6 +1165,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
| 1229 | struct trace_array_cpu *data; | 1165 | struct trace_array_cpu *data; | 
| 1230 | unsigned long flags; | 1166 | unsigned long flags; | 
| 1231 | long disabled; | 1167 | long disabled; | 
| 1168 | int ret; | ||
| 1232 | int cpu; | 1169 | int cpu; | 
| 1233 | int pc; | 1170 | int pc; | 
| 1234 | 1171 | ||
| @@ -1244,15 +1181,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
| 1244 | disabled = atomic_inc_return(&data->disabled); | 1181 | disabled = atomic_inc_return(&data->disabled); | 
| 1245 | if (likely(disabled == 1)) { | 1182 | if (likely(disabled == 1)) { | 
| 1246 | pc = preempt_count(); | 1183 | pc = preempt_count(); | 
| 1247 | __trace_graph_entry(tr, data, trace, flags, pc); | 1184 | ret = __trace_graph_entry(tr, trace, flags, pc); | 
| 1185 | } else { | ||
| 1186 | ret = 0; | ||
| 1248 | } | 1187 | } | 
| 1249 | /* Only do the atomic if it is not already set */ | 1188 | /* Only do the atomic if it is not already set */ | 
| 1250 | if (!test_tsk_trace_graph(current)) | 1189 | if (!test_tsk_trace_graph(current)) | 
| 1251 | set_tsk_trace_graph(current); | 1190 | set_tsk_trace_graph(current); | 
| 1191 | |||
| 1252 | atomic_dec(&data->disabled); | 1192 | atomic_dec(&data->disabled); | 
| 1253 | local_irq_restore(flags); | 1193 | local_irq_restore(flags); | 
| 1254 | 1194 | ||
| 1255 | return 1; | 1195 | return ret; | 
| 1256 | } | 1196 | } | 
| 1257 | 1197 | ||
| 1258 | void trace_graph_return(struct ftrace_graph_ret *trace) | 1198 | void trace_graph_return(struct ftrace_graph_ret *trace) | 
| @@ -1270,7 +1210,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |||
| 1270 | disabled = atomic_inc_return(&data->disabled); | 1210 | disabled = atomic_inc_return(&data->disabled); | 
| 1271 | if (likely(disabled == 1)) { | 1211 | if (likely(disabled == 1)) { | 
| 1272 | pc = preempt_count(); | 1212 | pc = preempt_count(); | 
| 1273 | __trace_graph_return(tr, data, trace, flags, pc); | 1213 | __trace_graph_return(tr, trace, flags, pc); | 
| 1274 | } | 1214 | } | 
| 1275 | if (!trace->depth) | 1215 | if (!trace->depth) | 
| 1276 | clear_tsk_trace_graph(current); | 1216 | clear_tsk_trace_graph(current); | 
| @@ -1279,30 +1219,122 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |||
| 1279 | } | 1219 | } | 
| 1280 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 1220 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 
| 1281 | 1221 | ||
| 1282 | static struct ftrace_ops trace_ops __read_mostly = | ||
| 1283 | { | ||
| 1284 | .func = function_trace_call, | ||
| 1285 | }; | ||
| 1286 | 1222 | ||
| 1287 | void tracing_start_function_trace(void) | 1223 | /** | 
| 1224 | * trace_vbprintk - write binary msg to tracing buffer | ||
| 1225 | * | ||
| 1226 | */ | ||
| 1227 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | ||
| 1288 | { | 1228 | { | 
| 1289 | ftrace_function_enabled = 0; | 1229 | static raw_spinlock_t trace_buf_lock = | 
| 1230 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
| 1231 | static u32 trace_buf[TRACE_BUF_SIZE]; | ||
| 1290 | 1232 | ||
| 1291 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | 1233 | struct ring_buffer_event *event; | 
| 1292 | trace_ops.func = function_trace_call_preempt_only; | 1234 | struct trace_array *tr = &global_trace; | 
| 1293 | else | 1235 | struct trace_array_cpu *data; | 
| 1294 | trace_ops.func = function_trace_call; | 1236 | struct bprint_entry *entry; | 
| 1237 | unsigned long flags; | ||
| 1238 | int resched; | ||
| 1239 | int cpu, len = 0, size, pc; | ||
| 1240 | |||
| 1241 | if (unlikely(tracing_selftest_running || tracing_disabled)) | ||
| 1242 | return 0; | ||
| 1243 | |||
| 1244 | /* Don't pollute graph traces with trace_vprintk internals */ | ||
| 1245 | pause_graph_tracing(); | ||
| 1246 | |||
| 1247 | pc = preempt_count(); | ||
| 1248 | resched = ftrace_preempt_disable(); | ||
| 1249 | cpu = raw_smp_processor_id(); | ||
| 1250 | data = tr->data[cpu]; | ||
| 1251 | |||
| 1252 | if (unlikely(atomic_read(&data->disabled))) | ||
| 1253 | goto out; | ||
| 1254 | |||
| 1255 | /* Lockdep uses trace_printk for lock tracing */ | ||
| 1256 | local_irq_save(flags); | ||
| 1257 | __raw_spin_lock(&trace_buf_lock); | ||
| 1258 | len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); | ||
| 1259 | |||
| 1260 | if (len > TRACE_BUF_SIZE || len < 0) | ||
| 1261 | goto out_unlock; | ||
| 1262 | |||
| 1263 | size = sizeof(*entry) + sizeof(u32) * len; | ||
| 1264 | event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc); | ||
| 1265 | if (!event) | ||
| 1266 | goto out_unlock; | ||
| 1267 | entry = ring_buffer_event_data(event); | ||
| 1268 | entry->ip = ip; | ||
| 1269 | entry->fmt = fmt; | ||
| 1270 | |||
| 1271 | memcpy(entry->buf, trace_buf, sizeof(u32) * len); | ||
| 1272 | ring_buffer_unlock_commit(tr->buffer, event); | ||
| 1273 | |||
| 1274 | out_unlock: | ||
| 1275 | __raw_spin_unlock(&trace_buf_lock); | ||
| 1276 | local_irq_restore(flags); | ||
| 1277 | |||
| 1278 | out: | ||
| 1279 | ftrace_preempt_enable(resched); | ||
| 1280 | unpause_graph_tracing(); | ||
| 1295 | 1281 | ||
| 1296 | register_ftrace_function(&trace_ops); | 1282 | return len; | 
| 1297 | ftrace_function_enabled = 1; | ||
| 1298 | } | 1283 | } | 
| 1284 | EXPORT_SYMBOL_GPL(trace_vbprintk); | ||
| 1299 | 1285 | ||
| 1300 | void tracing_stop_function_trace(void) | 1286 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 
| 1301 | { | 1287 | { | 
| 1302 | ftrace_function_enabled = 0; | 1288 | static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; | 
| 1303 | unregister_ftrace_function(&trace_ops); | 1289 | static char trace_buf[TRACE_BUF_SIZE]; | 
| 1290 | |||
| 1291 | struct ring_buffer_event *event; | ||
| 1292 | struct trace_array *tr = &global_trace; | ||
| 1293 | struct trace_array_cpu *data; | ||
| 1294 | int cpu, len = 0, size, pc; | ||
| 1295 | struct print_entry *entry; | ||
| 1296 | unsigned long irq_flags; | ||
| 1297 | |||
| 1298 | if (tracing_disabled || tracing_selftest_running) | ||
| 1299 | return 0; | ||
| 1300 | |||
| 1301 | pc = preempt_count(); | ||
| 1302 | preempt_disable_notrace(); | ||
| 1303 | cpu = raw_smp_processor_id(); | ||
| 1304 | data = tr->data[cpu]; | ||
| 1305 | |||
| 1306 | if (unlikely(atomic_read(&data->disabled))) | ||
| 1307 | goto out; | ||
| 1308 | |||
| 1309 | pause_graph_tracing(); | ||
| 1310 | raw_local_irq_save(irq_flags); | ||
| 1311 | __raw_spin_lock(&trace_buf_lock); | ||
| 1312 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | ||
| 1313 | |||
| 1314 | len = min(len, TRACE_BUF_SIZE-1); | ||
| 1315 | trace_buf[len] = 0; | ||
| 1316 | |||
| 1317 | size = sizeof(*entry) + len + 1; | ||
| 1318 | event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc); | ||
| 1319 | if (!event) | ||
| 1320 | goto out_unlock; | ||
| 1321 | entry = ring_buffer_event_data(event); | ||
| 1322 | entry->ip = ip; | ||
| 1323 | |||
| 1324 | memcpy(&entry->buf, trace_buf, len); | ||
| 1325 | entry->buf[len] = 0; | ||
| 1326 | ring_buffer_unlock_commit(tr->buffer, event); | ||
| 1327 | |||
| 1328 | out_unlock: | ||
| 1329 | __raw_spin_unlock(&trace_buf_lock); | ||
| 1330 | raw_local_irq_restore(irq_flags); | ||
| 1331 | unpause_graph_tracing(); | ||
| 1332 | out: | ||
| 1333 | preempt_enable_notrace(); | ||
| 1334 | |||
| 1335 | return len; | ||
| 1304 | } | 1336 | } | 
| 1305 | #endif | 1337 | EXPORT_SYMBOL_GPL(trace_vprintk); | 
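Both printk paths follow the same ring-buffer discipline: reserve space first, fill the event in, then commit; a failed reserve simply drops the event instead of blocking. A toy sketch of that discipline under stated assumptions (single writer, no wraparound handling, hypothetical rb_* helpers):

```c
#include <stdio.h>
#include <string.h>

#define RB_SIZE 4096
static char rb[RB_SIZE];
static int rb_head;

/* reserve space up front; fail fast rather than block on the hot path */
static void *rb_reserve(int len)
{
	void *p;

	if (rb_head + len > RB_SIZE)
		return NULL;		/* caller must handle a failed reserve */
	p = &rb[rb_head];
	rb_head += len;
	return p;
}

/* commit makes the record visible to readers; a no-op placeholder here */
static void rb_commit(void *ev) { (void)ev; }

int main(void)
{
	const char msg[] = "hello";
	void *ev = rb_reserve(sizeof(msg));

	if (!ev)
		return 1;		/* mirrors the "goto out_unlock" path */
	memcpy(ev, msg, sizeof(msg));	/* fill only after reserve succeeded */
	rb_commit(ev);
	printf("%s\n", (char *)rb);
	return 0;
}
```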
| 1306 | 1338 | ||
| 1307 | enum trace_file_type { | 1339 | enum trace_file_type { | 
| 1308 | TRACE_FILE_LAT_FMT = 1, | 1340 | TRACE_FILE_LAT_FMT = 1, | 
| @@ -1345,10 +1377,25 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
| 1345 | { | 1377 | { | 
| 1346 | struct ring_buffer *buffer = iter->tr->buffer; | 1378 | struct ring_buffer *buffer = iter->tr->buffer; | 
| 1347 | struct trace_entry *ent, *next = NULL; | 1379 | struct trace_entry *ent, *next = NULL; | 
| 1380 | int cpu_file = iter->cpu_file; | ||
| 1348 | u64 next_ts = 0, ts; | 1381 | u64 next_ts = 0, ts; | 
| 1349 | int next_cpu = -1; | 1382 | int next_cpu = -1; | 
| 1350 | int cpu; | 1383 | int cpu; | 
| 1351 | 1384 | ||
| 1385 | /* | ||
| 1386 | * If we are in a per_cpu trace file, don't bother iterating over | ||
| 1387 | * all the cpus; peek at that cpu directly. | ||

| 1388 | */ | ||
| 1389 | if (cpu_file > TRACE_PIPE_ALL_CPU) { | ||
| 1390 | if (ring_buffer_empty_cpu(buffer, cpu_file)) | ||
| 1391 | return NULL; | ||
| 1392 | ent = peek_next_entry(iter, cpu_file, ent_ts); | ||
| 1393 | if (ent_cpu) | ||
| 1394 | *ent_cpu = cpu_file; | ||
| 1395 | |||
| 1396 | return ent; | ||
| 1397 | } | ||
| 1398 | |||
| 1352 | for_each_tracing_cpu(cpu) { | 1399 | for_each_tracing_cpu(cpu) { | 
| 1353 | 1400 | ||
| 1354 | if (ring_buffer_empty_cpu(buffer, cpu)) | 1401 | if (ring_buffer_empty_cpu(buffer, cpu)) | 
| @@ -1376,8 +1423,8 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
| 1376 | } | 1423 | } | 
| 1377 | 1424 | ||
| 1378 | /* Find the next real entry, without updating the iterator itself */ | 1425 | /* Find the next real entry, without updating the iterator itself */ | 
| 1379 | static struct trace_entry * | 1426 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 
| 1380 | find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | 1427 | int *ent_cpu, u64 *ent_ts) | 
| 1381 | { | 1428 | { | 
| 1382 | return __find_next_entry(iter, ent_cpu, ent_ts); | 1429 | return __find_next_entry(iter, ent_cpu, ent_ts); | 
| 1383 | } | 1430 | } | 
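__find_next_entry merges the per-CPU buffers by repeatedly taking the entry with the oldest timestamp, and the new cpu_file fast path skips the scan entirely when the iterator is bound to a single CPU. A small standalone sketch of the selection logic (the timestamp array and the 0-means-empty convention are made up for the example):

```c
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4
/* per-CPU "next entry" timestamps; 0 means that buffer is empty (assumption) */
static uint64_t next_ts_for[NCPUS] = { 0, 530, 120, 410 };

static int find_next_cpu(int cpu_file)
{
	int next_cpu = -1;
	uint64_t next_ts = 0;

	/* fast path: a per-cpu file pins us to one buffer, skip the scan */
	if (cpu_file >= 0)
		return next_ts_for[cpu_file] ? cpu_file : -1;

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		uint64_t ts = next_ts_for[cpu];

		if (!ts)
			continue;	/* empty buffer */
		if (next_cpu < 0 || ts < next_ts) {
			next_ts = ts;
			next_cpu = cpu;
		}
	}
	return next_cpu;
}

int main(void)
{
	printf("all cpus -> cpu %d\n", find_next_cpu(-1));	/* cpu 2 (ts 120) */
	printf("cpu 3 only -> cpu %d\n", find_next_cpu(3));
	return 0;
}
```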
| @@ -1426,19 +1473,32 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 1426 | return ent; | 1473 | return ent; | 
| 1427 | } | 1474 | } | 
| 1428 | 1475 | ||
| 1476 | /* | ||
| 1477 | * No locking is necessary here. The worst that can | ||
| 1478 | * happen is losing events consumed at the same time | ||
| 1479 | * by a trace_pipe reader. | ||
| 1480 | * Other than that, we don't risk crashing the ring buffer | ||
| 1481 | * because it serializes the readers. | ||
| 1482 | * | ||
| 1483 | * The current tracer is copied to avoid holding a global | ||
| 1484 | * lock all around. | ||
| 1485 | */ | ||
| 1429 | static void *s_start(struct seq_file *m, loff_t *pos) | 1486 | static void *s_start(struct seq_file *m, loff_t *pos) | 
| 1430 | { | 1487 | { | 
| 1431 | struct trace_iterator *iter = m->private; | 1488 | struct trace_iterator *iter = m->private; | 
| 1489 | static struct tracer *old_tracer; | ||
| 1490 | int cpu_file = iter->cpu_file; | ||
| 1432 | void *p = NULL; | 1491 | void *p = NULL; | 
| 1433 | loff_t l = 0; | 1492 | loff_t l = 0; | 
| 1434 | int cpu; | 1493 | int cpu; | 
| 1435 | 1494 | ||
| 1495 | /* copy the tracer to avoid using a global lock all around */ | ||
| 1436 | mutex_lock(&trace_types_lock); | 1496 | mutex_lock(&trace_types_lock); | 
| 1437 | 1497 | if (unlikely(old_tracer != current_trace && current_trace)) { | |
| 1438 | if (!current_trace || current_trace != iter->trace) { | 1498 | old_tracer = current_trace; | 
| 1439 | mutex_unlock(&trace_types_lock); | 1499 | *iter->trace = *current_trace; | 
| 1440 | return NULL; | ||
| 1441 | } | 1500 | } | 
| 1501 | mutex_unlock(&trace_types_lock); | ||
| 1442 | 1502 | ||
| 1443 | atomic_inc(&trace_record_cmdline_disabled); | 1503 | atomic_inc(&trace_record_cmdline_disabled); | 
| 1444 | 1504 | ||
| @@ -1449,9 +1509,12 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
| 1449 | 1509 | ||
| 1450 | ftrace_disable_cpu(); | 1510 | ftrace_disable_cpu(); | 
| 1451 | 1511 | ||
| 1452 | for_each_tracing_cpu(cpu) { | 1512 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | 
| 1453 | ring_buffer_iter_reset(iter->buffer_iter[cpu]); | 1513 | for_each_tracing_cpu(cpu) | 
| 1454 | } | 1514 | ring_buffer_iter_reset(iter->buffer_iter[cpu]); | 
| 1515 | } else | ||
| 1516 | ring_buffer_iter_reset(iter->buffer_iter[cpu_file]); | ||
| 1517 | |||
| 1455 | 1518 | ||
| 1456 | ftrace_enable_cpu(); | 1519 | ftrace_enable_cpu(); | 
| 1457 | 1520 | ||
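s_start now snapshots the current tracer under trace_types_lock instead of either holding the lock for the whole read or bailing out when the tracer changed. A rough pthread sketch of that copy-then-release pattern (names and the cached-pointer check are illustrative):

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct tracer { char name[16]; };

static pthread_mutex_t types_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tracer current_tracer = { "function" };

static void start_iteration(struct tracer *snap)
{
	static struct tracer *old;	/* cache to skip redundant copies */

	pthread_mutex_lock(&types_lock);
	if (old != &current_tracer) {	/* tracer changed since last start */
		old = &current_tracer;
		*snap = current_tracer;	/* private copy for this reader */
	}
	pthread_mutex_unlock(&types_lock);
	/* from here on we never touch current_tracer, so no global lock
	 * needs to be held across the whole (possibly long) iteration */
}

int main(void)
{
	struct tracer snap;

	memset(&snap, 0, sizeof(snap));
	start_iteration(&snap);
	printf("iterating with tracer: %s\n", snap.name);
	return 0;
}
```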
| @@ -1469,155 +1532,6 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
| 1469 | static void s_stop(struct seq_file *m, void *p) | 1532 | static void s_stop(struct seq_file *m, void *p) | 
| 1470 | { | 1533 | { | 
| 1471 | atomic_dec(&trace_record_cmdline_disabled); | 1534 | atomic_dec(&trace_record_cmdline_disabled); | 
| 1472 | mutex_unlock(&trace_types_lock); | ||
| 1473 | } | ||
| 1474 | |||
| 1475 | #ifdef CONFIG_KRETPROBES | ||
| 1476 | static inline const char *kretprobed(const char *name) | ||
| 1477 | { | ||
| 1478 | static const char tramp_name[] = "kretprobe_trampoline"; | ||
| 1479 | int size = sizeof(tramp_name); | ||
| 1480 | |||
| 1481 | if (strncmp(tramp_name, name, size) == 0) | ||
| 1482 | return "[unknown/kretprobe'd]"; | ||
| 1483 | return name; | ||
| 1484 | } | ||
| 1485 | #else | ||
| 1486 | static inline const char *kretprobed(const char *name) | ||
| 1487 | { | ||
| 1488 | return name; | ||
| 1489 | } | ||
| 1490 | #endif /* CONFIG_KRETPROBES */ | ||
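The kretprobed() helper being deleted here (it moves to trace_output.c with the rest of the output code) papers over kretprobes: once a kretprobe hijacks a return address, symbol resolution would report the trampoline, so a marker string is printed instead. A standalone version of the same check:

```c
#include <stdio.h>
#include <string.h>

static const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";

	/* a kretprobe replaces the return address with its trampoline, so
	 * the resolved symbol would be misleading; print a marker instead.
	 * sizeof includes the NUL, so this is an exact-match compare. */
	if (strncmp(tramp_name, name, sizeof(tramp_name)) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}

int main(void)
{
	printf("%s\n", kretprobed("schedule"));
	printf("%s\n", kretprobed("kretprobe_trampoline"));
	return 0;
}
```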
| 1491 | |||
| 1492 | static int | ||
| 1493 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | ||
| 1494 | { | ||
| 1495 | #ifdef CONFIG_KALLSYMS | ||
| 1496 | char str[KSYM_SYMBOL_LEN]; | ||
| 1497 | const char *name; | ||
| 1498 | |||
| 1499 | kallsyms_lookup(address, NULL, NULL, NULL, str); | ||
| 1500 | |||
| 1501 | name = kretprobed(str); | ||
| 1502 | |||
| 1503 | return trace_seq_printf(s, fmt, name); | ||
| 1504 | #endif | ||
| 1505 | return 1; | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | static int | ||
| 1509 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, | ||
| 1510 | unsigned long address) | ||
| 1511 | { | ||
| 1512 | #ifdef CONFIG_KALLSYMS | ||
| 1513 | char str[KSYM_SYMBOL_LEN]; | ||
| 1514 | const char *name; | ||
| 1515 | |||
| 1516 | sprint_symbol(str, address); | ||
| 1517 | name = kretprobed(str); | ||
| 1518 | |||
| 1519 | return trace_seq_printf(s, fmt, name); | ||
| 1520 | #endif | ||
| 1521 | return 1; | ||
| 1522 | } | ||
| 1523 | |||
| 1524 | #ifndef CONFIG_64BIT | ||
| 1525 | # define IP_FMT "%08lx" | ||
| 1526 | #else | ||
| 1527 | # define IP_FMT "%016lx" | ||
| 1528 | #endif | ||
| 1529 | |||
| 1530 | int | ||
| 1531 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | ||
| 1532 | { | ||
| 1533 | int ret; | ||
| 1534 | |||
| 1535 | if (!ip) | ||
| 1536 | return trace_seq_printf(s, "0"); | ||
| 1537 | |||
| 1538 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | ||
| 1539 | ret = seq_print_sym_offset(s, "%s", ip); | ||
| 1540 | else | ||
| 1541 | ret = seq_print_sym_short(s, "%s", ip); | ||
| 1542 | |||
| 1543 | if (!ret) | ||
| 1544 | return 0; | ||
| 1545 | |||
| 1546 | if (sym_flags & TRACE_ITER_SYM_ADDR) | ||
| 1547 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
| 1548 | return ret; | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
| 1552 | unsigned long ip, unsigned long sym_flags) | ||
| 1553 | { | ||
| 1554 | struct file *file = NULL; | ||
| 1555 | unsigned long vmstart = 0; | ||
| 1556 | int ret = 1; | ||
| 1557 | |||
| 1558 | if (mm) { | ||
| 1559 | const struct vm_area_struct *vma; | ||
| 1560 | |||
| 1561 | down_read(&mm->mmap_sem); | ||
| 1562 | vma = find_vma(mm, ip); | ||
| 1563 | if (vma) { | ||
| 1564 | file = vma->vm_file; | ||
| 1565 | vmstart = vma->vm_start; | ||
| 1566 | } | ||
| 1567 | if (file) { | ||
| 1568 | ret = trace_seq_path(s, &file->f_path); | ||
| 1569 | if (ret) | ||
| 1570 | ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart); | ||
| 1571 | } | ||
| 1572 | up_read(&mm->mmap_sem); | ||
| 1573 | } | ||
| 1574 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | ||
| 1575 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
| 1576 | return ret; | ||
| 1577 | } | ||
| 1578 | |||
| 1579 | static int | ||
| 1580 | seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | ||
| 1581 | unsigned long sym_flags) | ||
| 1582 | { | ||
| 1583 | struct mm_struct *mm = NULL; | ||
| 1584 | int ret = 1; | ||
| 1585 | unsigned int i; | ||
| 1586 | |||
| 1587 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | ||
| 1588 | struct task_struct *task; | ||
| 1589 | /* | ||
| 1590 | * we do the lookup on the thread group leader, | ||
| 1591 | * since individual threads might have already quit! | ||
| 1592 | */ | ||
| 1593 | rcu_read_lock(); | ||
| 1594 | task = find_task_by_vpid(entry->ent.tgid); | ||
| 1595 | if (task) | ||
| 1596 | mm = get_task_mm(task); | ||
| 1597 | rcu_read_unlock(); | ||
| 1598 | } | ||
| 1599 | |||
| 1600 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
| 1601 | unsigned long ip = entry->caller[i]; | ||
| 1602 | |||
| 1603 | if (ip == ULONG_MAX || !ret) | ||
| 1604 | break; | ||
| 1605 | if (i && ret) | ||
| 1606 | ret = trace_seq_puts(s, " <- "); | ||
| 1607 | if (!ip) { | ||
| 1608 | if (ret) | ||
| 1609 | ret = trace_seq_puts(s, "??"); | ||
| 1610 | continue; | ||
| 1611 | } | ||
| 1612 | if (!ret) | ||
| 1613 | break; | ||
| 1614 | if (ret) | ||
| 1615 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
| 1616 | } | ||
| 1617 | |||
| 1618 | if (mm) | ||
| 1619 | mmput(mm); | ||
| 1620 | return ret; | ||
| 1621 | } | 1535 | } | 
| 1622 | 1536 | ||
| 1623 | static void print_lat_help_header(struct seq_file *m) | 1537 | static void print_lat_help_header(struct seq_file *m) | 
| @@ -1658,11 +1572,11 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
| 1658 | total = entries + | 1572 | total = entries + | 
| 1659 | ring_buffer_overruns(iter->tr->buffer); | 1573 | ring_buffer_overruns(iter->tr->buffer); | 
| 1660 | 1574 | ||
| 1661 | seq_printf(m, "%s latency trace v1.1.5 on %s\n", | 1575 | seq_printf(m, "# %s latency trace v1.1.5 on %s\n", | 
| 1662 | name, UTS_RELEASE); | 1576 | name, UTS_RELEASE); | 
| 1663 | seq_puts(m, "-----------------------------------" | 1577 | seq_puts(m, "# -----------------------------------" | 
| 1664 | "---------------------------------\n"); | 1578 | "---------------------------------\n"); | 
| 1665 | seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |" | 1579 | seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" | 
| 1666 | " (M:%s VP:%d, KP:%d, SP:%d HP:%d", | 1580 | " (M:%s VP:%d, KP:%d, SP:%d HP:%d", | 
| 1667 | nsecs_to_usecs(data->saved_latency), | 1581 | nsecs_to_usecs(data->saved_latency), | 
| 1668 | entries, | 1582 | entries, | 
| @@ -1684,121 +1598,24 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
| 1684 | #else | 1598 | #else | 
| 1685 | seq_puts(m, ")\n"); | 1599 | seq_puts(m, ")\n"); | 
| 1686 | #endif | 1600 | #endif | 
| 1687 | seq_puts(m, " -----------------\n"); | 1601 | seq_puts(m, "# -----------------\n"); | 
| 1688 | seq_printf(m, " | task: %.16s-%d " | 1602 | seq_printf(m, "# | task: %.16s-%d " | 
| 1689 | "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", | 1603 | "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", | 
| 1690 | data->comm, data->pid, data->uid, data->nice, | 1604 | data->comm, data->pid, data->uid, data->nice, | 
| 1691 | data->policy, data->rt_priority); | 1605 | data->policy, data->rt_priority); | 
| 1692 | seq_puts(m, " -----------------\n"); | 1606 | seq_puts(m, "# -----------------\n"); | 
| 1693 | 1607 | ||
| 1694 | if (data->critical_start) { | 1608 | if (data->critical_start) { | 
| 1695 | seq_puts(m, " => started at: "); | 1609 | seq_puts(m, "# => started at: "); | 
| 1696 | seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); | 1610 | seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); | 
| 1697 | trace_print_seq(m, &iter->seq); | 1611 | trace_print_seq(m, &iter->seq); | 
| 1698 | seq_puts(m, "\n => ended at: "); | 1612 | seq_puts(m, "\n# => ended at: "); | 
| 1699 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); | 1613 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); | 
| 1700 | trace_print_seq(m, &iter->seq); | 1614 | trace_print_seq(m, &iter->seq); | 
| 1701 | seq_puts(m, "\n"); | 1615 | seq_puts(m, "#\n"); | 
| 1702 | } | ||
| 1703 | |||
| 1704 | seq_puts(m, "\n"); | ||
| 1705 | } | ||
| 1706 | |||
| 1707 | static void | ||
| 1708 | lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | ||
| 1709 | { | ||
| 1710 | int hardirq, softirq; | ||
| 1711 | char *comm; | ||
| 1712 | |||
| 1713 | comm = trace_find_cmdline(entry->pid); | ||
| 1714 | |||
| 1715 | trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); | ||
| 1716 | trace_seq_printf(s, "%3d", cpu); | ||
| 1717 | trace_seq_printf(s, "%c%c", | ||
| 1718 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : | ||
| 1719 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.', | ||
| 1720 | ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); | ||
| 1721 | |||
| 1722 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | ||
| 1723 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; | ||
| 1724 | if (hardirq && softirq) { | ||
| 1725 | trace_seq_putc(s, 'H'); | ||
| 1726 | } else { | ||
| 1727 | if (hardirq) { | ||
| 1728 | trace_seq_putc(s, 'h'); | ||
| 1729 | } else { | ||
| 1730 | if (softirq) | ||
| 1731 | trace_seq_putc(s, 's'); | ||
| 1732 | else | ||
| 1733 | trace_seq_putc(s, '.'); | ||
| 1734 | } | ||
| 1735 | } | ||
| 1736 | |||
| 1737 | if (entry->preempt_count) | ||
| 1738 | trace_seq_printf(s, "%x", entry->preempt_count); | ||
| 1739 | else | ||
| 1740 | trace_seq_puts(s, "."); | ||
| 1741 | } | ||
| 1742 | |||
| 1743 | unsigned long preempt_mark_thresh = 100; | ||
| 1744 | |||
| 1745 | static void | ||
| 1746 | lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, | ||
| 1747 | unsigned long rel_usecs) | ||
| 1748 | { | ||
| 1749 | trace_seq_printf(s, " %4lldus", abs_usecs); | ||
| 1750 | if (rel_usecs > preempt_mark_thresh) | ||
| 1751 | trace_seq_puts(s, "!: "); | ||
| 1752 | else if (rel_usecs > 1) | ||
| 1753 | trace_seq_puts(s, "+: "); | ||
| 1754 | else | ||
| 1755 | trace_seq_puts(s, " : "); | ||
| 1756 | } | ||
| 1757 | |||
| 1758 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | ||
| 1759 | |||
| 1760 | static int task_state_char(unsigned long state) | ||
| 1761 | { | ||
| 1762 | int bit = state ? __ffs(state) + 1 : 0; | ||
| 1763 | |||
| 1764 | return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; | ||
| 1765 | } | ||
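task_state_char() condenses the task-state bitmask to the familiar one-letter codes from ps: state 0 maps to index 0 ('R'), otherwise the lowest set bit selects the letter, with '?' for anything past the end of the string. A userspace sketch using ffs(), whose 1-based result equals __ffs() + 1 (the exact state string is version-dependent, so the one below is illustrative):

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

/* same idea as TASK_STATE_TO_CHAR_STR; the exact string varies by version */
static const char state_to_char[] = "RSDTtZX";

static int task_state_char(unsigned long state)
{
	/* state == 0 is "running" (index 0); otherwise lowest set bit + 1 */
	int bit = state ? ffs((int)state) : 0;

	return bit < (int)sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

int main(void)
{
	printf("%c %c %c\n",
	       task_state_char(0),		/* 'R': running */
	       task_state_char(1),		/* 'S': bit 0, interruptible */
	       task_state_char(1UL << 5));	/* 'X': bit 5 (illustrative) */
	return 0;
}
```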
| 1766 | |||
| 1767 | /* | ||
| 1768 | * The message is supposed to contain an ending newline. | ||
| 1769 | * If the printing stops prematurely, try to add a newline of our own. | ||
| 1770 | */ | ||
| 1771 | void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) | ||
| 1772 | { | ||
| 1773 | struct trace_entry *ent; | ||
| 1774 | struct trace_field_cont *cont; | ||
| 1775 | bool ok = true; | ||
| 1776 | |||
| 1777 | ent = peek_next_entry(iter, iter->cpu, NULL); | ||
| 1778 | if (!ent || ent->type != TRACE_CONT) { | ||
| 1779 | trace_seq_putc(s, '\n'); | ||
| 1780 | return; | ||
| 1781 | } | 1616 | } | 
| 1782 | 1617 | ||
| 1783 | do { | 1618 | seq_puts(m, "#\n"); | 
| 1784 | cont = (struct trace_field_cont *)ent; | ||
| 1785 | if (ok) | ||
| 1786 | ok = (trace_seq_printf(s, "%s", cont->buf) > 0); | ||
| 1787 | |||
| 1788 | ftrace_disable_cpu(); | ||
| 1789 | |||
| 1790 | if (iter->buffer_iter[iter->cpu]) | ||
| 1791 | ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | ||
| 1792 | else | ||
| 1793 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | ||
| 1794 | |||
| 1795 | ftrace_enable_cpu(); | ||
| 1796 | |||
| 1797 | ent = peek_next_entry(iter, iter->cpu, NULL); | ||
| 1798 | } while (ent && ent->type == TRACE_CONT); | ||
| 1799 | |||
| 1800 | if (!ok) | ||
| 1801 | trace_seq_putc(s, '\n'); | ||
| 1802 | } | 1619 | } | 
| 1803 | 1620 | ||
| 1804 | static void test_cpu_buff_start(struct trace_iterator *iter) | 1621 | static void test_cpu_buff_start(struct trace_iterator *iter) | 
| @@ -1818,472 +1635,89 @@ static void test_cpu_buff_start(struct trace_iterator *iter) | |||
| 1818 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); | 1635 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); | 
| 1819 | } | 1636 | } | 
| 1820 | 1637 | ||
| 1821 | static enum print_line_t | ||
| 1822 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | ||
| 1823 | { | ||
| 1824 | struct trace_seq *s = &iter->seq; | ||
| 1825 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | ||
| 1826 | struct trace_entry *next_entry; | ||
| 1827 | unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); | ||
| 1828 | struct trace_entry *entry = iter->ent; | ||
| 1829 | unsigned long abs_usecs; | ||
| 1830 | unsigned long rel_usecs; | ||
| 1831 | u64 next_ts; | ||
| 1832 | char *comm; | ||
| 1833 | int S, T; | ||
| 1834 | int i; | ||
| 1835 | |||
| 1836 | if (entry->type == TRACE_CONT) | ||
| 1837 | return TRACE_TYPE_HANDLED; | ||
| 1838 | |||
| 1839 | test_cpu_buff_start(iter); | ||
| 1840 | |||
| 1841 | next_entry = find_next_entry(iter, NULL, &next_ts); | ||
| 1842 | if (!next_entry) | ||
| 1843 | next_ts = iter->ts; | ||
| 1844 | rel_usecs = ns2usecs(next_ts - iter->ts); | ||
| 1845 | abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); | ||
| 1846 | |||
| 1847 | if (verbose) { | ||
| 1848 | comm = trace_find_cmdline(entry->pid); | ||
| 1849 | trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]" | ||
| 1850 | " %ld.%03ldms (+%ld.%03ldms): ", | ||
| 1851 | comm, | ||
| 1852 | entry->pid, cpu, entry->flags, | ||
| 1853 | entry->preempt_count, trace_idx, | ||
| 1854 | ns2usecs(iter->ts), | ||
| 1855 | abs_usecs/1000, | ||
| 1856 | abs_usecs % 1000, rel_usecs/1000, | ||
| 1857 | rel_usecs % 1000); | ||
| 1858 | } else { | ||
| 1859 | lat_print_generic(s, entry, cpu); | ||
| 1860 | lat_print_timestamp(s, abs_usecs, rel_usecs); | ||
| 1861 | } | ||
| 1862 | switch (entry->type) { | ||
| 1863 | case TRACE_FN: { | ||
| 1864 | struct ftrace_entry *field; | ||
| 1865 | |||
| 1866 | trace_assign_type(field, entry); | ||
| 1867 | |||
| 1868 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
| 1869 | trace_seq_puts(s, " ("); | ||
| 1870 | seq_print_ip_sym(s, field->parent_ip, sym_flags); | ||
| 1871 | trace_seq_puts(s, ")\n"); | ||
| 1872 | break; | ||
| 1873 | } | ||
| 1874 | case TRACE_CTX: | ||
| 1875 | case TRACE_WAKE: { | ||
| 1876 | struct ctx_switch_entry *field; | ||
| 1877 | |||
| 1878 | trace_assign_type(field, entry); | ||
| 1879 | |||
| 1880 | T = task_state_char(field->next_state); | ||
| 1881 | S = task_state_char(field->prev_state); | ||
| 1882 | comm = trace_find_cmdline(field->next_pid); | ||
| 1883 | trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | ||
| 1884 | field->prev_pid, | ||
| 1885 | field->prev_prio, | ||
| 1886 | S, entry->type == TRACE_CTX ? "==>" : " +", | ||
| 1887 | field->next_cpu, | ||
| 1888 | field->next_pid, | ||
| 1889 | field->next_prio, | ||
| 1890 | T, comm); | ||
| 1891 | break; | ||
| 1892 | } | ||
| 1893 | case TRACE_SPECIAL: { | ||
| 1894 | struct special_entry *field; | ||
| 1895 | |||
| 1896 | trace_assign_type(field, entry); | ||
| 1897 | |||
| 1898 | trace_seq_printf(s, "# %ld %ld %ld\n", | ||
| 1899 | field->arg1, | ||
| 1900 | field->arg2, | ||
| 1901 | field->arg3); | ||
| 1902 | break; | ||
| 1903 | } | ||
| 1904 | case TRACE_STACK: { | ||
| 1905 | struct stack_entry *field; | ||
| 1906 | |||
| 1907 | trace_assign_type(field, entry); | ||
| 1908 | |||
| 1909 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
| 1910 | if (i) | ||
| 1911 | trace_seq_puts(s, " <= "); | ||
| 1912 | seq_print_ip_sym(s, field->caller[i], sym_flags); | ||
| 1913 | } | ||
| 1914 | trace_seq_puts(s, "\n"); | ||
| 1915 | break; | ||
| 1916 | } | ||
| 1917 | case TRACE_PRINT: { | ||
| 1918 | struct print_entry *field; | ||
| 1919 | |||
| 1920 | trace_assign_type(field, entry); | ||
| 1921 | |||
| 1922 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
| 1923 | trace_seq_printf(s, ": %s", field->buf); | ||
| 1924 | if (entry->flags & TRACE_FLAG_CONT) | ||
| 1925 | trace_seq_print_cont(s, iter); | ||
| 1926 | break; | ||
| 1927 | } | ||
| 1928 | case TRACE_BRANCH: { | ||
| 1929 | struct trace_branch *field; | ||
| 1930 | |||
| 1931 | trace_assign_type(field, entry); | ||
| 1932 | |||
| 1933 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
| 1934 | field->correct ? " ok " : " MISS ", | ||
| 1935 | field->func, | ||
| 1936 | field->file, | ||
| 1937 | field->line); | ||
| 1938 | break; | ||
| 1939 | } | ||
| 1940 | case TRACE_USER_STACK: { | ||
| 1941 | struct userstack_entry *field; | ||
| 1942 | |||
| 1943 | trace_assign_type(field, entry); | ||
| 1944 | |||
| 1945 | seq_print_userip_objs(field, s, sym_flags); | ||
| 1946 | trace_seq_putc(s, '\n'); | ||
| 1947 | break; | ||
| 1948 | } | ||
| 1949 | default: | ||
| 1950 | trace_seq_printf(s, "Unknown type %d\n", entry->type); | ||
| 1951 | } | ||
| 1952 | return TRACE_TYPE_HANDLED; | ||
| 1953 | } | ||
| 1954 | |||
| 1955 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | 1638 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | 
| 1956 | { | 1639 | { | 
| 1957 | struct trace_seq *s = &iter->seq; | 1640 | struct trace_seq *s = &iter->seq; | 
| 1958 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1641 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 
| 1959 | struct trace_entry *entry; | 1642 | struct trace_entry *entry; | 
| 1960 | unsigned long usec_rem; | 1643 | struct trace_event *event; | 
| 1961 | unsigned long long t; | ||
| 1962 | unsigned long secs; | ||
| 1963 | char *comm; | ||
| 1964 | int ret; | ||
| 1965 | int S, T; | ||
| 1966 | int i; | ||
| 1967 | 1644 | ||
| 1968 | entry = iter->ent; | 1645 | entry = iter->ent; | 
| 1969 | 1646 | ||
| 1970 | if (entry->type == TRACE_CONT) | ||
| 1971 | return TRACE_TYPE_HANDLED; | ||
| 1972 | |||
| 1973 | test_cpu_buff_start(iter); | 1647 | test_cpu_buff_start(iter); | 
| 1974 | 1648 | ||
| 1975 | comm = trace_find_cmdline(iter->ent->pid); | 1649 | event = ftrace_find_event(entry->type); | 
| 1976 | |||
| 1977 | t = ns2usecs(iter->ts); | ||
| 1978 | usec_rem = do_div(t, 1000000ULL); | ||
| 1979 | secs = (unsigned long)t; | ||
| 1980 | |||
| 1981 | ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); | ||
| 1982 | if (!ret) | ||
| 1983 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1984 | ret = trace_seq_printf(s, "[%03d] ", iter->cpu); | ||
| 1985 | if (!ret) | ||
| 1986 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1987 | ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); | ||
| 1988 | if (!ret) | ||
| 1989 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1990 | |||
| 1991 | switch (entry->type) { | ||
| 1992 | case TRACE_FN: { | ||
| 1993 | struct ftrace_entry *field; | ||
| 1994 | |||
| 1995 | trace_assign_type(field, entry); | ||
| 1996 | |||
| 1997 | ret = seq_print_ip_sym(s, field->ip, sym_flags); | ||
| 1998 | if (!ret) | ||
| 1999 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2000 | if ((sym_flags & TRACE_ITER_PRINT_PARENT) && | ||
| 2001 | field->parent_ip) { | ||
| 2002 | ret = trace_seq_printf(s, " <-"); | ||
| 2003 | if (!ret) | ||
| 2004 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2005 | ret = seq_print_ip_sym(s, | ||
| 2006 | field->parent_ip, | ||
| 2007 | sym_flags); | ||
| 2008 | if (!ret) | ||
| 2009 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2010 | } | ||
| 2011 | ret = trace_seq_printf(s, "\n"); | ||
| 2012 | if (!ret) | ||
| 2013 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2014 | break; | ||
| 2015 | } | ||
| 2016 | case TRACE_CTX: | ||
| 2017 | case TRACE_WAKE: { | ||
| 2018 | struct ctx_switch_entry *field; | ||
| 2019 | |||
| 2020 | trace_assign_type(field, entry); | ||
| 2021 | |||
| 2022 | T = task_state_char(field->next_state); | ||
| 2023 | S = task_state_char(field->prev_state); | ||
| 2024 | ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", | ||
| 2025 | field->prev_pid, | ||
| 2026 | field->prev_prio, | ||
| 2027 | S, | ||
| 2028 | entry->type == TRACE_CTX ? "==>" : " +", | ||
| 2029 | field->next_cpu, | ||
| 2030 | field->next_pid, | ||
| 2031 | field->next_prio, | ||
| 2032 | T); | ||
| 2033 | if (!ret) | ||
| 2034 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2035 | break; | ||
| 2036 | } | ||
| 2037 | case TRACE_SPECIAL: { | ||
| 2038 | struct special_entry *field; | ||
| 2039 | |||
| 2040 | trace_assign_type(field, entry); | ||
| 2041 | 1650 | ||
| 2042 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | 1651 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| 2043 | field->arg1, | 1652 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 
| 2044 | field->arg2, | 1653 | if (!trace_print_lat_context(iter)) | 
| 2045 | field->arg3); | 1654 | goto partial; | 
| 2046 | if (!ret) | 1655 | } else { | 
| 2047 | return TRACE_TYPE_PARTIAL_LINE; | 1656 | if (!trace_print_context(iter)) | 
| 2048 | break; | 1657 | goto partial; | 
| 2049 | } | ||
| 2050 | case TRACE_STACK: { | ||
| 2051 | struct stack_entry *field; | ||
| 2052 | |||
| 2053 | trace_assign_type(field, entry); | ||
| 2054 | |||
| 2055 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
| 2056 | if (i) { | ||
| 2057 | ret = trace_seq_puts(s, " <= "); | ||
| 2058 | if (!ret) | ||
| 2059 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2060 | } | ||
| 2061 | ret = seq_print_ip_sym(s, field->caller[i], | ||
| 2062 | sym_flags); | ||
| 2063 | if (!ret) | ||
| 2064 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2065 | } | 1658 | } | 
| 2066 | ret = trace_seq_puts(s, "\n"); | ||
| 2067 | if (!ret) | ||
| 2068 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2069 | break; | ||
| 2070 | } | ||
| 2071 | case TRACE_PRINT: { | ||
| 2072 | struct print_entry *field; | ||
| 2073 | |||
| 2074 | trace_assign_type(field, entry); | ||
| 2075 | |||
| 2076 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
| 2077 | trace_seq_printf(s, ": %s", field->buf); | ||
| 2078 | if (entry->flags & TRACE_FLAG_CONT) | ||
| 2079 | trace_seq_print_cont(s, iter); | ||
| 2080 | break; | ||
| 2081 | } | ||
| 2082 | case TRACE_GRAPH_RET: { | ||
| 2083 | return print_graph_function(iter); | ||
| 2084 | } | ||
| 2085 | case TRACE_GRAPH_ENT: { | ||
| 2086 | return print_graph_function(iter); | ||
| 2087 | } | 1659 | } | 
| 2088 | case TRACE_BRANCH: { | ||
| 2089 | struct trace_branch *field; | ||
| 2090 | 1660 | ||
| 2091 | trace_assign_type(field, entry); | 1661 | if (event) | 
| 1662 | return event->trace(iter, sym_flags); | ||
| 2092 | 1663 | ||
| 2093 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | 1664 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) | 
| 2094 | field->correct ? " ok " : " MISS ", | 1665 | goto partial; | 
| 2095 | field->func, | ||
| 2096 | field->file, | ||
| 2097 | field->line); | ||
| 2098 | break; | ||
| 2099 | } | ||
| 2100 | case TRACE_USER_STACK: { | ||
| 2101 | struct userstack_entry *field; | ||
| 2102 | |||
| 2103 | trace_assign_type(field, entry); | ||
| 2104 | 1666 | ||
| 2105 | ret = seq_print_userip_objs(field, s, sym_flags); | ||
| 2106 | if (!ret) | ||
| 2107 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2108 | ret = trace_seq_putc(s, '\n'); | ||
| 2109 | if (!ret) | ||
| 2110 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2111 | break; | ||
| 2112 | } | ||
| 2113 | } | ||
| 2114 | return TRACE_TYPE_HANDLED; | 1667 | return TRACE_TYPE_HANDLED; | 
| 1668 | partial: | ||
| 1669 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2115 | } | 1670 | } | 
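This hunk shows the central refactor of the patch: the per-type switch statements collapse into a lookup through ftrace_find_event(), which returns a struct trace_event whose ->trace/->raw/->hex/->binary callbacks render the entry. A minimal sketch of such a registry (types, names, and output are invented for illustration):

```c
#include <stdio.h>

enum print_line_t { TRACE_TYPE_PARTIAL_LINE, TRACE_TYPE_HANDLED };

struct trace_event {
	int type;
	enum print_line_t (*trace)(void);	/* human-readable output */
	enum print_line_t (*raw)(void);		/* raw decimal output */
};

static enum print_line_t fn_trace(void)
{
	puts("func entry");
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t fn_raw(void)
{
	puts("1 0xdead 0xbeef");
	return TRACE_TYPE_HANDLED;
}

static struct trace_event fn_event = { 1, fn_trace, fn_raw };
static struct trace_event *events[] = { &fn_event, NULL };

static struct trace_event *find_event(int type)
{
	for (int i = 0; events[i]; i++)
		if (events[i]->type == type)
			return events[i];
	return NULL;	/* unknown type: caller prints "Unknown type %d" */
}

int main(void)
{
	struct trace_event *ev = find_event(1);

	if (ev)
		ev->trace();
	if (!find_event(99))
		puts("Unknown type 99");
	return 0;
}
```

New record types can then register a handler instead of growing every formatter, which is exactly why the four print_*_fmt functions above shrink to a few lines each.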
| 2116 | 1671 | ||
| 2117 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 1672 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 
| 2118 | { | 1673 | { | 
| 2119 | struct trace_seq *s = &iter->seq; | 1674 | struct trace_seq *s = &iter->seq; | 
| 2120 | struct trace_entry *entry; | 1675 | struct trace_entry *entry; | 
| 2121 | int ret; | 1676 | struct trace_event *event; | 
| 2122 | int S, T; | ||
| 2123 | 1677 | ||
| 2124 | entry = iter->ent; | 1678 | entry = iter->ent; | 
| 2125 | 1679 | ||
| 2126 | if (entry->type == TRACE_CONT) | 1680 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| 2127 | return TRACE_TYPE_HANDLED; | 1681 | if (!trace_seq_printf(s, "%d %d %llu ", | 
| 2128 | 1682 | entry->pid, iter->cpu, iter->ts)) | |
| 2129 | ret = trace_seq_printf(s, "%d %d %llu ", | 1683 | goto partial; | 
| 2130 | entry->pid, iter->cpu, iter->ts); | ||
| 2131 | if (!ret) | ||
| 2132 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2133 | |||
| 2134 | switch (entry->type) { | ||
| 2135 | case TRACE_FN: { | ||
| 2136 | struct ftrace_entry *field; | ||
| 2137 | |||
| 2138 | trace_assign_type(field, entry); | ||
| 2139 | |||
| 2140 | ret = trace_seq_printf(s, "%x %x\n", | ||
| 2141 | field->ip, | ||
| 2142 | field->parent_ip); | ||
| 2143 | if (!ret) | ||
| 2144 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2145 | break; | ||
| 2146 | } | ||
| 2147 | case TRACE_CTX: | ||
| 2148 | case TRACE_WAKE: { | ||
| 2149 | struct ctx_switch_entry *field; | ||
| 2150 | |||
| 2151 | trace_assign_type(field, entry); | ||
| 2152 | |||
| 2153 | T = task_state_char(field->next_state); | ||
| 2154 | S = entry->type == TRACE_WAKE ? '+' : | ||
| 2155 | task_state_char(field->prev_state); | ||
| 2156 | ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", | ||
| 2157 | field->prev_pid, | ||
| 2158 | field->prev_prio, | ||
| 2159 | S, | ||
| 2160 | field->next_cpu, | ||
| 2161 | field->next_pid, | ||
| 2162 | field->next_prio, | ||
| 2163 | T); | ||
| 2164 | if (!ret) | ||
| 2165 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2166 | break; | ||
| 2167 | } | 1684 | } | 
| 2168 | case TRACE_SPECIAL: | ||
| 2169 | case TRACE_USER_STACK: | ||
| 2170 | case TRACE_STACK: { | ||
| 2171 | struct special_entry *field; | ||
| 2172 | 1685 | ||
| 2173 | trace_assign_type(field, entry); | 1686 | event = ftrace_find_event(entry->type); | 
| 1687 | if (event) | ||
| 1688 | return event->raw(iter, 0); | ||
| 2174 | 1689 | ||
| 2175 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | 1690 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) | 
| 2176 | field->arg1, | 1691 | goto partial; | 
| 2177 | field->arg2, | ||
| 2178 | field->arg3); | ||
| 2179 | if (!ret) | ||
| 2180 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2181 | break; | ||
| 2182 | } | ||
| 2183 | case TRACE_PRINT: { | ||
| 2184 | struct print_entry *field; | ||
| 2185 | |||
| 2186 | trace_assign_type(field, entry); | ||
| 2187 | 1692 | ||
| 2188 | trace_seq_printf(s, "# %lx %s", field->ip, field->buf); | ||
| 2189 | if (entry->flags & TRACE_FLAG_CONT) | ||
| 2190 | trace_seq_print_cont(s, iter); | ||
| 2191 | break; | ||
| 2192 | } | ||
| 2193 | } | ||
| 2194 | return TRACE_TYPE_HANDLED; | 1693 | return TRACE_TYPE_HANDLED; | 
| 1694 | partial: | ||
| 1695 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2195 | } | 1696 | } | 
| 2196 | 1697 | ||
| 2197 | #define SEQ_PUT_FIELD_RET(s, x) \ | ||
| 2198 | do { \ | ||
| 2199 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ | ||
| 2200 | return 0; \ | ||
| 2201 | } while (0) | ||
| 2202 | |||
| 2203 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ | ||
| 2204 | do { \ | ||
| 2205 | BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \ | ||
| 2206 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ | ||
| 2207 | return 0; \ | ||
| 2208 | } while (0) | ||
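The SEQ_PUT_FIELD_RET/SEQ_PUT_HEX_FIELD_RET macros removed here (they live on in the output code) wrap each emit in an early return, so a full trace_seq aborts the caller mid-record and the line is reported as partial. A compact sketch of the pattern (buffer size and field layout are arbitrary):

```c
#include <stdio.h>
#include <string.h>

struct seq { char buf[16]; int len; };

static int seq_putmem(struct seq *s, const void *p, int n)
{
	if (s->len + n > (int)sizeof(s->buf))
		return 0;			/* buffer full */
	memcpy(s->buf + s->len, p, n);
	s->len += n;
	return 1;
}

#define SEQ_PUT_FIELD_RET(s, x)					\
	do {							\
		if (!seq_putmem((s), &(x), sizeof(x)))		\
			return 0;	/* propagate "partial line" */ \
	} while (0)

static int emit_record(struct seq *s, int pid, long ts)
{
	SEQ_PUT_FIELD_RET(s, pid);
	SEQ_PUT_FIELD_RET(s, ts);
	return 1;
}

int main(void)
{
	struct seq s = { {0}, 0 };

	printf("emitted: %d, bytes: %d\n", emit_record(&s, 42, 100L), s.len);
	return 0;
}
```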
| 2209 | |||
| 2210 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 1698 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 
| 2211 | { | 1699 | { | 
| 2212 | struct trace_seq *s = &iter->seq; | 1700 | struct trace_seq *s = &iter->seq; | 
| 2213 | unsigned char newline = '\n'; | 1701 | unsigned char newline = '\n'; | 
| 2214 | struct trace_entry *entry; | 1702 | struct trace_entry *entry; | 
| 2215 | int S, T; | 1703 | struct trace_event *event; | 
| 2216 | 1704 | ||
| 2217 | entry = iter->ent; | 1705 | entry = iter->ent; | 
| 2218 | 1706 | ||
| 2219 | if (entry->type == TRACE_CONT) | 1707 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| 2220 | return TRACE_TYPE_HANDLED; | 1708 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 
| 2221 | 1709 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | |
| 2222 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 1710 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | 
| 2223 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | ||
| 2224 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | ||
| 2225 | |||
| 2226 | switch (entry->type) { | ||
| 2227 | case TRACE_FN: { | ||
| 2228 | struct ftrace_entry *field; | ||
| 2229 | |||
| 2230 | trace_assign_type(field, entry); | ||
| 2231 | |||
| 2232 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | ||
| 2233 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | ||
| 2234 | break; | ||
| 2235 | } | ||
| 2236 | case TRACE_CTX: | ||
| 2237 | case TRACE_WAKE: { | ||
| 2238 | struct ctx_switch_entry *field; | ||
| 2239 | |||
| 2240 | trace_assign_type(field, entry); | ||
| 2241 | |||
| 2242 | T = task_state_char(field->next_state); | ||
| 2243 | S = entry->type == TRACE_WAKE ? '+' : | ||
| 2244 | task_state_char(field->prev_state); | ||
| 2245 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | ||
| 2246 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | ||
| 2247 | SEQ_PUT_HEX_FIELD_RET(s, S); | ||
| 2248 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); | ||
| 2249 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); | ||
| 2250 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); | ||
| 2251 | SEQ_PUT_HEX_FIELD_RET(s, T); | ||
| 2252 | break; | ||
| 2253 | } | 1711 | } | 
| 2254 | case TRACE_SPECIAL: | ||
| 2255 | case TRACE_USER_STACK: | ||
| 2256 | case TRACE_STACK: { | ||
| 2257 | struct special_entry *field; | ||
| 2258 | 1712 | ||
| 2259 | trace_assign_type(field, entry); | 1713 | event = ftrace_find_event(entry->type); | 
| 2260 | 1714 | if (event) { | |
| 2261 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); | 1715 | enum print_line_t ret = event->hex(iter, 0); | 
| 2262 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | 1716 | if (ret != TRACE_TYPE_HANDLED) | 
| 2263 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | 1717 | return ret; | 
| 2264 | break; | ||
| 2265 | } | ||
| 2266 | } | 1718 | } | 
| 2267 | SEQ_PUT_FIELD_RET(s, newline); | ||
| 2268 | |||
| 2269 | return TRACE_TYPE_HANDLED; | ||
| 2270 | } | ||
| 2271 | |||
| 2272 | static enum print_line_t print_printk_msg_only(struct trace_iterator *iter) | ||
| 2273 | { | ||
| 2274 | struct trace_seq *s = &iter->seq; | ||
| 2275 | struct trace_entry *entry = iter->ent; | ||
| 2276 | struct print_entry *field; | ||
| 2277 | int ret; | ||
| 2278 | |||
| 2279 | trace_assign_type(field, entry); | ||
| 2280 | |||
| 2281 | ret = trace_seq_printf(s, field->buf); | ||
| 2282 | if (!ret) | ||
| 2283 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2284 | 1719 | ||
| 2285 | if (entry->flags & TRACE_FLAG_CONT) | 1720 | SEQ_PUT_FIELD_RET(s, newline); | 
| 2286 | trace_seq_print_cont(s, iter); | ||
| 2287 | 1721 | ||
| 2288 | return TRACE_TYPE_HANDLED; | 1722 | return TRACE_TYPE_HANDLED; | 
| 2289 | } | 1723 | } | 
| @@ -2292,59 +1726,37 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | |||
| 2292 | { | 1726 | { | 
| 2293 | struct trace_seq *s = &iter->seq; | 1727 | struct trace_seq *s = &iter->seq; | 
| 2294 | struct trace_entry *entry; | 1728 | struct trace_entry *entry; | 
| 1729 | struct trace_event *event; | ||
| 2295 | 1730 | ||
| 2296 | entry = iter->ent; | 1731 | entry = iter->ent; | 
| 2297 | 1732 | ||
| 2298 | if (entry->type == TRACE_CONT) | 1733 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 
| 2299 | return TRACE_TYPE_HANDLED; | 1734 | SEQ_PUT_FIELD_RET(s, entry->pid); | 
| 2300 | 1735 | SEQ_PUT_FIELD_RET(s, iter->cpu); | |
| 2301 | SEQ_PUT_FIELD_RET(s, entry->pid); | 1736 | SEQ_PUT_FIELD_RET(s, iter->ts); | 
| 2302 | SEQ_PUT_FIELD_RET(s, entry->cpu); | ||
| 2303 | SEQ_PUT_FIELD_RET(s, iter->ts); | ||
| 2304 | |||
| 2305 | switch (entry->type) { | ||
| 2306 | case TRACE_FN: { | ||
| 2307 | struct ftrace_entry *field; | ||
| 2308 | |||
| 2309 | trace_assign_type(field, entry); | ||
| 2310 | |||
| 2311 | SEQ_PUT_FIELD_RET(s, field->ip); | ||
| 2312 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | ||
| 2313 | break; | ||
| 2314 | } | ||
| 2315 | case TRACE_CTX: { | ||
| 2316 | struct ctx_switch_entry *field; | ||
| 2317 | |||
| 2318 | trace_assign_type(field, entry); | ||
| 2319 | |||
| 2320 | SEQ_PUT_FIELD_RET(s, field->prev_pid); | ||
| 2321 | SEQ_PUT_FIELD_RET(s, field->prev_prio); | ||
| 2322 | SEQ_PUT_FIELD_RET(s, field->prev_state); | ||
| 2323 | SEQ_PUT_FIELD_RET(s, field->next_pid); | ||
| 2324 | SEQ_PUT_FIELD_RET(s, field->next_prio); | ||
| 2325 | SEQ_PUT_FIELD_RET(s, field->next_state); | ||
| 2326 | break; | ||
| 2327 | } | 1737 | } | 
| 2328 | case TRACE_SPECIAL: | ||
| 2329 | case TRACE_USER_STACK: | ||
| 2330 | case TRACE_STACK: { | ||
| 2331 | struct special_entry *field; | ||
| 2332 | 1738 | ||
| 2333 | trace_assign_type(field, entry); | 1739 | event = ftrace_find_event(entry->type); | 
| 2334 | 1740 | return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED; | |
| 2335 | SEQ_PUT_FIELD_RET(s, field->arg1); | ||
| 2336 | SEQ_PUT_FIELD_RET(s, field->arg2); | ||
| 2337 | SEQ_PUT_FIELD_RET(s, field->arg3); | ||
| 2338 | break; | ||
| 2339 | } | ||
| 2340 | } | ||
| 2341 | return 1; | ||
| 2342 | } | 1741 | } | 
| 2343 | 1742 | ||
| 2344 | static int trace_empty(struct trace_iterator *iter) | 1743 | static int trace_empty(struct trace_iterator *iter) | 
| 2345 | { | 1744 | { | 
| 2346 | int cpu; | 1745 | int cpu; | 
| 2347 | 1746 | ||
| 1747 | /* If we are looking at one CPU buffer, only check that one */ | ||
| 1748 | if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { | ||
| 1749 | cpu = iter->cpu_file; | ||
| 1750 | if (iter->buffer_iter[cpu]) { | ||
| 1751 | if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | ||
| 1752 | return 0; | ||
| 1753 | } else { | ||
| 1754 | if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | ||
| 1755 | return 0; | ||
| 1756 | } | ||
| 1757 | return 1; | ||
| 1758 | } | ||
| 1759 | |||
| 2348 | for_each_tracing_cpu(cpu) { | 1760 | for_each_tracing_cpu(cpu) { | 
| 2349 | if (iter->buffer_iter[cpu]) { | 1761 | if (iter->buffer_iter[cpu]) { | 
| 2350 | if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | 1762 | if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | 
| @@ -2368,10 +1780,15 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
| 2368 | return ret; | 1780 | return ret; | 
| 2369 | } | 1781 | } | 
| 2370 | 1782 | ||
| 1783 | if (iter->ent->type == TRACE_BPRINT && | ||
| 1784 | trace_flags & TRACE_ITER_PRINTK && | ||
| 1785 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | ||
| 1786 | return trace_print_bprintk_msg_only(iter); | ||
| 1787 | |||
| 2371 | if (iter->ent->type == TRACE_PRINT && | 1788 | if (iter->ent->type == TRACE_PRINT && | 
| 2372 | trace_flags & TRACE_ITER_PRINTK && | 1789 | trace_flags & TRACE_ITER_PRINTK && | 
| 2373 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 1790 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | 
| 2374 | return print_printk_msg_only(iter); | 1791 | return trace_print_printk_msg_only(iter); | 
| 2375 | 1792 | ||
| 2376 | if (trace_flags & TRACE_ITER_BIN) | 1793 | if (trace_flags & TRACE_ITER_BIN) | 
| 2377 | return print_bin_fmt(iter); | 1794 | return print_bin_fmt(iter); | 
| @@ -2382,9 +1799,6 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
| 2382 | if (trace_flags & TRACE_ITER_RAW) | 1799 | if (trace_flags & TRACE_ITER_RAW) | 
| 2383 | return print_raw_fmt(iter); | 1800 | return print_raw_fmt(iter); | 
| 2384 | 1801 | ||
| 2385 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) | ||
| 2386 | return print_lat_fmt(iter, iter->idx, iter->cpu); | ||
| 2387 | |||
| 2388 | return print_trace_fmt(iter); | 1802 | return print_trace_fmt(iter); | 
| 2389 | } | 1803 | } | 
| 2390 | 1804 | ||
| @@ -2426,30 +1840,40 @@ static struct seq_operations tracer_seq_ops = { | |||
| 2426 | }; | 1840 | }; | 
| 2427 | 1841 | ||
| 2428 | static struct trace_iterator * | 1842 | static struct trace_iterator * | 
| 2429 | __tracing_open(struct inode *inode, struct file *file, int *ret) | 1843 | __tracing_open(struct inode *inode, struct file *file) | 
| 2430 | { | 1844 | { | 
| 1845 | long cpu_file = (long) inode->i_private; | ||
| 1846 | void *fail_ret = ERR_PTR(-ENOMEM); | ||
| 2431 | struct trace_iterator *iter; | 1847 | struct trace_iterator *iter; | 
| 2432 | struct seq_file *m; | 1848 | struct seq_file *m; | 
| 2433 | int cpu; | 1849 | int cpu, ret; | 
| 2434 | 1850 | ||
| 2435 | if (tracing_disabled) { | 1851 | if (tracing_disabled) | 
| 2436 | *ret = -ENODEV; | 1852 | return ERR_PTR(-ENODEV); | 
| 2437 | return NULL; | ||
| 2438 | } | ||
| 2439 | 1853 | ||
| 2440 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 1854 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 
| 2441 | if (!iter) { | 1855 | if (!iter) | 
| 2442 | *ret = -ENOMEM; | 1856 | return ERR_PTR(-ENOMEM); | 
| 2443 | goto out; | ||
| 2444 | } | ||
| 2445 | 1857 | ||
| 1858 | /* | ||
| 1859 | * We make a copy of the current tracer to avoid concurrent | ||
| 1860 | * changes to it while we are reading. | ||
| 1861 | */ | ||
| 2446 | mutex_lock(&trace_types_lock); | 1862 | mutex_lock(&trace_types_lock); | 
| 1863 | iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); | ||
| 1864 | if (!iter->trace) | ||
| 1865 | goto fail; | ||
| 1866 | |||
| 1867 | if (current_trace) | ||
| 1868 | *iter->trace = *current_trace; | ||
| 1869 | |||
| 2447 | if (current_trace && current_trace->print_max) | 1870 | if (current_trace && current_trace->print_max) | 
| 2448 | iter->tr = &max_tr; | 1871 | iter->tr = &max_tr; | 
| 2449 | else | 1872 | else | 
| 2450 | iter->tr = inode->i_private; | 1873 | iter->tr = &global_trace; | 
| 2451 | iter->trace = current_trace; | ||
| 2452 | iter->pos = -1; | 1874 | iter->pos = -1; | 
| 1875 | mutex_init(&iter->mutex); | ||
| 1876 | iter->cpu_file = cpu_file; | ||
| 2453 | 1877 | ||
| 2454 | /* Notify the tracer early; before we stop tracing. */ | 1878 | /* Notify the tracer early; before we stop tracing. */ | 
| 2455 | if (iter->trace && iter->trace->open) | 1879 | if (iter->trace && iter->trace->open) | 
| @@ -2459,20 +1883,24 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
| 2459 | if (ring_buffer_overruns(iter->tr->buffer)) | 1883 | if (ring_buffer_overruns(iter->tr->buffer)) | 
| 2460 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | 1884 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | 
| 2461 | 1885 | ||
| 1886 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | ||
| 1887 | for_each_tracing_cpu(cpu) { | ||
| 2462 | 1888 | ||
| 2463 | for_each_tracing_cpu(cpu) { | 1889 | iter->buffer_iter[cpu] = | 
| 2464 | 1890 | ring_buffer_read_start(iter->tr->buffer, cpu); | |
| 1891 | } | ||
| 1892 | } else { | ||
| 1893 | cpu = iter->cpu_file; | ||
| 2465 | iter->buffer_iter[cpu] = | 1894 | iter->buffer_iter[cpu] = | 
| 2466 | ring_buffer_read_start(iter->tr->buffer, cpu); | 1895 | ring_buffer_read_start(iter->tr->buffer, cpu); | 
| 2467 | |||
| 2468 | if (!iter->buffer_iter[cpu]) | ||
| 2469 | goto fail_buffer; | ||
| 2470 | } | 1896 | } | 
| 2471 | 1897 | ||
| 2472 | /* TODO stop tracer */ | 1898 | /* TODO stop tracer */ | 
| 2473 | *ret = seq_open(file, &tracer_seq_ops); | 1899 | ret = seq_open(file, &tracer_seq_ops); | 
| 2474 | if (*ret) | 1900 | if (ret < 0) { | 
| 1901 | fail_ret = ERR_PTR(ret); | ||
| 2475 | goto fail_buffer; | 1902 | goto fail_buffer; | 
| 1903 | } | ||
| 2476 | 1904 | ||
| 2477 | m = file->private_data; | 1905 | m = file->private_data; | 
| 2478 | m->private = iter; | 1906 | m->private = iter; | 
| @@ -2482,7 +1910,6 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
| 2482 | 1910 | ||
| 2483 | mutex_unlock(&trace_types_lock); | 1911 | mutex_unlock(&trace_types_lock); | 
| 2484 | 1912 | ||
| 2485 | out: | ||
| 2486 | return iter; | 1913 | return iter; | 
| 2487 | 1914 | ||
| 2488 | fail_buffer: | 1915 | fail_buffer: | 
| @@ -2490,10 +1917,12 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
| 2490 | if (iter->buffer_iter[cpu]) | 1917 | if (iter->buffer_iter[cpu]) | 
| 2491 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | 1918 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | 
| 2492 | } | 1919 | } | 
| 1920 | fail: | ||
| 2493 | mutex_unlock(&trace_types_lock); | 1921 | mutex_unlock(&trace_types_lock); | 
| 1922 | kfree(iter->trace); | ||
| 2494 | kfree(iter); | 1923 | kfree(iter); | 
| 2495 | 1924 | ||
| 2496 | return ERR_PTR(-ENOMEM); | 1925 | return fail_ret; | 
| 2497 | } | 1926 | } | 
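__tracing_open drops its int *ret out-parameter in favor of the kernel's ERR_PTR convention: a negative errno is encoded directly in the returned pointer and recovered with IS_ERR()/PTR_ERR(). A self-contained sketch of the idiom, with the MAX_ERRNO clamping used by the kernel's err.h:

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* the top MAX_ERRNO addresses are never valid, so they carry errnos */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *open_iter(int fail)
{
	if (fail)
		return ERR_PTR(-12);	/* -ENOMEM */
	return malloc(64);
}

int main(void)
{
	void *it = open_iter(1);

	if (IS_ERR(it)) {
		printf("open failed: %ld\n", PTR_ERR(it));	/* prints -12 */
		return 1;
	}
	free(it);
	return 0;
}
```

One return value now carries both success and failure, which is what lets the fail_ret/fail_buffer labels above unwind cleanly without threading an extra pointer through every path.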
| 2498 | 1927 | ||
| 2499 | int tracing_open_generic(struct inode *inode, struct file *filp) | 1928 | int tracing_open_generic(struct inode *inode, struct file *filp) | 
| @@ -2505,12 +1934,17 @@ int tracing_open_generic(struct inode *inode, struct file *filp) | |||
| 2505 | return 0; | 1934 | return 0; | 
| 2506 | } | 1935 | } | 
| 2507 | 1936 | ||
| 2508 | int tracing_release(struct inode *inode, struct file *file) | 1937 | static int tracing_release(struct inode *inode, struct file *file) | 
| 2509 | { | 1938 | { | 
| 2510 | struct seq_file *m = (struct seq_file *)file->private_data; | 1939 | struct seq_file *m = (struct seq_file *)file->private_data; | 
| 2511 | struct trace_iterator *iter = m->private; | 1940 | struct trace_iterator *iter; | 
| 2512 | int cpu; | 1941 | int cpu; | 
| 2513 | 1942 | ||
| 1943 | if (!(file->f_mode & FMODE_READ)) | ||
| 1944 | return 0; | ||
| 1945 | |||
| 1946 | iter = m->private; | ||
| 1947 | |||
| 2514 | mutex_lock(&trace_types_lock); | 1948 | mutex_lock(&trace_types_lock); | 
| 2515 | for_each_tracing_cpu(cpu) { | 1949 | for_each_tracing_cpu(cpu) { | 
| 2516 | if (iter->buffer_iter[cpu]) | 1950 | if (iter->buffer_iter[cpu]) | 
| @@ -2525,33 +1959,38 @@ int tracing_release(struct inode *inode, struct file *file) | |||
| 2525 | mutex_unlock(&trace_types_lock); | 1959 | mutex_unlock(&trace_types_lock); | 
| 2526 | 1960 | ||
| 2527 | seq_release(inode, file); | 1961 | seq_release(inode, file); | 
| 1962 | mutex_destroy(&iter->mutex); | ||
| 1963 | kfree(iter->trace); | ||
| 2528 | kfree(iter); | 1964 | kfree(iter); | 
| 2529 | return 0; | 1965 | return 0; | 
| 2530 | } | 1966 | } | 
| 2531 | 1967 | ||
| 2532 | static int tracing_open(struct inode *inode, struct file *file) | 1968 | static int tracing_open(struct inode *inode, struct file *file) | 
| 2533 | { | 1969 | { | 
| 2534 | int ret; | ||
| 2535 | |||
| 2536 | __tracing_open(inode, file, &ret); | ||
| 2537 | |||
| 2538 | return ret; | ||
| 2539 | } | ||
| 2540 | |||
| 2541 | static int tracing_lt_open(struct inode *inode, struct file *file) | ||
| 2542 | { | ||
| 2543 | struct trace_iterator *iter; | 1970 | struct trace_iterator *iter; | 
| 2544 | int ret; | 1971 | int ret = 0; | 
| 2545 | 1972 | ||
| 2546 | iter = __tracing_open(inode, file, &ret); | 1973 | /* If this file was open for write, then erase contents */ | 
| 1974 | if ((file->f_mode & FMODE_WRITE) && | ||
| 1975 | !(file->f_flags & O_APPEND)) { | ||
| 1976 | long cpu = (long) inode->i_private; | ||
| 2547 | 1977 | ||
| 2548 | if (!ret) | 1978 | if (cpu == TRACE_PIPE_ALL_CPU) | 
| 2549 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | 1979 | tracing_reset_online_cpus(&global_trace); | 
| 1980 | else | ||
| 1981 | tracing_reset(&global_trace, cpu); | ||
| 1982 | } | ||
| 2550 | 1983 | ||
| 1984 | if (file->f_mode & FMODE_READ) { | ||
| 1985 | iter = __tracing_open(inode, file); | ||
| 1986 | if (IS_ERR(iter)) | ||
| 1987 | ret = PTR_ERR(iter); | ||
| 1988 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
| 1989 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | ||
| 1990 | } | ||
| 2551 | return ret; | 1991 | return ret; | 
| 2552 | } | 1992 | } | 
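tracing_open now honors the file mode: opening for write without O_APPEND resets the buffers, which is what makes "echo > trace" clear the trace, while read opens build the iterator. A toy sketch of that dispatch (flag values are illustrative, not the kernel's):

```c
#include <stdio.h>

#define FMODE_READ  0x1
#define FMODE_WRITE 0x2
#define O_APPEND    0x400

static void reset_buffer(void) { puts("buffer cleared"); }

static int my_open(int f_mode, int f_flags)
{
	/* truncate semantics on write, like a regular file */
	if ((f_mode & FMODE_WRITE) && !(f_flags & O_APPEND))
		reset_buffer();

	if (f_mode & FMODE_READ)
		puts("iterator set up for reading");
	return 0;
}

int main(void)
{
	my_open(FMODE_WRITE, 0);	/* echo > trace */
	my_open(FMODE_READ, 0);		/* cat trace */
	return 0;
}
```

The tracing_write_stub above completes the picture: writes succeed but store nothing, so the truncation on open is the only effect of redirecting into the file.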
| 2553 | 1993 | ||
| 2554 | |||
| 2555 | static void * | 1994 | static void * | 
| 2556 | t_next(struct seq_file *m, void *v, loff_t *pos) | 1995 | t_next(struct seq_file *m, void *v, loff_t *pos) | 
| 2557 | { | 1996 | { | 
| @@ -2623,21 +2062,22 @@ static int show_traces_open(struct inode *inode, struct file *file) | |||
| 2623 | return ret; | 2062 | return ret; | 
| 2624 | } | 2063 | } | 
| 2625 | 2064 | ||
| 2626 | static struct file_operations tracing_fops = { | 2065 | static ssize_t | 
| 2627 | .open = tracing_open, | 2066 | tracing_write_stub(struct file *filp, const char __user *ubuf, | 
| 2628 | .read = seq_read, | 2067 | size_t count, loff_t *ppos) | 
| 2629 | .llseek = seq_lseek, | 2068 | { | 
| 2630 | .release = tracing_release, | 2069 | return count; | 
| 2631 | }; | 2070 | } | 
| 2632 | 2071 | ||
| 2633 | static struct file_operations tracing_lt_fops = { | 2072 | static const struct file_operations tracing_fops = { | 
| 2634 | .open = tracing_lt_open, | 2073 | .open = tracing_open, | 
| 2635 | .read = seq_read, | 2074 | .read = seq_read, | 
| 2075 | .write = tracing_write_stub, | ||
| 2636 | .llseek = seq_lseek, | 2076 | .llseek = seq_lseek, | 
| 2637 | .release = tracing_release, | 2077 | .release = tracing_release, | 
| 2638 | }; | 2078 | }; | 
| 2639 | 2079 | ||
| 2640 | static struct file_operations show_traces_fops = { | 2080 | static const struct file_operations show_traces_fops = { | 
| 2641 | .open = show_traces_open, | 2081 | .open = show_traces_open, | 
| 2642 | .read = seq_read, | 2082 | .read = seq_read, | 
| 2643 | .release = seq_release, | 2083 | .release = seq_release, | 
| @@ -2730,7 +2170,7 @@ err_unlock: | |||
| 2730 | return err; | 2170 | return err; | 
| 2731 | } | 2171 | } | 
| 2732 | 2172 | ||
| 2733 | static struct file_operations tracing_cpumask_fops = { | 2173 | static const struct file_operations tracing_cpumask_fops = { | 
| 2734 | .open = tracing_open_generic, | 2174 | .open = tracing_open_generic, | 
| 2735 | .read = tracing_cpumask_read, | 2175 | .read = tracing_cpumask_read, | 
| 2736 | .write = tracing_cpumask_write, | 2176 | .write = tracing_cpumask_write, | 
| @@ -2740,57 +2180,62 @@ static ssize_t | |||
| 2740 | tracing_trace_options_read(struct file *filp, char __user *ubuf, | 2180 | tracing_trace_options_read(struct file *filp, char __user *ubuf, | 
| 2741 | size_t cnt, loff_t *ppos) | 2181 | size_t cnt, loff_t *ppos) | 
| 2742 | { | 2182 | { | 
| 2743 | int i; | 2183 | struct tracer_opt *trace_opts; | 
| 2184 | u32 tracer_flags; | ||
| 2185 | int len = 0; | ||
| 2744 | char *buf; | 2186 | char *buf; | 
| 2745 | int r = 0; | 2187 | int r = 0; | 
| 2746 | int len = 0; | 2188 | int i; | 
| 2747 | u32 tracer_flags = current_trace->flags->val; | ||
| 2748 | struct tracer_opt *trace_opts = current_trace->flags->opts; | ||
| 2749 | 2189 | ||
| 2750 | 2190 | ||
| 2751 | /* calulate max size */ | 2191 | /* calculate max size */ | 
| 2752 | for (i = 0; trace_options[i]; i++) { | 2192 | for (i = 0; trace_options[i]; i++) { | 
| 2753 | len += strlen(trace_options[i]); | 2193 | len += strlen(trace_options[i]); | 
| 2754 | len += 3; /* "no" and space */ | 2194 | len += 3; /* "no" and newline */ | 
| 2755 | } | 2195 | } | 
| 2756 | 2196 | ||
| 2197 | mutex_lock(&trace_types_lock); | ||
| 2198 | tracer_flags = current_trace->flags->val; | ||
| 2199 | trace_opts = current_trace->flags->opts; | ||
| 2200 | |||
| 2757 | /* | 2201 | /* | 
| 2758 | * Increase the size with names of options specific | 2202 | * Increase the size with names of options specific | 
| 2759 | * of the current tracer. | 2203 | * of the current tracer. | 
| 2760 | */ | 2204 | */ | 
| 2761 | for (i = 0; trace_opts[i].name; i++) { | 2205 | for (i = 0; trace_opts[i].name; i++) { | 
| 2762 | len += strlen(trace_opts[i].name); | 2206 | len += strlen(trace_opts[i].name); | 
| 2763 | len += 3; /* "no" and space */ | 2207 | len += 3; /* "no" and newline */ | 
| 2764 | } | 2208 | } | 
| 2765 | 2209 | ||
| 2766 | /* +2 for \n and \0 */ | 2210 | /* +2 for \n and \0 */ | 
| 2767 | buf = kmalloc(len + 2, GFP_KERNEL); | 2211 | buf = kmalloc(len + 2, GFP_KERNEL); | 
| 2768 | if (!buf) | 2212 | if (!buf) { | 
| 2213 | mutex_unlock(&trace_types_lock); | ||
| 2769 | return -ENOMEM; | 2214 | return -ENOMEM; | 
| 2215 | } | ||
| 2770 | 2216 | ||
| 2771 | for (i = 0; trace_options[i]; i++) { | 2217 | for (i = 0; trace_options[i]; i++) { | 
| 2772 | if (trace_flags & (1 << i)) | 2218 | if (trace_flags & (1 << i)) | 
| 2773 | r += sprintf(buf + r, "%s ", trace_options[i]); | 2219 | r += sprintf(buf + r, "%s\n", trace_options[i]); | 
| 2774 | else | 2220 | else | 
| 2775 | r += sprintf(buf + r, "no%s ", trace_options[i]); | 2221 | r += sprintf(buf + r, "no%s\n", trace_options[i]); | 
| 2776 | } | 2222 | } | 
| 2777 | 2223 | ||
| 2778 | for (i = 0; trace_opts[i].name; i++) { | 2224 | for (i = 0; trace_opts[i].name; i++) { | 
| 2779 | if (tracer_flags & trace_opts[i].bit) | 2225 | if (tracer_flags & trace_opts[i].bit) | 
| 2780 | r += sprintf(buf + r, "%s ", | 2226 | r += sprintf(buf + r, "%s\n", | 
| 2781 | trace_opts[i].name); | 2227 | trace_opts[i].name); | 
| 2782 | else | 2228 | else | 
| 2783 | r += sprintf(buf + r, "no%s ", | 2229 | r += sprintf(buf + r, "no%s\n", | 
| 2784 | trace_opts[i].name); | 2230 | trace_opts[i].name); | 
| 2785 | } | 2231 | } | 
| 2232 | mutex_unlock(&trace_types_lock); | ||
| 2786 | 2233 | ||
| 2787 | r += sprintf(buf + r, "\n"); | ||
| 2788 | WARN_ON(r >= len + 2); | 2234 | WARN_ON(r >= len + 2); | 
| 2789 | 2235 | ||
| 2790 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2236 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| 2791 | 2237 | ||
| 2792 | kfree(buf); | 2238 | kfree(buf); | 
| 2793 | |||
| 2794 | return r; | 2239 | return r; | 
| 2795 | } | 2240 | } | 
| 2796 | 2241 | ||
| @@ -2828,6 +2273,34 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
| 2828 | return 0; | 2273 | return 0; | 
| 2829 | } | 2274 | } | 
| 2830 | 2275 | ||
| 2276 | static void set_tracer_flags(unsigned int mask, int enabled) | ||
| 2277 | { | ||
| 2278 | /* do nothing if flag is already set */ | ||
| 2279 | if (!!(trace_flags & mask) == !!enabled) | ||
| 2280 | return; | ||
| 2281 | |||
| 2282 | if (enabled) | ||
| 2283 | trace_flags |= mask; | ||
| 2284 | else | ||
| 2285 | trace_flags &= ~mask; | ||
| 2286 | |||
| 2287 | if (mask == TRACE_ITER_GLOBAL_CLK) { | ||
| 2288 | u64 (*func)(void); | ||
| 2289 | |||
| 2290 | if (enabled) | ||
| 2291 | func = trace_clock_global; | ||
| 2292 | else | ||
| 2293 | func = trace_clock_local; | ||
| 2294 | |||
| 2295 | mutex_lock(&trace_types_lock); | ||
| 2296 | ring_buffer_set_clock(global_trace.buffer, func); | ||
| 2297 | |||
| 2298 | if (max_tr.buffer) | ||
| 2299 | ring_buffer_set_clock(max_tr.buffer, func); | ||
| 2300 | mutex_unlock(&trace_types_lock); | ||
| 2301 | } | ||
| 2302 | } | ||
| 2303 | |||
| 2831 | static ssize_t | 2304 | static ssize_t | 
| 2832 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, | 2305 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, | 
| 2833 | size_t cnt, loff_t *ppos) | 2306 | size_t cnt, loff_t *ppos) | 
| @@ -2855,17 +2328,16 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2855 | int len = strlen(trace_options[i]); | 2328 | int len = strlen(trace_options[i]); | 
| 2856 | 2329 | ||
| 2857 | if (strncmp(cmp, trace_options[i], len) == 0) { | 2330 | if (strncmp(cmp, trace_options[i], len) == 0) { | 
| 2858 | if (neg) | 2331 | set_tracer_flags(1 << i, !neg); | 
| 2859 | trace_flags &= ~(1 << i); | ||
| 2860 | else | ||
| 2861 | trace_flags |= (1 << i); | ||
| 2862 | break; | 2332 | break; | 
| 2863 | } | 2333 | } | 
| 2864 | } | 2334 | } | 
| 2865 | 2335 | ||
| 2866 | /* If no option could be set, test the specific tracer options */ | 2336 | /* If no option could be set, test the specific tracer options */ | 
| 2867 | if (!trace_options[i]) { | 2337 | if (!trace_options[i]) { | 
| 2338 | mutex_lock(&trace_types_lock); | ||
| 2868 | ret = set_tracer_option(current_trace, cmp, neg); | 2339 | ret = set_tracer_option(current_trace, cmp, neg); | 
| 2340 | mutex_unlock(&trace_types_lock); | ||
| 2869 | if (ret) | 2341 | if (ret) | 
| 2870 | return ret; | 2342 | return ret; | 
| 2871 | } | 2343 | } | 
| @@ -2875,7 +2347,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2875 | return cnt; | 2347 | return cnt; | 
| 2876 | } | 2348 | } | 
| 2877 | 2349 | ||
| 2878 | static struct file_operations tracing_iter_fops = { | 2350 | static const struct file_operations tracing_iter_fops = { | 
| 2879 | .open = tracing_open_generic, | 2351 | .open = tracing_open_generic, | 
| 2880 | .read = tracing_trace_options_read, | 2352 | .read = tracing_trace_options_read, | 
| 2881 | .write = tracing_trace_options_write, | 2353 | .write = tracing_trace_options_write, | 
| @@ -2908,7 +2380,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf, | |||
| 2908 | readme_msg, strlen(readme_msg)); | 2380 | readme_msg, strlen(readme_msg)); | 
| 2909 | } | 2381 | } | 
| 2910 | 2382 | ||
| 2911 | static struct file_operations tracing_readme_fops = { | 2383 | static const struct file_operations tracing_readme_fops = { | 
| 2912 | .open = tracing_open_generic, | 2384 | .open = tracing_open_generic, | 
| 2913 | .read = tracing_readme_read, | 2385 | .read = tracing_readme_read, | 
| 2914 | }; | 2386 | }; | 
| @@ -2930,7 +2402,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
| 2930 | { | 2402 | { | 
| 2931 | struct trace_array *tr = filp->private_data; | 2403 | struct trace_array *tr = filp->private_data; | 
| 2932 | char buf[64]; | 2404 | char buf[64]; | 
| 2933 | long val; | 2405 | unsigned long val; | 
| 2934 | int ret; | 2406 | int ret; | 
| 2935 | 2407 | ||
| 2936 | if (cnt >= sizeof(buf)) | 2408 | if (cnt >= sizeof(buf)) | 
| @@ -2985,13 +2457,105 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, | |||
| 2985 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2457 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| 2986 | } | 2458 | } | 
| 2987 | 2459 | ||
| 2988 | static int tracing_set_tracer(char *buf) | 2460 | int tracer_init(struct tracer *t, struct trace_array *tr) | 
| 2461 | { | ||
| 2462 | tracing_reset_online_cpus(tr); | ||
| 2463 | return t->init(tr); | ||
| 2464 | } | ||
| 2465 | |||
| 2466 | static int tracing_resize_ring_buffer(unsigned long size) | ||
| 2467 | { | ||
| 2468 | int ret; | ||
| 2469 | |||
| 2470 | /* | ||
| 2471 | * If the kernel or the user changes the size of the ring buffer, ||
| 2472 | * we use the size that was given, and we can forget about | ||
| 2473 | * expanding it later. | ||
| 2474 | */ | ||
| 2475 | ring_buffer_expanded = 1; | ||
| 2476 | |||
| 2477 | ret = ring_buffer_resize(global_trace.buffer, size); | ||
| 2478 | if (ret < 0) | ||
| 2479 | return ret; | ||
| 2480 | |||
| 2481 | ret = ring_buffer_resize(max_tr.buffer, size); | ||
| 2482 | if (ret < 0) { | ||
| 2483 | int r; | ||
| 2484 | |||
| 2485 | r = ring_buffer_resize(global_trace.buffer, | ||
| 2486 | global_trace.entries); | ||
| 2487 | if (r < 0) { | ||
| 2488 | /* | ||
| 2489 | * AARGH! We are left with differently ||
| 2490 | * sized buffers! ||
| 2491 | * The max buffer is our "snapshot" buffer. | ||
| 2492 | * When a tracer needs a snapshot (one of the | ||
| 2493 | * latency tracers), it swaps the max buffer | ||
| 2494 | * with the saved snapshot. We succeeded in ||
| 2495 | * updating the size of the main buffer, but failed to ||
| 2496 | * update the size of the max buffer. But when we tried | ||
| 2497 | * to reset the main buffer to the original size, we | ||
| 2498 | * failed there too. This is very unlikely to | ||
| 2499 | * happen, but if it does, warn and kill all | ||
| 2500 | * tracing. | ||
| 2501 | */ | ||
| 2502 | WARN_ON(1); | ||
| 2503 | tracing_disabled = 1; | ||
| 2504 | } | ||
| 2505 | return ret; | ||
| 2506 | } | ||
| 2507 | |||
| 2508 | global_trace.entries = size; | ||
| 2509 | |||
| 2510 | return ret; | ||
| 2511 | } | ||
| 2512 | |||
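The function above follows a transactional pattern: grow the main buffer, then the max buffer, and roll the main buffer back if the second resize fails. A compilable standalone sketch of the same pattern, with resize() as a stand-in for ring_buffer_resize():

    /* resize() is a stand-in for ring_buffer_resize(); a real
     * implementation could fail and return a negative value. */
    static int resize(unsigned long *buf_entries, unsigned long size)
    {
            *buf_entries = size;
            return 0;
    }

    static int resize_pair(unsigned long *main_buf, unsigned long *max_buf,
                           unsigned long new_size, unsigned long old_size)
    {
            int ret = resize(main_buf, new_size);
            if (ret < 0)
                    return ret;             /* nothing changed yet */

            ret = resize(max_buf, new_size);
            if (ret < 0) {
                    /* second resize failed: try to undo the first */
                    if (resize(main_buf, old_size) < 0)
                            return -1;      /* buffers out of sync: fatal */
                    return ret;             /* clean rollback */
            }
            return 0;
    }

    int main(void)
    {
            unsigned long main_buf = 1, max_buf = 1;
            return resize_pair(&main_buf, &max_buf, 4096, 1) ? 1 : 0;
    }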
| 2513 | /** | ||
| 2514 | * tracing_update_buffers - used by tracing facility to expand ring buffers | ||
| 2515 | * tracing_update_buffers - used by the tracing facility to expand ring buffers ||
| 2516 | * ||
| 2517 | * To save memory on systems that have tracing configured in but ||
| 2518 | * never use it, the ring buffers start at a minimum size. Once ||
| 2519 | * a user starts to use the tracing facility, the buffers need to ||
| 2520 | * grow to their default size. ||
| 2521 | * This function is to be called when a tracer is about to be used. | ||
| 2522 | */ | ||
| 2523 | int tracing_update_buffers(void) | ||
| 2524 | { | ||
| 2525 | int ret = 0; | ||
| 2526 | |||
| 2527 | mutex_lock(&trace_types_lock); | ||
| 2528 | if (!ring_buffer_expanded) | ||
| 2529 | ret = tracing_resize_ring_buffer(trace_buf_size); | ||
| 2530 | mutex_unlock(&trace_types_lock); | ||
| 2531 | |||
| 2532 | return ret; | ||
| 2533 | } | ||
| 2534 | |||
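tracing_update_buffers() is the hook a caller uses before enabling any tracing feature, so the lazily sized buffer is expanded at most once. A sketch of a typical caller; my_feature_enable() is made up for illustration:

    static int my_feature_enable(void)
    {
            int ret;

            ret = tracing_update_buffers();  /* no-op once expanded */
            if (ret < 0)
                    return ret;

            /* safe to start generating trace entries now */
            return 0;
    }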
| 2535 | struct trace_option_dentry; | ||
| 2536 | |||
| 2537 | static struct trace_option_dentry * | ||
| 2538 | create_trace_option_files(struct tracer *tracer); | ||
| 2539 | |||
| 2540 | static void | ||
| 2541 | destroy_trace_option_files(struct trace_option_dentry *topts); | ||
| 2542 | |||
| 2543 | static int tracing_set_tracer(const char *buf) | ||
| 2989 | { | 2544 | { | 
| 2545 | static struct trace_option_dentry *topts; | ||
| 2990 | struct trace_array *tr = &global_trace; | 2546 | struct trace_array *tr = &global_trace; | 
| 2991 | struct tracer *t; | 2547 | struct tracer *t; | 
| 2992 | int ret = 0; | 2548 | int ret = 0; | 
| 2993 | 2549 | ||
| 2994 | mutex_lock(&trace_types_lock); | 2550 | mutex_lock(&trace_types_lock); | 
| 2551 | |||
| 2552 | if (!ring_buffer_expanded) { | ||
| 2553 | ret = tracing_resize_ring_buffer(trace_buf_size); | ||
| 2554 | if (ret < 0) | ||
| 2555 | goto out; | ||
| 2556 | ret = 0; | ||
| 2557 | } | ||
| 2558 | |||
| 2995 | for (t = trace_types; t; t = t->next) { | 2559 | for (t = trace_types; t; t = t->next) { | 
| 2996 | if (strcmp(t->name, buf) == 0) | 2560 | if (strcmp(t->name, buf) == 0) | 
| 2997 | break; | 2561 | break; | 
| @@ -3007,9 +2571,14 @@ static int tracing_set_tracer(char *buf) | |||
| 3007 | if (current_trace && current_trace->reset) | 2571 | if (current_trace && current_trace->reset) | 
| 3008 | current_trace->reset(tr); | 2572 | current_trace->reset(tr); | 
| 3009 | 2573 | ||
| 2574 | destroy_trace_option_files(topts); | ||
| 2575 | |||
| 3010 | current_trace = t; | 2576 | current_trace = t; | 
| 2577 | |||
| 2578 | topts = create_trace_option_files(current_trace); | ||
| 2579 | |||
| 3011 | if (t->init) { | 2580 | if (t->init) { | 
| 3012 | ret = t->init(tr); | 2581 | ret = tracer_init(t, tr); | 
| 3013 | if (ret) | 2582 | if (ret) | 
| 3014 | goto out; | 2583 | goto out; | 
| 3015 | } | 2584 | } | 
| @@ -3072,9 +2641,9 @@ static ssize_t | |||
| 3072 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | 2641 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | 
| 3073 | size_t cnt, loff_t *ppos) | 2642 | size_t cnt, loff_t *ppos) | 
| 3074 | { | 2643 | { | 
| 3075 | long *ptr = filp->private_data; | 2644 | unsigned long *ptr = filp->private_data; | 
| 3076 | char buf[64]; | 2645 | char buf[64]; | 
| 3077 | long val; | 2646 | unsigned long val; | 
| 3078 | int ret; | 2647 | int ret; | 
| 3079 | 2648 | ||
| 3080 | if (cnt >= sizeof(buf)) | 2649 | if (cnt >= sizeof(buf)) | 
| @@ -3094,54 +2663,96 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf, | |||
| 3094 | return cnt; | 2663 | return cnt; | 
| 3095 | } | 2664 | } | 
| 3096 | 2665 | ||
| 3097 | static atomic_t tracing_reader; | ||
| 3098 | |||
| 3099 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 2666 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 
| 3100 | { | 2667 | { | 
| 2668 | long cpu_file = (long) inode->i_private; | ||
| 3101 | struct trace_iterator *iter; | 2669 | struct trace_iterator *iter; | 
| 2670 | int ret = 0; | ||
| 3102 | 2671 | ||
| 3103 | if (tracing_disabled) | 2672 | if (tracing_disabled) | 
| 3104 | return -ENODEV; | 2673 | return -ENODEV; | 
| 3105 | 2674 | ||
| 3106 | /* We only allow one reader of the pipe */ | 2675 | mutex_lock(&trace_types_lock); | 
| 3107 | if (atomic_inc_return(&tracing_reader) != 1) { | 2676 | |
| 3108 | atomic_dec(&tracing_reader); | 2677 | /* We only allow one reader per cpu */ | 
| 3109 | return -EBUSY; | 2678 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | 
| 2679 | if (!cpumask_empty(tracing_reader_cpumask)) { | ||
| 2680 | ret = -EBUSY; | ||
| 2681 | goto out; | ||
| 2682 | } | ||
| 2683 | cpumask_setall(tracing_reader_cpumask); | ||
| 2684 | } else { | ||
| 2685 | if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask)) | ||
| 2686 | cpumask_set_cpu(cpu_file, tracing_reader_cpumask); | ||
| 2687 | else { | ||
| 2688 | ret = -EBUSY; | ||
| 2689 | goto out; | ||
| 2690 | } | ||
| 3110 | } | 2691 | } | 
| 3111 | 2692 | ||
| 3112 | /* create a buffer to store the information to pass to userspace */ | 2693 | /* create a buffer to store the information to pass to userspace */ | 
| 3113 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 2694 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 
| 3114 | if (!iter) | 2695 | if (!iter) { | 
| 3115 | return -ENOMEM; | 2696 | ret = -ENOMEM; | 
| 2697 | goto out; | ||
| 2698 | } | ||
| 3116 | 2699 | ||
| 3117 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | 2700 | /* | 
| 3118 | kfree(iter); | 2701 | * We make a copy of the current tracer to avoid concurrent | 
| 3119 | return -ENOMEM; | 2702 | * changes to it while we are reading. | 
| 2703 | */ | ||
| 2704 | iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); | ||
| 2705 | if (!iter->trace) { | ||
| 2706 | ret = -ENOMEM; | ||
| 2707 | goto fail; | ||
| 3120 | } | 2708 | } | 
| 2709 | if (current_trace) | ||
| 2710 | *iter->trace = *current_trace; | ||
| 3121 | 2711 | ||
| 3122 | mutex_lock(&trace_types_lock); | 2712 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | 
| 2713 | ret = -ENOMEM; | ||
| 2714 | goto fail; | ||
| 2715 | } | ||
| 3123 | 2716 | ||
| 3124 | /* trace pipe does not show start of buffer */ | 2717 | /* trace pipe does not show start of buffer */ | 
| 3125 | cpumask_setall(iter->started); | 2718 | cpumask_setall(iter->started); | 
| 3126 | 2719 | ||
| 2720 | iter->cpu_file = cpu_file; | ||
| 3127 | iter->tr = &global_trace; | 2721 | iter->tr = &global_trace; | 
| 3128 | iter->trace = current_trace; | 2722 | mutex_init(&iter->mutex); | 
| 3129 | filp->private_data = iter; | 2723 | filp->private_data = iter; | 
| 3130 | 2724 | ||
| 3131 | if (iter->trace->pipe_open) | 2725 | if (iter->trace->pipe_open) | 
| 3132 | iter->trace->pipe_open(iter); | 2726 | iter->trace->pipe_open(iter); | 
| 2727 | |||
| 2728 | out: | ||
| 3133 | mutex_unlock(&trace_types_lock); | 2729 | mutex_unlock(&trace_types_lock); | 
| 2730 | return ret; | ||
| 3134 | 2731 | ||
| 3135 | return 0; | 2732 | fail: | 
| 2733 | kfree(iter->trace); | ||
| 2734 | kfree(iter); | ||
| 2735 | mutex_unlock(&trace_types_lock); | ||
| 2736 | return ret; | ||
| 3136 | } | 2737 | } | 
| 3137 | 2738 | ||
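Seen from userspace, the reader accounting above means a second concurrent open of the same trace_pipe file fails with EBUSY. A sketch, assuming debugfs is mounted at /sys/kernel/debug:

    #include <stdio.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd1 = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
            int fd2 = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

            if (fd2 < 0 && errno == EBUSY)
                    printf("second reader rejected, as expected\n");

            if (fd1 >= 0)
                    close(fd1);
            return 0;
    }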
| 3138 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 2739 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 
| 3139 | { | 2740 | { | 
| 3140 | struct trace_iterator *iter = file->private_data; | 2741 | struct trace_iterator *iter = file->private_data; | 
| 3141 | 2742 | ||
| 2743 | mutex_lock(&trace_types_lock); | ||
| 2744 | |||
| 2745 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) | ||
| 2746 | cpumask_clear(tracing_reader_cpumask); | ||
| 2747 | else | ||
| 2748 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); | ||
| 2749 | |||
| 2750 | mutex_unlock(&trace_types_lock); | ||
| 2751 | |||
| 3142 | free_cpumask_var(iter->started); | 2752 | free_cpumask_var(iter->started); | 
| 2753 | mutex_destroy(&iter->mutex); | ||
| 2754 | kfree(iter->trace); | ||
| 3143 | kfree(iter); | 2755 | kfree(iter); | 
| 3144 | atomic_dec(&tracing_reader); | ||
| 3145 | 2756 | ||
| 3146 | return 0; | 2757 | return 0; | 
| 3147 | } | 2758 | } | 
| @@ -3167,67 +2778,57 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table) | |||
| 3167 | } | 2778 | } | 
| 3168 | } | 2779 | } | 
| 3169 | 2780 | ||
| 3170 | /* | 2781 | |
| 3171 | * Consumer reader. | 2782 | void default_wait_pipe(struct trace_iterator *iter) | 
| 3172 | */ | ||
| 3173 | static ssize_t | ||
| 3174 | tracing_read_pipe(struct file *filp, char __user *ubuf, | ||
| 3175 | size_t cnt, loff_t *ppos) | ||
| 3176 | { | 2783 | { | 
| 3177 | struct trace_iterator *iter = filp->private_data; | 2784 | DEFINE_WAIT(wait); | 
| 3178 | ssize_t sret; | ||
| 3179 | 2785 | ||
| 3180 | /* return any leftover data */ | 2786 | prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE); | 
| 3181 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | ||
| 3182 | if (sret != -EBUSY) | ||
| 3183 | return sret; | ||
| 3184 | 2787 | ||
| 3185 | trace_seq_reset(&iter->seq); | 2788 | if (trace_empty(iter)) | 
| 2789 | schedule(); | ||
| 3186 | 2790 | ||
| 3187 | mutex_lock(&trace_types_lock); | 2791 | finish_wait(&trace_wait, &wait); | 
| 3188 | if (iter->trace->read) { | 2792 | } | 
| 3189 | sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); | 2793 | |
| 3190 | if (sret) | 2794 | /* | 
| 3191 | goto out; | 2795 | * This is a makeshift waitqueue. | 
| 3192 | } | 2796 | * A tracer might use this callback in some rare cases: | 
| 2797 | * | ||
| 2798 | * 1) the current tracer might hold the runqueue lock when it wakes up | ||
| 2799 | * a reader, hence a deadlock (sched, function, and function graph tracers) | ||
| 2800 | * 2) the function tracers trace all functions; we don't want ||
| 2801 | * the overhead of calling wake_up and friends | ||
| 2802 | * (and tracing them too) | ||
| 2803 | * | ||
| 2804 | * Anyway, this is a really primitive wakeup. ||
| 2805 | */ | ||
| 2806 | void poll_wait_pipe(struct trace_iterator *iter) | ||
| 2807 | { | ||
| 2808 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 2809 | /* sleep for 100 msecs, and try again. */ | ||
| 2810 | schedule_timeout(HZ / 10); | ||
| 2811 | } | ||
| 2812 | |||
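A tracer selects its wait strategy through the wait_pipe callback of its struct tracer; tracing_wait_pipe() below simply calls iter->trace->wait_pipe(iter). A sketch of a tracer opting into the polling wait; "my_tracer" and the omitted fields are illustrative:

    static struct tracer my_tracer __read_mostly = {
            .name           = "my_tracer",
            .wait_pipe      = poll_wait_pipe,  /* avoid wake_up() in the trace */
            /* .init, .reset, ... omitted */
    };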
| 2813 | /* Must be called with trace_types_lock mutex held. */ | ||
| 2814 | static int tracing_wait_pipe(struct file *filp) | ||
| 2815 | { | ||
| 2816 | struct trace_iterator *iter = filp->private_data; | ||
| 3193 | 2817 | ||
| 3194 | waitagain: | ||
| 3195 | sret = 0; | ||
| 3196 | while (trace_empty(iter)) { | 2818 | while (trace_empty(iter)) { | 
| 3197 | 2819 | ||
| 3198 | if ((filp->f_flags & O_NONBLOCK)) { | 2820 | if ((filp->f_flags & O_NONBLOCK)) { | 
| 3199 | sret = -EAGAIN; | 2821 | return -EAGAIN; | 
| 3200 | goto out; | ||
| 3201 | } | 2822 | } | 
| 3202 | 2823 | ||
| 3203 | /* | 2824 | mutex_unlock(&iter->mutex); | 
| 3204 | * This is a make-shift waitqueue. The reason we don't use | ||
| 3205 | * an actual wait queue is because: | ||
| 3206 | * 1) we only ever have one waiter | ||
| 3207 | * 2) the tracing, traces all functions, we don't want | ||
| 3208 | * the overhead of calling wake_up and friends | ||
| 3209 | * (and tracing them too) | ||
| 3210 | * Anyway, this is really very primitive wakeup. | ||
| 3211 | */ | ||
| 3212 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 3213 | iter->tr->waiter = current; | ||
| 3214 | |||
| 3215 | mutex_unlock(&trace_types_lock); | ||
| 3216 | |||
| 3217 | /* sleep for 100 msecs, and try again. */ | ||
| 3218 | schedule_timeout(HZ/10); | ||
| 3219 | |||
| 3220 | mutex_lock(&trace_types_lock); | ||
| 3221 | 2825 | ||
| 3222 | iter->tr->waiter = NULL; | 2826 | iter->trace->wait_pipe(iter); | 
| 3223 | 2827 | ||
| 3224 | if (signal_pending(current)) { | 2828 | mutex_lock(&iter->mutex); | 
| 3225 | sret = -EINTR; | ||
| 3226 | goto out; | ||
| 3227 | } | ||
| 3228 | 2829 | ||
| 3229 | if (iter->trace != current_trace) | 2830 | if (signal_pending(current)) | 
| 3230 | goto out; | 2831 | return -EINTR; | 
| 3231 | 2832 | ||
| 3232 | /* | 2833 | /* | 
| 3233 | * We block until we read something and tracing is disabled. | 2834 | * We block until we read something and tracing is disabled. | 
| @@ -3240,13 +2841,59 @@ waitagain: | |||
| 3240 | */ | 2841 | */ | 
| 3241 | if (!tracer_enabled && iter->pos) | 2842 | if (!tracer_enabled && iter->pos) | 
| 3242 | break; | 2843 | break; | 
| 2844 | } | ||
| 2845 | |||
| 2846 | return 1; | ||
| 2847 | } | ||
| 2848 | |||
| 2849 | /* | ||
| 2850 | * Consumer reader. | ||
| 2851 | */ | ||
| 2852 | static ssize_t | ||
| 2853 | tracing_read_pipe(struct file *filp, char __user *ubuf, | ||
| 2854 | size_t cnt, loff_t *ppos) | ||
| 2855 | { | ||
| 2856 | struct trace_iterator *iter = filp->private_data; | ||
| 2857 | static struct tracer *old_tracer; | ||
| 2858 | ssize_t sret; | ||
| 3243 | 2859 | ||
| 3244 | continue; | 2860 | /* return any leftover data */ | 
| 2861 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | ||
| 2862 | if (sret != -EBUSY) | ||
| 2863 | return sret; | ||
| 2864 | |||
| 2865 | trace_seq_init(&iter->seq); | ||
| 2866 | |||
| 2867 | /* copy the tracer to avoid using a global lock all around */ | ||
| 2868 | mutex_lock(&trace_types_lock); | ||
| 2869 | if (unlikely(old_tracer != current_trace && current_trace)) { | ||
| 2870 | old_tracer = current_trace; | ||
| 2871 | *iter->trace = *current_trace; | ||
| 2872 | } | ||
| 2873 | mutex_unlock(&trace_types_lock); | ||
| 2874 | |||
| 2875 | /* | ||
| 2876 | * Avoid more than one consumer on a single file descriptor. ||
| 2877 | * This is just a matter of trace coherency; the ring buffer itself ||
| 2878 | * is protected. | ||
| 2879 | */ | ||
| 2880 | mutex_lock(&iter->mutex); | ||
| 2881 | if (iter->trace->read) { | ||
| 2882 | sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); | ||
| 2883 | if (sret) | ||
| 2884 | goto out; | ||
| 3245 | } | 2885 | } | 
| 3246 | 2886 | ||
| 2887 | waitagain: | ||
| 2888 | sret = tracing_wait_pipe(filp); | ||
| 2889 | if (sret <= 0) | ||
| 2890 | goto out; | ||
| 2891 | |||
| 3247 | /* stop when tracing is finished */ | 2892 | /* stop when tracing is finished */ | 
| 3248 | if (trace_empty(iter)) | 2893 | if (trace_empty(iter)) { | 
| 2894 | sret = 0; | ||
| 3249 | goto out; | 2895 | goto out; | 
| 2896 | } | ||
| 3250 | 2897 | ||
| 3251 | if (cnt >= PAGE_SIZE) | 2898 | if (cnt >= PAGE_SIZE) | 
| 3252 | cnt = PAGE_SIZE - 1; | 2899 | cnt = PAGE_SIZE - 1; | 
| @@ -3267,8 +2914,8 @@ waitagain: | |||
| 3267 | iter->seq.len = len; | 2914 | iter->seq.len = len; | 
| 3268 | break; | 2915 | break; | 
| 3269 | } | 2916 | } | 
| 3270 | 2917 | if (ret != TRACE_TYPE_NO_CONSUME) | |
| 3271 | trace_consume(iter); | 2918 | trace_consume(iter); | 
| 3272 | 2919 | ||
| 3273 | if (iter->seq.len >= cnt) | 2920 | if (iter->seq.len >= cnt) | 
| 3274 | break; | 2921 | break; | 
| @@ -3277,7 +2924,7 @@ waitagain: | |||
| 3277 | /* Now copy what we have to the user */ | 2924 | /* Now copy what we have to the user */ | 
| 3278 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 2925 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 
| 3279 | if (iter->seq.readpos >= iter->seq.len) | 2926 | if (iter->seq.readpos >= iter->seq.len) | 
| 3280 | trace_seq_reset(&iter->seq); | 2927 | trace_seq_init(&iter->seq); | 
| 3281 | 2928 | ||
| 3282 | /* | 2929 | /* | 
| 3283 | * If there was nothing to send to the user, in spite of consuming trace | 2930 | * If there was nothing to send to the user, in spite of consuming trace | 
| @@ -3287,20 +2934,165 @@ waitagain: | |||
| 3287 | goto waitagain; | 2934 | goto waitagain; | 
| 3288 | 2935 | ||
| 3289 | out: | 2936 | out: | 
| 3290 | mutex_unlock(&trace_types_lock); | 2937 | mutex_unlock(&iter->mutex); | 
| 3291 | 2938 | ||
| 3292 | return sret; | 2939 | return sret; | 
| 3293 | } | 2940 | } | 
| 3294 | 2941 | ||
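On the user side this is an ordinary consuming, blocking read. A sketch of a trace_pipe consumer; the debugfs mount point is assumed:

    #include <stdio.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

            if (fd < 0)
                    return 1;

            while ((n = read(fd, buf, sizeof(buf))) != 0) {
                    if (n < 0) {
                            if (errno == EINTR)  /* signal during the wait */
                                    continue;
                            break;
                    }
                    fwrite(buf, 1, n, stdout);
            }
            close(fd);
            return 0;
    }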
| 2942 | static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, | ||
| 2943 | struct pipe_buffer *buf) | ||
| 2944 | { | ||
| 2945 | __free_page(buf->page); | ||
| 2946 | } | ||
| 2947 | |||
| 2948 | static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | ||
| 2949 | unsigned int idx) | ||
| 2950 | { | ||
| 2951 | __free_page(spd->pages[idx]); | ||
| 2952 | } | ||
| 2953 | |||
| 2954 | static struct pipe_buf_operations tracing_pipe_buf_ops = { | ||
| 2955 | .can_merge = 0, | ||
| 2956 | .map = generic_pipe_buf_map, | ||
| 2957 | .unmap = generic_pipe_buf_unmap, | ||
| 2958 | .confirm = generic_pipe_buf_confirm, | ||
| 2959 | .release = tracing_pipe_buf_release, | ||
| 2960 | .steal = generic_pipe_buf_steal, | ||
| 2961 | .get = generic_pipe_buf_get, | ||
| 2962 | }; | ||
| 2963 | |||
| 2964 | static size_t | ||
| 2965 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | ||
| 2966 | { | ||
| 2967 | size_t count; | ||
| 2968 | int ret; | ||
| 2969 | |||
| 2970 | /* Seq buffer is page-sized, exactly what we need. */ | ||
| 2971 | for (;;) { | ||
| 2972 | count = iter->seq.len; | ||
| 2973 | ret = print_trace_line(iter); | ||
| 2974 | count = iter->seq.len - count; | ||
| 2975 | if (rem < count) { | ||
| 2976 | rem = 0; | ||
| 2977 | iter->seq.len -= count; | ||
| 2978 | break; | ||
| 2979 | } | ||
| 2980 | if (ret == TRACE_TYPE_PARTIAL_LINE) { | ||
| 2981 | iter->seq.len -= count; | ||
| 2982 | break; | ||
| 2983 | } | ||
| 2984 | |||
| 2985 | trace_consume(iter); | ||
| 2986 | rem -= count; | ||
| 2987 | if (!find_next_entry_inc(iter)) { | ||
| 2988 | rem = 0; | ||
| 2989 | iter->ent = NULL; | ||
| 2990 | break; | ||
| 2991 | } | ||
| 2992 | } | ||
| 2993 | |||
| 2994 | return rem; | ||
| 2995 | } | ||
| 2996 | |||
| 2997 | static ssize_t tracing_splice_read_pipe(struct file *filp, | ||
| 2998 | loff_t *ppos, | ||
| 2999 | struct pipe_inode_info *pipe, | ||
| 3000 | size_t len, | ||
| 3001 | unsigned int flags) | ||
| 3002 | { | ||
| 3003 | struct page *pages[PIPE_BUFFERS]; | ||
| 3004 | struct partial_page partial[PIPE_BUFFERS]; | ||
| 3005 | struct trace_iterator *iter = filp->private_data; | ||
| 3006 | struct splice_pipe_desc spd = { | ||
| 3007 | .pages = pages, | ||
| 3008 | .partial = partial, | ||
| 3009 | .nr_pages = 0, /* This gets updated below. */ | ||
| 3010 | .flags = flags, | ||
| 3011 | .ops = &tracing_pipe_buf_ops, | ||
| 3012 | .spd_release = tracing_spd_release_pipe, | ||
| 3013 | }; | ||
| 3014 | static struct tracer *old_tracer; | ||
| 3015 | ssize_t ret; | ||
| 3016 | size_t rem; | ||
| 3017 | unsigned int i; | ||
| 3018 | |||
| 3019 | /* copy the tracer to avoid using a global lock all around */ | ||
| 3020 | mutex_lock(&trace_types_lock); | ||
| 3021 | if (unlikely(old_tracer != current_trace && current_trace)) { | ||
| 3022 | old_tracer = current_trace; | ||
| 3023 | *iter->trace = *current_trace; | ||
| 3024 | } | ||
| 3025 | mutex_unlock(&trace_types_lock); | ||
| 3026 | |||
| 3027 | mutex_lock(&iter->mutex); | ||
| 3028 | |||
| 3029 | if (iter->trace->splice_read) { | ||
| 3030 | ret = iter->trace->splice_read(iter, filp, | ||
| 3031 | ppos, pipe, len, flags); | ||
| 3032 | if (ret) | ||
| 3033 | goto out_err; | ||
| 3034 | } | ||
| 3035 | |||
| 3036 | ret = tracing_wait_pipe(filp); | ||
| 3037 | if (ret <= 0) | ||
| 3038 | goto out_err; | ||
| 3039 | |||
| 3040 | if (!iter->ent && !find_next_entry_inc(iter)) { | ||
| 3041 | ret = -EFAULT; | ||
| 3042 | goto out_err; | ||
| 3043 | } | ||
| 3044 | |||
| 3045 | /* Fill as many pages as possible. */ | ||
| 3046 | for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { | ||
| 3047 | pages[i] = alloc_page(GFP_KERNEL); | ||
| 3048 | if (!pages[i]) | ||
| 3049 | break; | ||
| 3050 | |||
| 3051 | rem = tracing_fill_pipe_page(rem, iter); | ||
| 3052 | |||
| 3053 | /* Copy the data into the page, so we can start over. */ | ||
| 3054 | ret = trace_seq_to_buffer(&iter->seq, | ||
| 3055 | page_address(pages[i]), | ||
| 3056 | iter->seq.len); | ||
| 3057 | if (ret < 0) { | ||
| 3058 | __free_page(pages[i]); | ||
| 3059 | break; | ||
| 3060 | } | ||
| 3061 | partial[i].offset = 0; | ||
| 3062 | partial[i].len = iter->seq.len; | ||
| 3063 | |||
| 3064 | trace_seq_init(&iter->seq); | ||
| 3065 | } | ||
| 3066 | |||
| 3067 | mutex_unlock(&iter->mutex); | ||
| 3068 | |||
| 3069 | spd.nr_pages = i; | ||
| 3070 | |||
| 3071 | return splice_to_pipe(pipe, &spd); | ||
| 3072 | |||
| 3073 | out_err: | ||
| 3074 | mutex_unlock(&iter->mutex); | ||
| 3075 | |||
| 3076 | return ret; | ||
| 3077 | } | ||
| 3078 | |||
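The splice path moves formatted trace data into a pipe without an intermediate userspace copy. A sketch using the standard splice(2) syscall; the debugfs path is an assumption, and stdout is assumed to be redirected to a file so the second splice has a valid target:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int pfd[2];
            int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

            if (fd < 0 || pipe(pfd) < 0)
                    return 1;

            for (;;) {
                    /* pull formatted trace data into the pipe, zero-copy */
                    ssize_t n = splice(fd, NULL, pfd[1], NULL, 65536, 0);
                    if (n <= 0)
                            break;
                    /* drain the pipe to stdout (redirected to a file) */
                    splice(pfd[0], NULL, STDOUT_FILENO, NULL, n, 0);
            }
            close(fd);
            return 0;
    }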
| 3295 | static ssize_t | 3079 | static ssize_t | 
| 3296 | tracing_entries_read(struct file *filp, char __user *ubuf, | 3080 | tracing_entries_read(struct file *filp, char __user *ubuf, | 
| 3297 | size_t cnt, loff_t *ppos) | 3081 | size_t cnt, loff_t *ppos) | 
| 3298 | { | 3082 | { | 
| 3299 | struct trace_array *tr = filp->private_data; | 3083 | struct trace_array *tr = filp->private_data; | 
| 3300 | char buf[64]; | 3084 | char buf[96]; | 
| 3301 | int r; | 3085 | int r; | 
| 3302 | 3086 | ||
| 3303 | r = sprintf(buf, "%lu\n", tr->entries >> 10); | 3087 | mutex_lock(&trace_types_lock); | 
| 3088 | if (!ring_buffer_expanded) | ||
| 3089 | r = sprintf(buf, "%lu (expanded: %lu)\n", | ||
| 3090 | tr->entries >> 10, | ||
| 3091 | trace_buf_size >> 10); | ||
| 3092 | else | ||
| 3093 | r = sprintf(buf, "%lu\n", tr->entries >> 10); | ||
| 3094 | mutex_unlock(&trace_types_lock); | ||
| 3095 | |||
| 3304 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3096 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
| 3305 | } | 3097 | } | 
| 3306 | 3098 | ||
| @@ -3344,28 +3136,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
| 3344 | val <<= 10; | 3136 | val <<= 10; | 
| 3345 | 3137 | ||
| 3346 | if (val != global_trace.entries) { | 3138 | if (val != global_trace.entries) { | 
| 3347 | ret = ring_buffer_resize(global_trace.buffer, val); | 3139 | ret = tracing_resize_ring_buffer(val); | 
| 3348 | if (ret < 0) { | ||
| 3349 | cnt = ret; | ||
| 3350 | goto out; | ||
| 3351 | } | ||
| 3352 | |||
| 3353 | ret = ring_buffer_resize(max_tr.buffer, val); | ||
| 3354 | if (ret < 0) { | 3140 | if (ret < 0) { | 
| 3355 | int r; | ||
| 3356 | cnt = ret; | 3141 | cnt = ret; | 
| 3357 | r = ring_buffer_resize(global_trace.buffer, | ||
| 3358 | global_trace.entries); | ||
| 3359 | if (r < 0) { | ||
| 3360 | /* AARGH! We are left with different | ||
| 3361 | * size max buffer!!!! */ | ||
| 3362 | WARN_ON(1); | ||
| 3363 | tracing_disabled = 1; | ||
| 3364 | } | ||
| 3365 | goto out; | 3142 | goto out; | 
| 3366 | } | 3143 | } | 
| 3367 | |||
| 3368 | global_trace.entries = val; | ||
| 3369 | } | 3144 | } | 
| 3370 | 3145 | ||
| 3371 | filp->f_pos += cnt; | 3146 | filp->f_pos += cnt; | 
| @@ -3393,7 +3168,7 @@ static int mark_printk(const char *fmt, ...) | |||
| 3393 | int ret; | 3168 | int ret; | 
| 3394 | va_list args; | 3169 | va_list args; | 
| 3395 | va_start(args, fmt); | 3170 | va_start(args, fmt); | 
| 3396 | ret = trace_vprintk(0, -1, fmt, args); | 3171 | ret = trace_vprintk(0, fmt, args); | 
| 3397 | va_end(args); | 3172 | va_end(args); | 
| 3398 | return ret; | 3173 | return ret; | 
| 3399 | } | 3174 | } | 
| @@ -3433,42 +3208,288 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
| 3433 | return cnt; | 3208 | return cnt; | 
| 3434 | } | 3209 | } | 
| 3435 | 3210 | ||
| 3436 | static struct file_operations tracing_max_lat_fops = { | 3211 | static const struct file_operations tracing_max_lat_fops = { | 
| 3437 | .open = tracing_open_generic, | 3212 | .open = tracing_open_generic, | 
| 3438 | .read = tracing_max_lat_read, | 3213 | .read = tracing_max_lat_read, | 
| 3439 | .write = tracing_max_lat_write, | 3214 | .write = tracing_max_lat_write, | 
| 3440 | }; | 3215 | }; | 
| 3441 | 3216 | ||
| 3442 | static struct file_operations tracing_ctrl_fops = { | 3217 | static const struct file_operations tracing_ctrl_fops = { | 
| 3443 | .open = tracing_open_generic, | 3218 | .open = tracing_open_generic, | 
| 3444 | .read = tracing_ctrl_read, | 3219 | .read = tracing_ctrl_read, | 
| 3445 | .write = tracing_ctrl_write, | 3220 | .write = tracing_ctrl_write, | 
| 3446 | }; | 3221 | }; | 
| 3447 | 3222 | ||
| 3448 | static struct file_operations set_tracer_fops = { | 3223 | static const struct file_operations set_tracer_fops = { | 
| 3449 | .open = tracing_open_generic, | 3224 | .open = tracing_open_generic, | 
| 3450 | .read = tracing_set_trace_read, | 3225 | .read = tracing_set_trace_read, | 
| 3451 | .write = tracing_set_trace_write, | 3226 | .write = tracing_set_trace_write, | 
| 3452 | }; | 3227 | }; | 
| 3453 | 3228 | ||
| 3454 | static struct file_operations tracing_pipe_fops = { | 3229 | static const struct file_operations tracing_pipe_fops = { | 
| 3455 | .open = tracing_open_pipe, | 3230 | .open = tracing_open_pipe, | 
| 3456 | .poll = tracing_poll_pipe, | 3231 | .poll = tracing_poll_pipe, | 
| 3457 | .read = tracing_read_pipe, | 3232 | .read = tracing_read_pipe, | 
| 3233 | .splice_read = tracing_splice_read_pipe, | ||
| 3458 | .release = tracing_release_pipe, | 3234 | .release = tracing_release_pipe, | 
| 3459 | }; | 3235 | }; | 
| 3460 | 3236 | ||
| 3461 | static struct file_operations tracing_entries_fops = { | 3237 | static const struct file_operations tracing_entries_fops = { | 
| 3462 | .open = tracing_open_generic, | 3238 | .open = tracing_open_generic, | 
| 3463 | .read = tracing_entries_read, | 3239 | .read = tracing_entries_read, | 
| 3464 | .write = tracing_entries_write, | 3240 | .write = tracing_entries_write, | 
| 3465 | }; | 3241 | }; | 
| 3466 | 3242 | ||
| 3467 | static struct file_operations tracing_mark_fops = { | 3243 | static const struct file_operations tracing_mark_fops = { | 
| 3468 | .open = tracing_open_generic, | 3244 | .open = tracing_open_generic, | 
| 3469 | .write = tracing_mark_write, | 3245 | .write = tracing_mark_write, | 
| 3470 | }; | 3246 | }; | 
| 3471 | 3247 | ||
| 3248 | struct ftrace_buffer_info { | ||
| 3249 | struct trace_array *tr; | ||
| 3250 | void *spare; | ||
| 3251 | int cpu; | ||
| 3252 | unsigned int read; | ||
| 3253 | }; | ||
| 3254 | |||
| 3255 | static int tracing_buffers_open(struct inode *inode, struct file *filp) | ||
| 3256 | { | ||
| 3257 | int cpu = (int)(long)inode->i_private; | ||
| 3258 | struct ftrace_buffer_info *info; | ||
| 3259 | |||
| 3260 | if (tracing_disabled) | ||
| 3261 | return -ENODEV; | ||
| 3262 | |||
| 3263 | info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
| 3264 | if (!info) | ||
| 3265 | return -ENOMEM; | ||
| 3266 | |||
| 3267 | info->tr = &global_trace; | ||
| 3268 | info->cpu = cpu; | ||
| 3269 | info->spare = ring_buffer_alloc_read_page(info->tr->buffer); | ||
| 3270 | /* Force reading ring buffer for first read */ | ||
| 3271 | info->read = (unsigned int)-1; | ||
| 3272 | if (!info->spare) | ||
| 3273 | goto out; | ||
| 3274 | |||
| 3275 | filp->private_data = info; | ||
| 3276 | |||
| 3277 | return 0; | ||
| 3278 | |||
| 3279 | out: | ||
| 3280 | kfree(info); | ||
| 3281 | return -ENOMEM; | ||
| 3282 | } | ||
| 3283 | |||
| 3284 | static ssize_t | ||
| 3285 | tracing_buffers_read(struct file *filp, char __user *ubuf, | ||
| 3286 | size_t count, loff_t *ppos) | ||
| 3287 | { | ||
| 3288 | struct ftrace_buffer_info *info = filp->private_data; | ||
| 3289 | unsigned int pos; | ||
| 3290 | ssize_t ret; | ||
| 3291 | size_t size; | ||
| 3292 | |||
| 3293 | if (!count) | ||
| 3294 | return 0; | ||
| 3295 | |||
| 3296 | /* Do we have previous read data to read? */ | ||
| 3297 | if (info->read < PAGE_SIZE) | ||
| 3298 | goto read; | ||
| 3299 | |||
| 3300 | info->read = 0; | ||
| 3301 | |||
| 3302 | ret = ring_buffer_read_page(info->tr->buffer, | ||
| 3303 | &info->spare, | ||
| 3304 | count, | ||
| 3305 | info->cpu, 0); | ||
| 3306 | if (ret < 0) | ||
| 3307 | return 0; | ||
| 3308 | |||
| 3309 | pos = ring_buffer_page_len(info->spare); | ||
| 3310 | |||
| 3311 | if (pos < PAGE_SIZE) | ||
| 3312 | memset(info->spare + pos, 0, PAGE_SIZE - pos); | ||
| 3313 | |||
| 3314 | read: | ||
| 3315 | size = PAGE_SIZE - info->read; | ||
| 3316 | if (size > count) | ||
| 3317 | size = count; | ||
| 3318 | |||
| 3319 | ret = copy_to_user(ubuf, info->spare + info->read, size); | ||
| 3320 | if (ret == size) | ||
| 3321 | return -EFAULT; | ||
| 3322 | size -= ret; | ||
| 3323 | |||
| 3324 | *ppos += size; | ||
| 3325 | info->read += size; | ||
| 3326 | |||
| 3327 | return size; | ||
| 3328 | } | ||
| 3329 | |||
| 3330 | static int tracing_buffers_release(struct inode *inode, struct file *file) | ||
| 3331 | { | ||
| 3332 | struct ftrace_buffer_info *info = file->private_data; | ||
| 3333 | |||
| 3334 | ring_buffer_free_read_page(info->tr->buffer, info->spare); | ||
| 3335 | kfree(info); | ||
| 3336 | |||
| 3337 | return 0; | ||
| 3338 | } | ||
| 3339 | |||
| 3340 | struct buffer_ref { | ||
| 3341 | struct ring_buffer *buffer; | ||
| 3342 | void *page; | ||
| 3343 | int ref; | ||
| 3344 | }; | ||
| 3345 | |||
| 3346 | static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | ||
| 3347 | struct pipe_buffer *buf) | ||
| 3348 | { | ||
| 3349 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | ||
| 3350 | |||
| 3351 | if (--ref->ref) | ||
| 3352 | return; | ||
| 3353 | |||
| 3354 | ring_buffer_free_read_page(ref->buffer, ref->page); | ||
| 3355 | kfree(ref); | ||
| 3356 | buf->private = 0; | ||
| 3357 | } | ||
| 3358 | |||
| 3359 | static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe, | ||
| 3360 | struct pipe_buffer *buf) | ||
| 3361 | { | ||
| 3362 | return 1; | ||
| 3363 | } | ||
| 3364 | |||
| 3365 | static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | ||
| 3366 | struct pipe_buffer *buf) | ||
| 3367 | { | ||
| 3368 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | ||
| 3369 | |||
| 3370 | ref->ref++; | ||
| 3371 | } | ||
| 3372 | |||
| 3373 | /* Pipe buffer operations for a buffer. */ | ||
| 3374 | static struct pipe_buf_operations buffer_pipe_buf_ops = { | ||
| 3375 | .can_merge = 0, | ||
| 3376 | .map = generic_pipe_buf_map, | ||
| 3377 | .unmap = generic_pipe_buf_unmap, | ||
| 3378 | .confirm = generic_pipe_buf_confirm, | ||
| 3379 | .release = buffer_pipe_buf_release, | ||
| 3380 | .steal = buffer_pipe_buf_steal, | ||
| 3381 | .get = buffer_pipe_buf_get, | ||
| 3382 | }; | ||
| 3383 | |||
| 3384 | /* | ||
| 3385 | * Callback from splice_to_pipe(), if we need to release some pages | ||
| 3386 | * at the end of the spd in case we errored out in filling the pipe. ||
| 3387 | */ | ||
| 3388 | static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) | ||
| 3389 | { | ||
| 3390 | struct buffer_ref *ref = | ||
| 3391 | (struct buffer_ref *)spd->partial[i].private; | ||
| 3392 | |||
| 3393 | if (--ref->ref) | ||
| 3394 | return; | ||
| 3395 | |||
| 3396 | ring_buffer_free_read_page(ref->buffer, ref->page); | ||
| 3397 | kfree(ref); | ||
| 3398 | spd->partial[i].private = 0; | ||
| 3399 | } | ||
| 3400 | |||
| 3401 | static ssize_t | ||
| 3402 | tracing_buffers_splice_read(struct file *file, loff_t *ppos, | ||
| 3403 | struct pipe_inode_info *pipe, size_t len, | ||
| 3404 | unsigned int flags) | ||
| 3405 | { | ||
| 3406 | struct ftrace_buffer_info *info = file->private_data; | ||
| 3407 | struct partial_page partial[PIPE_BUFFERS]; | ||
| 3408 | struct page *pages[PIPE_BUFFERS]; | ||
| 3409 | struct splice_pipe_desc spd = { | ||
| 3410 | .pages = pages, | ||
| 3411 | .partial = partial, | ||
| 3412 | .flags = flags, | ||
| 3413 | .ops = &buffer_pipe_buf_ops, | ||
| 3414 | .spd_release = buffer_spd_release, | ||
| 3415 | }; | ||
| 3416 | struct buffer_ref *ref; | ||
| 3417 | int size, i; | ||
| 3418 | size_t ret; | ||
| 3419 | |||
| 3420 | /* | ||
| 3421 | * We can't seek on a buffer input | ||
| 3422 | */ | ||
| 3423 | if (unlikely(*ppos)) | ||
| 3424 | return -ESPIPE; | ||
| 3425 | |||
| 3426 | |||
| 3427 | for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) { | ||
| 3428 | struct page *page; | ||
| 3429 | int r; | ||
| 3430 | |||
| 3431 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | ||
| 3432 | if (!ref) | ||
| 3433 | break; | ||
| 3434 | |||
| 3435 | ref->buffer = info->tr->buffer; | ||
| 3436 | ref->page = ring_buffer_alloc_read_page(ref->buffer); | ||
| 3437 | if (!ref->page) { | ||
| 3438 | kfree(ref); | ||
| 3439 | break; | ||
| 3440 | } | ||
| 3441 | |||
| 3442 | r = ring_buffer_read_page(ref->buffer, &ref->page, | ||
| 3443 | len, info->cpu, 0); | ||
| 3444 | if (r < 0) { | ||
| 3445 | ring_buffer_free_read_page(ref->buffer, | ||
| 3446 | ref->page); | ||
| 3447 | kfree(ref); | ||
| 3448 | break; | ||
| 3449 | } | ||
| 3450 | |||
| 3451 | /* | ||
| 3452 | * zero out any leftover data; this is going to ||
| 3453 | * user land. | ||
| 3454 | */ | ||
| 3455 | size = ring_buffer_page_len(ref->page); | ||
| 3456 | if (size < PAGE_SIZE) | ||
| 3457 | memset(ref->page + size, 0, PAGE_SIZE - size); | ||
| 3458 | |||
| 3459 | page = virt_to_page(ref->page); | ||
| 3460 | |||
| 3461 | spd.pages[i] = page; | ||
| 3462 | spd.partial[i].len = PAGE_SIZE; | ||
| 3463 | spd.partial[i].offset = 0; | ||
| 3464 | spd.partial[i].private = (unsigned long)ref; | ||
| 3465 | spd.nr_pages++; | ||
| 3466 | } | ||
| 3467 | |||
| 3468 | spd.nr_pages = i; | ||
| 3469 | |||
| 3470 | /* did we read anything? */ | ||
| 3471 | if (!spd.nr_pages) { | ||
| 3472 | if (flags & SPLICE_F_NONBLOCK) | ||
| 3473 | ret = -EAGAIN; | ||
| 3474 | else | ||
| 3475 | ret = 0; | ||
| 3476 | /* TODO: block */ | ||
| 3477 | return ret; | ||
| 3478 | } | ||
| 3479 | |||
| 3480 | ret = splice_to_pipe(pipe, &spd); | ||
| 3481 | |||
| 3482 | return ret; | ||
| 3483 | } | ||
| 3484 | |||
| 3485 | static const struct file_operations tracing_buffers_fops = { | ||
| 3486 | .open = tracing_buffers_open, | ||
| 3487 | .read = tracing_buffers_read, | ||
| 3488 | .release = tracing_buffers_release, | ||
| 3489 | .splice_read = tracing_buffers_splice_read, | ||
| 3490 | .llseek = no_llseek, | ||
| 3491 | }; | ||
| 3492 | |||
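trace_pipe_raw hands out whole ring-buffer pages in binary form, so reads should be page sized. A sketch dumping one CPU's raw pages to a file; the per-cpu path and the 4K page size are assumptions:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            char page[4096];
            ssize_t n;
            int out = open("trace.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
            int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
                          O_RDONLY);

            if (fd < 0 || out < 0)
                    return 1;

            /* each read returns one raw ring-buffer page */
            while ((n = read(fd, page, sizeof(page))) > 0)
                    write(out, page, n);

            close(fd);
            close(out);
            return 0;
    }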
| 3472 | #ifdef CONFIG_DYNAMIC_FTRACE | 3493 | #ifdef CONFIG_DYNAMIC_FTRACE | 
| 3473 | 3494 | ||
| 3474 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | 3495 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | 
| @@ -3500,7 +3521,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf, | |||
| 3500 | return r; | 3521 | return r; | 
| 3501 | } | 3522 | } | 
| 3502 | 3523 | ||
| 3503 | static struct file_operations tracing_dyn_info_fops = { | 3524 | static const struct file_operations tracing_dyn_info_fops = { | 
| 3504 | .open = tracing_open_generic, | 3525 | .open = tracing_open_generic, | 
| 3505 | .read = tracing_read_dyn_info, | 3526 | .read = tracing_read_dyn_info, | 
| 3506 | }; | 3527 | }; | 
| @@ -3515,6 +3536,9 @@ struct dentry *tracing_init_dentry(void) | |||
| 3515 | if (d_tracer) | 3536 | if (d_tracer) | 
| 3516 | return d_tracer; | 3537 | return d_tracer; | 
| 3517 | 3538 | ||
| 3539 | if (!debugfs_initialized()) | ||
| 3540 | return NULL; | ||
| 3541 | |||
| 3518 | d_tracer = debugfs_create_dir("tracing", NULL); | 3542 | d_tracer = debugfs_create_dir("tracing", NULL); | 
| 3519 | 3543 | ||
| 3520 | if (!d_tracer && !once) { | 3544 | if (!d_tracer && !once) { | 
| @@ -3526,15 +3550,350 @@ struct dentry *tracing_init_dentry(void) | |||
| 3526 | return d_tracer; | 3550 | return d_tracer; | 
| 3527 | } | 3551 | } | 
| 3528 | 3552 | ||
| 3553 | static struct dentry *d_percpu; | ||
| 3554 | |||
| 3555 | struct dentry *tracing_dentry_percpu(void) | ||
| 3556 | { | ||
| 3557 | static int once; | ||
| 3558 | struct dentry *d_tracer; | ||
| 3559 | |||
| 3560 | if (d_percpu) | ||
| 3561 | return d_percpu; | ||
| 3562 | |||
| 3563 | d_tracer = tracing_init_dentry(); | ||
| 3564 | |||
| 3565 | if (!d_tracer) | ||
| 3566 | return NULL; | ||
| 3567 | |||
| 3568 | d_percpu = debugfs_create_dir("per_cpu", d_tracer); | ||
| 3569 | |||
| 3570 | if (!d_percpu && !once) { | ||
| 3571 | once = 1; | ||
| 3572 | pr_warning("Could not create debugfs directory 'per_cpu'\n"); | ||
| 3573 | return NULL; | ||
| 3574 | } | ||
| 3575 | |||
| 3576 | return d_percpu; | ||
| 3577 | } | ||
| 3578 | |||
| 3579 | static void tracing_init_debugfs_percpu(long cpu) | ||
| 3580 | { | ||
| 3581 | struct dentry *d_percpu = tracing_dentry_percpu(); | ||
| 3582 | struct dentry *entry, *d_cpu; | ||
| 3583 | /* strlen("cpu") + MAX(log10(cpu)) + '\0' */ ||
| 3584 | char cpu_dir[7]; | ||
| 3585 | |||
| 3586 | if (cpu > 999 || cpu < 0) | ||
| 3587 | return; | ||
| 3588 | |||
| 3589 | sprintf(cpu_dir, "cpu%ld", cpu); | ||
| 3590 | d_cpu = debugfs_create_dir(cpu_dir, d_percpu); | ||
| 3591 | if (!d_cpu) { | ||
| 3592 | pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); | ||
| 3593 | return; | ||
| 3594 | } | ||
| 3595 | |||
| 3596 | /* per cpu trace_pipe */ | ||
| 3597 | entry = debugfs_create_file("trace_pipe", 0444, d_cpu, | ||
| 3598 | (void *) cpu, &tracing_pipe_fops); | ||
| 3599 | if (!entry) | ||
| 3600 | pr_warning("Could not create debugfs 'trace_pipe' entry\n"); | ||
| 3601 | |||
| 3602 | /* per cpu trace */ | ||
| 3603 | entry = debugfs_create_file("trace", 0644, d_cpu, | ||
| 3604 | (void *) cpu, &tracing_fops); | ||
| 3605 | if (!entry) | ||
| 3606 | pr_warning("Could not create debugfs 'trace' entry\n"); | ||
| 3607 | |||
| 3608 | entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu, | ||
| 3609 | (void *) cpu, &tracing_buffers_fops); | ||
| 3610 | if (!entry) | ||
| 3611 | pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n"); | ||
| 3612 | } | ||
| 3613 | |||
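Because the per-cpu layout above is uniform, a monitoring tool can derive every CPU's file paths mechanically. A small sketch; the mount point and the CPU count are assumptions:

    #include <stdio.h>

    int main(void)
    {
            char path[128];
            long cpu;

            for (cpu = 0; cpu < 4; cpu++) {  /* 4 CPUs assumed for the demo */
                    snprintf(path, sizeof(path),
                             "/sys/kernel/debug/tracing/per_cpu/cpu%ld/trace_pipe",
                             cpu);
                    printf("%s\n", path);
            }
            return 0;
    }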
| 3529 | #ifdef CONFIG_FTRACE_SELFTEST | 3614 | #ifdef CONFIG_FTRACE_SELFTEST | 
| 3530 | /* Let selftest have access to static functions in this file */ | 3615 | /* Let selftest have access to static functions in this file */ | 
| 3531 | #include "trace_selftest.c" | 3616 | #include "trace_selftest.c" | 
| 3532 | #endif | 3617 | #endif | 
| 3533 | 3618 | ||
| 3619 | struct trace_option_dentry { | ||
| 3620 | struct tracer_opt *opt; | ||
| 3621 | struct tracer_flags *flags; | ||
| 3622 | struct dentry *entry; | ||
| 3623 | }; | ||
| 3624 | |||
| 3625 | static ssize_t | ||
| 3626 | trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
| 3627 | loff_t *ppos) | ||
| 3628 | { | ||
| 3629 | struct trace_option_dentry *topt = filp->private_data; | ||
| 3630 | char *buf; | ||
| 3631 | |||
| 3632 | if (topt->flags->val & topt->opt->bit) | ||
| 3633 | buf = "1\n"; | ||
| 3634 | else | ||
| 3635 | buf = "0\n"; | ||
| 3636 | |||
| 3637 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | ||
| 3638 | } | ||
| 3639 | |||
| 3640 | static ssize_t | ||
| 3641 | trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | ||
| 3642 | loff_t *ppos) | ||
| 3643 | { | ||
| 3644 | struct trace_option_dentry *topt = filp->private_data; | ||
| 3645 | unsigned long val; | ||
| 3646 | char buf[64]; | ||
| 3647 | int ret; | ||
| 3648 | |||
| 3649 | if (cnt >= sizeof(buf)) | ||
| 3650 | return -EINVAL; | ||
| 3651 | |||
| 3652 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 3653 | return -EFAULT; | ||
| 3654 | |||
| 3655 | buf[cnt] = 0; | ||
| 3656 | |||
| 3657 | ret = strict_strtoul(buf, 10, &val); | ||
| 3658 | if (ret < 0) | ||
| 3659 | return ret; | ||
| 3660 | |||
| 3661 | ret = 0; | ||
| 3662 | switch (val) { | ||
| 3663 | case 0: | ||
| 3664 | /* do nothing if already cleared */ | ||
| 3665 | if (!(topt->flags->val & topt->opt->bit)) | ||
| 3666 | break; | ||
| 3667 | |||
| 3668 | mutex_lock(&trace_types_lock); | ||
| 3669 | if (current_trace->set_flag) | ||
| 3670 | ret = current_trace->set_flag(topt->flags->val, | ||
| 3671 | topt->opt->bit, 0); | ||
| 3672 | mutex_unlock(&trace_types_lock); | ||
| 3673 | if (ret) | ||
| 3674 | return ret; | ||
| 3675 | topt->flags->val &= ~topt->opt->bit; | ||
| 3676 | break; | ||
| 3677 | case 1: | ||
| 3678 | /* do nothing if already set */ | ||
| 3679 | if (topt->flags->val & topt->opt->bit) | ||
| 3680 | break; | ||
| 3681 | |||
| 3682 | mutex_lock(&trace_types_lock); | ||
| 3683 | if (current_trace->set_flag) | ||
| 3684 | ret = current_trace->set_flag(topt->flags->val, | ||
| 3685 | topt->opt->bit, 1); | ||
| 3686 | mutex_unlock(&trace_types_lock); | ||
| 3687 | if (ret) | ||
| 3688 | return ret; | ||
| 3689 | topt->flags->val |= topt->opt->bit; | ||
| 3690 | break; | ||
| 3691 | |||
| 3692 | default: | ||
| 3693 | return -EINVAL; | ||
| 3694 | } | ||
| 3695 | |||
| 3696 | *ppos += cnt; | ||
| 3697 | |||
| 3698 | return cnt; | ||
| 3699 | } | ||
| 3700 | |||
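The set_flag callback used above is how a tracer reacts to, or vetoes, one of its own option bits: returning 0 accepts the write, non-zero rejects it. A tracer-side sketch; MY_OPT_VERBOSE and my_reconfigure() are made up:

    static int my_set_flag(u32 old_flags, u32 bit, int set)
    {
            if (bit == MY_OPT_VERBOSE) {
                    my_reconfigure(set);    /* react to the toggle */
                    return 0;               /* accept the write */
            }
            return -EINVAL;                 /* unknown bit: reject */
    }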
| 3701 | |||
| 3702 | static const struct file_operations trace_options_fops = { | ||
| 3703 | .open = tracing_open_generic, | ||
| 3704 | .read = trace_options_read, | ||
| 3705 | .write = trace_options_write, | ||
| 3706 | }; | ||
| 3707 | |||
| 3708 | static ssize_t | ||
| 3709 | trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
| 3710 | loff_t *ppos) | ||
| 3711 | { | ||
| 3712 | long index = (long)filp->private_data; | ||
| 3713 | char *buf; | ||
| 3714 | |||
| 3715 | if (trace_flags & (1 << index)) | ||
| 3716 | buf = "1\n"; | ||
| 3717 | else | ||
| 3718 | buf = "0\n"; | ||
| 3719 | |||
| 3720 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | ||
| 3721 | } | ||
| 3722 | |||
| 3723 | static ssize_t | ||
| 3724 | trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | ||
| 3725 | loff_t *ppos) | ||
| 3726 | { | ||
| 3727 | long index = (long)filp->private_data; | ||
| 3728 | char buf[64]; | ||
| 3729 | unsigned long val; | ||
| 3730 | int ret; | ||
| 3731 | |||
| 3732 | if (cnt >= sizeof(buf)) | ||
| 3733 | return -EINVAL; | ||
| 3734 | |||
| 3735 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 3736 | return -EFAULT; | ||
| 3737 | |||
| 3738 | buf[cnt] = 0; | ||
| 3739 | |||
| 3740 | ret = strict_strtoul(buf, 10, &val); | ||
| 3741 | if (ret < 0) | ||
| 3742 | return ret; | ||
| 3743 | |||
| 3744 | switch (val) { | ||
| 3745 | case 0: | ||
| 3746 | trace_flags &= ~(1 << index); | ||
| 3747 | break; | ||
| 3748 | case 1: | ||
| 3749 | trace_flags |= 1 << index; | ||
| 3750 | break; | ||
| 3751 | |||
| 3752 | default: | ||
| 3753 | return -EINVAL; | ||
| 3754 | } | ||
| 3755 | |||
| 3756 | *ppos += cnt; | ||
| 3757 | |||
| 3758 | return cnt; | ||
| 3759 | } | ||
| 3760 | |||
| 3761 | static const struct file_operations trace_options_core_fops = { | ||
| 3762 | .open = tracing_open_generic, | ||
| 3763 | .read = trace_options_core_read, | ||
| 3764 | .write = trace_options_core_write, | ||
| 3765 | }; | ||
| 3766 | |||
| 3767 | static struct dentry *trace_options_init_dentry(void) | ||
| 3768 | { | ||
| 3769 | struct dentry *d_tracer; | ||
| 3770 | static struct dentry *t_options; | ||
| 3771 | |||
| 3772 | if (t_options) | ||
| 3773 | return t_options; | ||
| 3774 | |||
| 3775 | d_tracer = tracing_init_dentry(); | ||
| 3776 | if (!d_tracer) | ||
| 3777 | return NULL; | ||
| 3778 | |||
| 3779 | t_options = debugfs_create_dir("options", d_tracer); | ||
| 3780 | if (!t_options) { | ||
| 3781 | pr_warning("Could not create debugfs directory 'options'\n"); | ||
| 3782 | return NULL; | ||
| 3783 | } | ||
| 3784 | |||
| 3785 | return t_options; | ||
| 3786 | } | ||
| 3787 | |||
| 3788 | static void | ||
| 3789 | create_trace_option_file(struct trace_option_dentry *topt, | ||
| 3790 | struct tracer_flags *flags, | ||
| 3791 | struct tracer_opt *opt) | ||
| 3792 | { | ||
| 3793 | struct dentry *t_options; | ||
| 3794 | struct dentry *entry; | ||
| 3795 | |||
| 3796 | t_options = trace_options_init_dentry(); | ||
| 3797 | if (!t_options) | ||
| 3798 | return; | ||
| 3799 | |||
| 3800 | topt->flags = flags; | ||
| 3801 | topt->opt = opt; | ||
| 3802 | |||
| 3803 | entry = debugfs_create_file(opt->name, 0644, t_options, topt, | ||
| 3804 | &trace_options_fops); | ||
| 3805 | |||
| 3806 | topt->entry = entry; | ||
| 3807 | |||
| 3808 | } | ||
| 3809 | |||
| 3810 | static struct trace_option_dentry * | ||
| 3811 | create_trace_option_files(struct tracer *tracer) | ||
| 3812 | { | ||
| 3813 | struct trace_option_dentry *topts; | ||
| 3814 | struct tracer_flags *flags; | ||
| 3815 | struct tracer_opt *opts; | ||
| 3816 | int cnt; | ||
| 3817 | |||
| 3818 | if (!tracer) | ||
| 3819 | return NULL; | ||
| 3820 | |||
| 3821 | flags = tracer->flags; | ||
| 3822 | |||
| 3823 | if (!flags || !flags->opts) | ||
| 3824 | return NULL; | ||
| 3825 | |||
| 3826 | opts = flags->opts; | ||
| 3827 | |||
| 3828 | for (cnt = 0; opts[cnt].name; cnt++) | ||
| 3829 | ; | ||
| 3830 | |||
| 3831 | topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); | ||
| 3832 | if (!topts) | ||
| 3833 | return NULL; | ||
| 3834 | |||
| 3835 | for (cnt = 0; opts[cnt].name; cnt++) | ||
| 3836 | create_trace_option_file(&topts[cnt], flags, | ||
| 3837 | &opts[cnt]); | ||
| 3838 | |||
| 3839 | return topts; | ||
| 3840 | } | ||
| 3841 | |||
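The loop above is driven by a NULL-name terminated option table that each tracer exports through its tracer_flags. A sketch of such a table; the option names and bit values are made up:

    static struct tracer_opt my_opts[] = {
            { .name = "verbose",  .bit = 0x1 },
            { .name = "rawstamp", .bit = 0x2 },
            { }     /* terminator: opts[cnt].name == NULL ends the scan */
    };

    static struct tracer_flags my_flags = {
            .val    = 0x1,          /* "verbose" defaults to on */
            .opts   = my_opts,
    };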
| 3842 | static void | ||
| 3843 | destroy_trace_option_files(struct trace_option_dentry *topts) | ||
| 3844 | { | ||
| 3845 | int cnt; | ||
| 3846 | |||
| 3847 | if (!topts) | ||
| 3848 | return; | ||
| 3849 | |||
| 3850 | for (cnt = 0; topts[cnt].opt; cnt++) { | ||
| 3851 | if (topts[cnt].entry) | ||
| 3852 | debugfs_remove(topts[cnt].entry); | ||
| 3853 | } | ||
| 3854 | |||
| 3855 | kfree(topts); | ||
| 3856 | } | ||
| 3857 | |||
| 3858 | static struct dentry * | ||
| 3859 | create_trace_option_core_file(const char *option, long index) | ||
| 3860 | { | ||
| 3861 | struct dentry *t_options; | ||
| 3862 | struct dentry *entry; | ||
| 3863 | |||
| 3864 | t_options = trace_options_init_dentry(); | ||
| 3865 | if (!t_options) | ||
| 3866 | return NULL; | ||
| 3867 | |||
| 3868 | entry = debugfs_create_file(option, 0644, t_options, (void *)index, | ||
| 3869 | &trace_options_core_fops); | ||
| 3870 | |||
| 3871 | return entry; | ||
| 3872 | } | ||
| 3873 | |||
| 3874 | static __init void create_trace_options_dir(void) | ||
| 3875 | { | ||
| 3876 | struct dentry *t_options; | ||
| 3877 | struct dentry *entry; | ||
| 3878 | int i; | ||
| 3879 | |||
| 3880 | t_options = trace_options_init_dentry(); | ||
| 3881 | if (!t_options) | ||
| 3882 | return; | ||
| 3883 | |||
| 3884 | for (i = 0; trace_options[i]; i++) { | ||
| 3885 | entry = create_trace_option_core_file(trace_options[i], i); | ||
| 3886 | if (!entry) | ||
| 3887 | pr_warning("Could not create debugfs %s entry\n", | ||
| 3888 | trace_options[i]); | ||
| 3889 | } | ||
| 3890 | } | ||
| 3891 | |||
| 3534 | static __init int tracer_init_debugfs(void) | 3892 | static __init int tracer_init_debugfs(void) | 
| 3535 | { | 3893 | { | 
| 3536 | struct dentry *d_tracer; | 3894 | struct dentry *d_tracer; | 
| 3537 | struct dentry *entry; | 3895 | struct dentry *entry; | 
| 3896 | int cpu; | ||
| 3538 | 3897 | ||
| 3539 | d_tracer = tracing_init_dentry(); | 3898 | d_tracer = tracing_init_dentry(); | 
| 3540 | 3899 | ||
| @@ -3548,18 +3907,15 @@ static __init int tracer_init_debugfs(void) | |||
| 3548 | if (!entry) | 3907 | if (!entry) | 
| 3549 | pr_warning("Could not create debugfs 'trace_options' entry\n"); | 3908 | pr_warning("Could not create debugfs 'trace_options' entry\n"); | 
| 3550 | 3909 | ||
| 3910 | create_trace_options_dir(); | ||
| 3911 | |||
| 3551 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 3912 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 
| 3552 | NULL, &tracing_cpumask_fops); | 3913 | NULL, &tracing_cpumask_fops); | 
| 3553 | if (!entry) | 3914 | if (!entry) | 
| 3554 | pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); | 3915 | pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); | 
| 3555 | 3916 | ||
| 3556 | entry = debugfs_create_file("latency_trace", 0444, d_tracer, | 3917 | entry = debugfs_create_file("trace", 0644, d_tracer, | 
| 3557 | &global_trace, &tracing_lt_fops); | 3918 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | 
| 3558 | if (!entry) | ||
| 3559 | pr_warning("Could not create debugfs 'latency_trace' entry\n"); | ||
| 3560 | |||
| 3561 | entry = debugfs_create_file("trace", 0444, d_tracer, | ||
| 3562 | &global_trace, &tracing_fops); | ||
| 3563 | if (!entry) | 3919 | if (!entry) | 
| 3564 | pr_warning("Could not create debugfs 'trace' entry\n"); | 3920 | pr_warning("Could not create debugfs 'trace' entry\n"); | 
| 3565 | 3921 | ||
| @@ -3590,8 +3946,8 @@ static __init int tracer_init_debugfs(void) | |||
| 3590 | if (!entry) | 3946 | if (!entry) | 
| 3591 | pr_warning("Could not create debugfs 'README' entry\n"); | 3947 | pr_warning("Could not create debugfs 'README' entry\n"); | 
| 3592 | 3948 | ||
| 3593 | entry = debugfs_create_file("trace_pipe", 0644, d_tracer, | 3949 | entry = debugfs_create_file("trace_pipe", 0444, d_tracer, | 
| 3594 | NULL, &tracing_pipe_fops); | 3950 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); | 
| 3595 | if (!entry) | 3951 | if (!entry) | 
| 3596 | pr_warning("Could not create debugfs " | 3952 | pr_warning("Could not create debugfs " | 
| 3597 | "'trace_pipe' entry\n"); | 3953 | "'trace_pipe' entry\n"); | 
| @@ -3619,77 +3975,12 @@ static __init int tracer_init_debugfs(void) | |||
| 3619 | #ifdef CONFIG_SYSPROF_TRACER | 3975 | #ifdef CONFIG_SYSPROF_TRACER | 
| 3620 | init_tracer_sysprof_debugfs(d_tracer); | 3976 | init_tracer_sysprof_debugfs(d_tracer); | 
| 3621 | #endif | 3977 | #endif | 
| 3622 | return 0; | ||
| 3623 | } | ||
| 3624 | |||
| 3625 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) | ||
| 3626 | { | ||
| 3627 | static DEFINE_SPINLOCK(trace_buf_lock); | ||
| 3628 | static char trace_buf[TRACE_BUF_SIZE]; | ||
| 3629 | |||
| 3630 | struct ring_buffer_event *event; | ||
| 3631 | struct trace_array *tr = &global_trace; | ||
| 3632 | struct trace_array_cpu *data; | ||
| 3633 | int cpu, len = 0, size, pc; | ||
| 3634 | struct print_entry *entry; | ||
| 3635 | unsigned long irq_flags; | ||
| 3636 | |||
| 3637 | if (tracing_disabled || tracing_selftest_running) | ||
| 3638 | return 0; | ||
| 3639 | |||
| 3640 | pc = preempt_count(); | ||
| 3641 | preempt_disable_notrace(); | ||
| 3642 | cpu = raw_smp_processor_id(); | ||
| 3643 | data = tr->data[cpu]; | ||
| 3644 | |||
| 3645 | if (unlikely(atomic_read(&data->disabled))) | ||
| 3646 | goto out; | ||
| 3647 | |||
| 3648 | pause_graph_tracing(); | ||
| 3649 | spin_lock_irqsave(&trace_buf_lock, irq_flags); | ||
| 3650 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | ||
| 3651 | |||
| 3652 | len = min(len, TRACE_BUF_SIZE-1); | ||
| 3653 | trace_buf[len] = 0; | ||
| 3654 | |||
| 3655 | size = sizeof(*entry) + len + 1; | ||
| 3656 | event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags); | ||
| 3657 | if (!event) | ||
| 3658 | goto out_unlock; | ||
| 3659 | entry = ring_buffer_event_data(event); | ||
| 3660 | tracing_generic_entry_update(&entry->ent, irq_flags, pc); | ||
| 3661 | entry->ent.type = TRACE_PRINT; | ||
| 3662 | entry->ip = ip; | ||
| 3663 | entry->depth = depth; | ||
| 3664 | |||
| 3665 | memcpy(&entry->buf, trace_buf, len); | ||
| 3666 | entry->buf[len] = 0; | ||
| 3667 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
| 3668 | |||
| 3669 | out_unlock: | ||
| 3670 | spin_unlock_irqrestore(&trace_buf_lock, irq_flags); | ||
| 3671 | unpause_graph_tracing(); | ||
| 3672 | out: | ||
| 3673 | preempt_enable_notrace(); | ||
| 3674 | |||
| 3675 | return len; | ||
| 3676 | } | ||
| 3677 | EXPORT_SYMBOL_GPL(trace_vprintk); | ||
| 3678 | 3978 | ||
| 3679 | int __ftrace_printk(unsigned long ip, const char *fmt, ...) | 3979 | for_each_tracing_cpu(cpu) | 
| 3680 | { | 3980 | tracing_init_debugfs_percpu(cpu); | 
| 3681 | int ret; | ||
| 3682 | va_list ap; | ||
| 3683 | |||
| 3684 | if (!(trace_flags & TRACE_ITER_PRINTK)) | ||
| 3685 | return 0; | ||
| 3686 | 3981 | ||
| 3687 | va_start(ap, fmt); | 3982 | return 0; | 
| 3688 | ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); | ||
| 3689 | va_end(ap); | ||
| 3690 | return ret; | ||
| 3691 | } | 3983 | } | 
| 3692 | EXPORT_SYMBOL_GPL(__ftrace_printk); | ||
| 3693 | 3984 | ||
| 3694 | static int trace_panic_handler(struct notifier_block *this, | 3985 | static int trace_panic_handler(struct notifier_block *this, | 
| 3695 | unsigned long event, void *unused) | 3986 | unsigned long event, void *unused) | 
| @@ -3750,14 +4041,15 @@ trace_printk_seq(struct trace_seq *s) | |||
| 3750 | 4041 | ||
| 3751 | printk(KERN_TRACE "%s", s->buffer); | 4042 | printk(KERN_TRACE "%s", s->buffer); | 
| 3752 | 4043 | ||
| 3753 | trace_seq_reset(s); | 4044 | trace_seq_init(s); | 
| 3754 | } | 4045 | } | 
| 3755 | 4046 | ||
| 3756 | void ftrace_dump(void) | 4047 | static void __ftrace_dump(bool disable_tracing) | 
| 3757 | { | 4048 | { | 
| 3758 | static DEFINE_SPINLOCK(ftrace_dump_lock); | 4049 | static DEFINE_SPINLOCK(ftrace_dump_lock); | 
| 3759 | /* use static because iter can be a bit big for the stack */ | 4050 | /* use static because iter can be a bit big for the stack */ | 
| 3760 | static struct trace_iterator iter; | 4051 | static struct trace_iterator iter; | 
| 4052 | unsigned int old_userobj; | ||
| 3761 | static int dump_ran; | 4053 | static int dump_ran; | 
| 3762 | unsigned long flags; | 4054 | unsigned long flags; | 
| 3763 | int cnt = 0, cpu; | 4055 | int cnt = 0, cpu; | 
| @@ -3769,21 +4061,26 @@ void ftrace_dump(void) | |||
| 3769 | 4061 | ||
| 3770 | dump_ran = 1; | 4062 | dump_ran = 1; | 
| 3771 | 4063 | ||
| 3772 | /* No turning back! */ | ||
| 3773 | tracing_off(); | 4064 | tracing_off(); | 
| 3774 | ftrace_kill(); | 4065 | |
| 4066 | if (disable_tracing) | ||
| 4067 | ftrace_kill(); | ||
| 3775 | 4068 | ||
| 3776 | for_each_tracing_cpu(cpu) { | 4069 | for_each_tracing_cpu(cpu) { | 
| 3777 | atomic_inc(&global_trace.data[cpu]->disabled); | 4070 | atomic_inc(&global_trace.data[cpu]->disabled); | 
| 3778 | } | 4071 | } | 
| 3779 | 4072 | ||
| 4073 | old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; | ||
| 4074 | |||
| 3780 | /* don't look at user memory in panic mode */ | 4075 | /* don't look at user memory in panic mode */ | 
| 3781 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | 4076 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | 
| 3782 | 4077 | ||
| 3783 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | 4078 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | 
| 3784 | 4079 | ||
| 4080 | /* Simulate the iterator */ | ||
| 3785 | iter.tr = &global_trace; | 4081 | iter.tr = &global_trace; | 
| 3786 | iter.trace = current_trace; | 4082 | iter.trace = current_trace; | 
| 4083 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | ||
| 3787 | 4084 | ||
| 3788 | /* | 4085 | /* | 
| 3789 | * We need to stop all tracing on all CPUS to read the | 4086 | * We need to stop all tracing on all CPUS to read the | 
| @@ -3819,13 +4116,30 @@ void ftrace_dump(void) | |||
| 3819 | else | 4116 | else | 
| 3820 | printk(KERN_TRACE "---------------------------------\n"); | 4117 | printk(KERN_TRACE "---------------------------------\n"); | 
| 3821 | 4118 | ||
| 4119 | /* Re-enable tracing if requested */ | ||
| 4120 | if (!disable_tracing) { | ||
| 4121 | trace_flags |= old_userobj; | ||
| 4122 | |||
| 4123 | for_each_tracing_cpu(cpu) { | ||
| 4124 | atomic_dec(&global_trace.data[cpu]->disabled); | ||
| 4125 | } | ||
| 4126 | tracing_on(); | ||
| 4127 | } | ||
| 4128 | |||
| 3822 | out: | 4129 | out: | 
| 3823 | spin_unlock_irqrestore(&ftrace_dump_lock, flags); | 4130 | spin_unlock_irqrestore(&ftrace_dump_lock, flags); | 
| 3824 | } | 4131 | } | 
| 3825 | 4132 | ||
| 4133 | /* By default, disable tracing after the dump */ | ||
| 4134 | void ftrace_dump(void) | ||
| 4135 | { | ||
| 4136 | __ftrace_dump(true); | ||
| 4137 | } | ||
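Splitting the body into __ftrace_dump(bool disable_tracing) leaves room for callers that want to dump the buffer without shutting tracing down for good: the "Re-enable tracing if requested" path above restores the per-cpu disabled counts and the user-object flag. A hypothetical non-destructive wrapper (not part of this patch) would simply pass false:

/* Hypothetical: dump the buffer but let tracing continue afterwards. */
static void example_ftrace_dump_keep_tracing(void)
{
	__ftrace_dump(false);
}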
| 4138 | |||
| 3826 | __init static int tracer_alloc_buffers(void) | 4139 | __init static int tracer_alloc_buffers(void) | 
| 3827 | { | 4140 | { | 
| 3828 | struct trace_array_cpu *data; | 4141 | struct trace_array_cpu *data; | 
| 4142 | int ring_buf_size; | ||
| 3829 | int i; | 4143 | int i; | 
| 3830 | int ret = -ENOMEM; | 4144 | int ret = -ENOMEM; | 
| 3831 | 4145 | ||
| @@ -3835,11 +4149,21 @@ __init static int tracer_alloc_buffers(void) | |||
| 3835 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 4149 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 
| 3836 | goto out_free_buffer_mask; | 4150 | goto out_free_buffer_mask; | 
| 3837 | 4151 | ||
| 4152 | if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | ||
| 4153 | goto out_free_tracing_cpumask; | ||
| 4154 | |||
| 4155 | /* To save memory, keep the ring buffer at its minimum size unless it was expanded on boot */ | ||
| 4156 | if (ring_buffer_expanded) | ||
| 4157 | ring_buf_size = trace_buf_size; | ||
| 4158 | else | ||
| 4159 | ring_buf_size = 1; | ||
| 4160 | |||
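ring_buffer_expanded implements lazy allocation: boot with a one-page buffer and grow it only when tracing is first used, so systems that never trace pay almost nothing. A sketch of what the later expansion step amounts to, assuming the ring_buffer_resize() API of this era; the helper name is illustrative, and the patch's actual expansion path may differ:

/* Illustrative: grow the minimal boot buffer to its full size. */
static int example_expand_ring_buffer(unsigned long size)
{
	int ret;

	if (ring_buffer_expanded)
		return 0;	/* already at full size */

	ret = ring_buffer_resize(global_trace.buffer, size);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = ring_buffer_resize(max_tr.buffer, size);
	if (ret < 0)
		return ret;
#endif

	ring_buffer_expanded = 1;
	return 0;
}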
| 3838 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 4161 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 
| 3839 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 4162 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 
| 4163 | cpumask_clear(tracing_reader_cpumask); | ||
| 3840 | 4164 | ||
| 3841 | /* TODO: make the number of buffers hot pluggable with CPUs */ | 4165 | /* TODO: make the number of buffers hot pluggable with CPUs */ | 
| 3842 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, | 4166 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, | 
| 3843 | TRACE_BUFFER_FLAGS); | 4167 | TRACE_BUFFER_FLAGS); | 
| 3844 | if (!global_trace.buffer) { | 4168 | if (!global_trace.buffer) { | 
| 3845 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 4169 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 
| @@ -3850,7 +4174,7 @@ __init static int tracer_alloc_buffers(void) | |||
| 3850 | 4174 | ||
| 3851 | 4175 | ||
| 3852 | #ifdef CONFIG_TRACER_MAX_TRACE | 4176 | #ifdef CONFIG_TRACER_MAX_TRACE | 
| 3853 | max_tr.buffer = ring_buffer_alloc(trace_buf_size, | 4177 | max_tr.buffer = ring_buffer_alloc(ring_buf_size, | 
| 3854 | TRACE_BUFFER_FLAGS); | 4178 | TRACE_BUFFER_FLAGS); | 
| 3855 | if (!max_tr.buffer) { | 4179 | if (!max_tr.buffer) { | 
| 3856 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 4180 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 
| @@ -3871,14 +4195,10 @@ __init static int tracer_alloc_buffers(void) | |||
| 3871 | trace_init_cmdlines(); | 4195 | trace_init_cmdlines(); | 
| 3872 | 4196 | ||
| 3873 | register_tracer(&nop_trace); | 4197 | register_tracer(&nop_trace); | 
| 4198 | current_trace = &nop_trace; | ||
| 3874 | #ifdef CONFIG_BOOT_TRACER | 4199 | #ifdef CONFIG_BOOT_TRACER | 
| 3875 | register_tracer(&boot_tracer); | 4200 | register_tracer(&boot_tracer); | 
| 3876 | current_trace = &boot_tracer; | ||
| 3877 | current_trace->init(&global_trace); | ||
| 3878 | #else | ||
| 3879 | current_trace = &nop_trace; | ||
| 3880 | #endif | 4201 | #endif | 
| 3881 | |||
| 3882 | /* All seems OK, enable tracing */ | 4202 | /* All seems OK, enable tracing */ | 
| 3883 | tracing_disabled = 0; | 4203 | tracing_disabled = 0; | 
| 3884 | 4204 | ||
| @@ -3886,14 +4206,38 @@ __init static int tracer_alloc_buffers(void) | |||
| 3886 | &trace_panic_notifier); | 4206 | &trace_panic_notifier); | 
| 3887 | 4207 | ||
| 3888 | register_die_notifier(&trace_die_notifier); | 4208 | register_die_notifier(&trace_die_notifier); | 
| 3889 | ret = 0; | 4209 | |
| 4210 | return 0; | ||
| 3890 | 4211 | ||
| 3891 | out_free_cpumask: | 4212 | out_free_cpumask: | 
| 4213 | free_cpumask_var(tracing_reader_cpumask); | ||
| 4214 | out_free_tracing_cpumask: | ||
| 3892 | free_cpumask_var(tracing_cpumask); | 4215 | free_cpumask_var(tracing_cpumask); | 
| 3893 | out_free_buffer_mask: | 4216 | out_free_buffer_mask: | 
| 3894 | free_cpumask_var(tracing_buffer_mask); | 4217 | free_cpumask_var(tracing_buffer_mask); | 
| 3895 | out: | 4218 | out: | 
| 3896 | return ret; | 4219 | return ret; | 
| 3897 | } | 4220 | } | 
| 4221 | |||
| 4222 | __init static int clear_boot_tracer(void) | ||
| 4223 | { | ||
| 4224 | /* | ||
| 4225 | * The name of the default bootup tracer lives in an init | ||
| 4226 | * section. This function runs as a late initcall: if the | ||
| 4227 | * requested boot tracer was never registered, clear the | ||
| 4228 | * pointer here so that a later tracer registration does not | ||
| 4229 | * access init memory that is about to be freed. | ||
| 4230 | */ | ||
| 4231 | if (!default_bootup_tracer) | ||
| 4232 | return 0; | ||
| 4233 | |||
| 4234 | printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", | ||
| 4235 | default_bootup_tracer); | ||
| 4236 | default_bootup_tracer = NULL; | ||
| 4237 | |||
| 4238 | return 0; | ||
| 4239 | } | ||
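default_bootup_tracer, which clear_boot_tracer() guards, is set when the user passes an ftrace= tracer name on the kernel command line. Roughly how that side looks (a sketch; the buffer name, its size, and the setup-function name are assumed, not taken from this hunk):

/* Sketch of the "ftrace=" boot-parameter handler. */
static char example_bootup_tracer_buf[100] __initdata;

static int __init example_set_ftrace(char *str)
{
	strncpy(example_bootup_tracer_buf, str,
		sizeof(example_bootup_tracer_buf) - 1);
	default_bootup_tracer = example_bootup_tracer_buf;
	/* Tracing is wanted from boot: expand the buffer up front. */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", example_set_ftrace);

Because the name buffer is __initdata, it vanishes after boot, which is exactly why clear_boot_tracer() must clear the pointer as a late initcall if no tracer ever claimed it.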
| 4240 | |||
| 3898 | early_initcall(tracer_alloc_buffers); | 4241 | early_initcall(tracer_alloc_buffers); | 
| 3899 | fs_initcall(tracer_init_debugfs); | 4242 | fs_initcall(tracer_init_debugfs); | 
| 4243 | late_initcall(clear_boot_tracer); | ||
