Diffstat (limited to 'kernel/trace/trace.c')

 -rw-r--r--  kernel/trace/trace.c  903

1 file changed, 501 insertions(+), 402 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8c358395d338..874f2893cff0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,14 +43,11 @@
 
 #define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)
 
-unsigned long __read_mostly	tracing_max_latency;
-unsigned long __read_mostly	tracing_thresh;
-
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
  */
-static int ring_buffer_expanded;
+int ring_buffer_expanded;
 
 /*
  * We need to change this state when a selftest is running.
@@ -64,7 +61,7 @@ static bool __read_mostly tracing_selftest_running;
 /*
  * If a tracer is running, we do not want to run SELFTEST.
  */
-static bool __read_mostly tracing_selftest_disabled;
+bool __read_mostly tracing_selftest_disabled;
 
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
@@ -89,7 +86,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
@@ -128,19 +125,19 @@ int ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
-#define BOOTUP_TRACER_SIZE		100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE		100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
-static int __init set_ftrace(char *str)
+static int __init set_cmdline_ftrace(char *str)
 {
-	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
 	ring_buffer_expanded = 1;
 	return 1;
 }
-__setup("ftrace=", set_ftrace);
+__setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
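For context, the renamed set_cmdline_ftrace() follows the kernel's standard early-parameter pattern. A minimal sketch of that pattern, with a hypothetical my_param option that is not part of this patch:

/* Hypothetical illustration of the __setup() pattern used above. */
static char my_param_buf[64] __initdata;

static int __init set_my_param(char *str)
{
	/* stash the "my_param=..." value for use later during boot */
	strncpy(my_param_buf, str, sizeof(my_param_buf) - 1);
	return 1;	/* non-zero: the option has been consumed */
}
__setup("my_param=", set_my_param);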
@@ -172,10 +169,11 @@ static struct trace_array	global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
-int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+int filter_current_check_discard(struct ring_buffer *buffer,
+				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, global_trace.buffer, event);
+	return filter_check_discard(call, rec, buffer, event);
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
@@ -244,13 +242,6 @@ static struct tracer		*trace_types __read_mostly;
 static struct tracer		*current_trace __read_mostly;
 
 /*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int			max_tracer_type_len;
-
-/*
  * trace_types_lock is used to protect the trace_types list.
  * This lock is also used to keep user access serialized.
  * Accesses from userspace will grab this lock while userspace
@@ -266,6 +257,9 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
 	TRACE_ITER_GRAPH_TIME;
 
+static int			trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -274,12 +268,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)
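The reworked trace_wake_up() pins the task while it checks the runqueue: get_cpu() disables preemption, so the CPU number stays valid until put_cpu(). A minimal sketch of that idiom with a hypothetical per-CPU counter:

static DEFINE_PER_CPU(unsigned long, my_counter);	/* hypothetical */

static void bump_local_counter(void)
{
	int cpu = get_cpu();		/* disables preemption */

	per_cpu(my_counter, cpu)++;	/* safe: no migration possible here */
	put_cpu();			/* re-enables preemption */
}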
@@ -323,49 +323,125 @@ static const char *trace_options[] = {
 	"printk-msg-only",
 	"context-info",
 	"latency-format",
-	"global-clock",
 	"sleep-time",
 	"graph-time",
 	NULL
 };
 
+static struct {
+	u64 (*func)(void);
+	const char *name;
+} trace_clocks[] = {
+	{ trace_clock_local,	"local" },
+	{ trace_clock_global,	"global" },
+};
+
+int trace_clock_id;
+
 /*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a raw_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
+ * trace_parser_get_init - gets the buffer for trace parser
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+int trace_parser_get_init(struct trace_parser *parser, int size)
+{
+	memset(parser, 0, sizeof(*parser));
+
+	parser->buffer = kmalloc(size, GFP_KERNEL);
+	if (!parser->buffer)
+		return 1;
+
+	parser->size = size;
+	return 0;
+}
 
 /*
- * Copy the new maximum trace into the separate maximum-trace
- * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ * trace_parser_put - frees the buffer for trace parser
  */
-static void
-__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+void trace_parser_put(struct trace_parser *parser)
 {
-	struct trace_array_cpu *data = tr->data[cpu];
+	kfree(parser->buffer);
+}
 
-	max_tr.cpu = cpu;
-	max_tr.time_start = data->preempt_timestamp;
+/*
+ * trace_get_user - reads the user input string separated by space
+ * (matched by isspace(ch))
+ *
+ * For each string found the 'struct trace_parser' is updated,
+ * and the function returns.
+ *
+ * Returns number of bytes read.
+ *
+ * See kernel/trace/trace.h for 'struct trace_parser' details.
+ */
+int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+	size_t cnt, loff_t *ppos)
+{
+	char ch;
+	size_t read = 0;
+	ssize_t ret;
 
-	data = max_tr.data[cpu];
-	data->saved_latency = tracing_max_latency;
+	if (!*ppos)
+		trace_parser_clear(parser);
 
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
-	data->pid = tsk->pid;
-	data->uid = task_uid(tsk);
-	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
-	data->policy = tsk->policy;
-	data->rt_priority = tsk->rt_priority;
+	ret = get_user(ch, ubuf++);
+	if (ret)
+		goto out;
 
-	/* record this tasks comm */
-	tracing_record_cmdline(tsk);
+	read++;
+	cnt--;
+
+	/*
+	 * The parser is not finished with the last write,
+	 * continue reading the user input without skipping spaces.
+	 */
+	if (!parser->cont) {
+		/* skip white space */
+		while (cnt && isspace(ch)) {
+			ret = get_user(ch, ubuf++);
+			if (ret)
+				goto out;
+			read++;
+			cnt--;
+		}
+
+		/* only spaces were written */
+		if (isspace(ch)) {
+			*ppos += read;
+			ret = read;
+			goto out;
+		}
+
+		parser->idx = 0;
+	}
+
+	/* read the non-space input */
+	while (cnt && !isspace(ch)) {
+		if (parser->idx < parser->size - 1)
+			parser->buffer[parser->idx++] = ch;
+		else {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = get_user(ch, ubuf++);
+		if (ret)
+			goto out;
+		read++;
+		cnt--;
+	}
+
+	/* We either got finished input or we have to wait for another call. */
+	if (isspace(ch)) {
+		parser->buffer[parser->idx] = 0;
+		parser->cont = false;
+	} else {
+		parser->cont = true;
+		parser->buffer[parser->idx++] = ch;
+	}
+
+	*ppos += read;
+	ret = read;
+
+out:
+	return ret;
 }
 
 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
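A rough sketch of how a write() handler might drive the new parser helpers; the handler itself is hypothetical, and trace_parser_loaded() is the trace.h helper that reports whether a complete token sits in the buffer:

static ssize_t my_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, PAGE_SIZE))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret >= 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
		pr_info("got token: %s\n", parser.buffer);
	}

	trace_parser_put(&parser);
	return ret;
}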
@@ -411,6 +487,56 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }
 
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ *
+ * It is also used in other places outside the update_max_tr
+ * so it needs to be defined outside of the
+ * CONFIG_TRACER_MAX_TRACE.
+ */
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+unsigned long __read_mostly	tracing_max_latency;
+unsigned long __read_mostly	tracing_thresh;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ */
+static void
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *max_data = tr->data[cpu];
+
+	max_tr.cpu = cpu;
+	max_tr.time_start = data->preempt_timestamp;
+
+	max_data = max_tr.data[cpu];
+	max_data->saved_latency = tracing_max_latency;
+	max_data->critical_start = data->critical_start;
+	max_data->critical_end = data->critical_end;
+
+	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	max_data->pid = tsk->pid;
+	max_data->uid = task_uid(tsk);
+	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+	max_data->policy = tsk->policy;
+	max_data->rt_priority = tsk->rt_priority;
+
+	/* record this tasks comm */
+	tracing_record_cmdline(tsk);
+}
+
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -425,16 +551,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct ring_buffer *buf = tr->buffer;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
-	ftrace_disable_cpu();
-	ring_buffer_reset(tr->buffer);
-	ftrace_enable_cpu();
-
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
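Note that update_max_tr() no longer resets the swapped-in buffer; the max snapshot is now just a pointer exchange under ftrace_max_lock. A simplified sketch of the idea, reduced to plain pointers and a hypothetical snapshot type:

struct snapshot {		/* hypothetical stand-in */
	void *live;		/* buffer tracers are writing to */
	void *max;		/* frozen worst-case trace */
};

static void take_snapshot(struct snapshot *s, spinlock_t *lock)
{
	void *tmp;

	spin_lock(lock);
	tmp = s->live;		/* current trace becomes the max trace */
	s->live = s->max;	/* old max buffer is reused for writing */
	s->max = tmp;
	spin_unlock(lock);
}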
@@ -452,21 +577,35 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	int ret;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
-	ring_buffer_reset(max_tr.buffer);
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
+	if (ret == -EBUSY) {
+		/*
+		 * We failed to swap the buffer due to a commit taking
+		 * place on this CPU. We fail to record, but we reset
+		 * the max trace buffer (no one writes directly to it)
+		 * and flag that it failed.
+		 */
+		trace_array_printk(&max_tr, _THIS_IP_,
+			"Failed to swap buffers due to commit in progress\n");
+	}
+
 	ftrace_enable_cpu();
 
-	WARN_ON_ONCE(ret && ret != -EAGAIN);
+	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 /**
  * register_tracer - register a tracer with the ftrace system.
@@ -479,7 +618,6 @@ __releases(kernel_lock)
 __acquires(kernel_lock)
 {
 	struct tracer *t;
-	int len;
 	int ret = 0;
 
 	if (!type->name) {
@@ -487,6 +625,11 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
+	if (strlen(type->name) > MAX_TRACER_SIZE) {
+		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
+		return -1;
+	}
+
 	/*
 	 * When this gets called we hold the BKL which means that
 	 * preemption is disabled. Various trace selftests however
@@ -501,7 +644,7 @@ __acquires(kernel_lock)
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
-			pr_info("Trace %s already registered\n",
+			pr_info("Tracer %s already registered\n",
 				type->name);
 			ret = -1;
 			goto out;
@@ -523,7 +666,6 @@ __acquires(kernel_lock)
 	if (type->selftest && !tracing_selftest_disabled) {
 		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
-		int i;
 
 		/*
 		 * Run a selftest on this tracer.
@@ -532,8 +674,7 @@ __acquires(kernel_lock)
 		 * internal tracing to verify that everything is in order.
 		 * If we fail, we do not register this tracer.
 		 */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		current_trace = type;
 		/* the test is responsible for initializing and enabling */
@@ -546,8 +687,7 @@ __acquires(kernel_lock)
 			goto out;
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -555,9 +695,6 @@ __acquires(kernel_lock)
 
 	type->next = trace_types;
 	trace_types = type;
-	len = strlen(type->name);
-	if (len > max_tracer_type_len)
-		max_tracer_type_len = len;
 
  out:
 	tracing_selftest_running = false;
@@ -566,7 +703,7 @@ __acquires(kernel_lock)
 	if (ret || !default_bootup_tracer)
 		goto out_unlock;
 
-	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 		goto out_unlock;
 
 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -588,14 +725,13 @@ __acquires(kernel_lock)
 void unregister_tracer(struct tracer *type)
 {
 	struct tracer **t;
-	int len;
 
 	mutex_lock(&trace_types_lock);
 	for (t = &trace_types; *t; t = &(*t)->next) {
 		if (*t == type)
 			goto found;
 	}
-	pr_info("Trace %s not registered\n", type->name);
+	pr_info("Tracer %s not registered\n", type->name);
 	goto out;
 
 found:
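unregister_tracer() walks the list with a pointer to the link itself (struct tracer **t), so unlinking needs no special case for the head. A sketch of that idiom with a hypothetical node type:

struct node { struct node *next; };	/* hypothetical */

static void unlink(struct node **head, struct node *victim)
{
	struct node **t;

	for (t = head; *t; t = &(*t)->next) {
		if (*t == victim) {
			*t = victim->next;	/* splice the node out */
			return;
		}
	}
}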
@@ -608,35 +744,46 @@ void unregister_tracer(struct tracer *type)
 		current_trace->stop(&global_trace);
 		current_trace = &nop_trace;
 	}
-
-	if (strlen(type->name) != max_tracer_type_len)
-		goto out;
-
-	max_tracer_type_len = 0;
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		len = strlen((*t)->name);
-		if (len > max_tracer_type_len)
-			max_tracer_type_len = len;
-	}
- out:
+out:
 	mutex_unlock(&trace_types_lock);
 }
 
-void tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct trace_array *tr, int cpu)
 {
 	ftrace_disable_cpu();
 	ring_buffer_reset_cpu(tr->buffer, cpu);
 	ftrace_enable_cpu();
 }
 
+void tracing_reset(struct trace_array *tr, int cpu)
+{
+	struct ring_buffer *buffer = tr->buffer;
+
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+	__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
+}
+
 void tracing_reset_online_cpus(struct trace_array *tr)
 {
+	struct ring_buffer *buffer = tr->buffer;
 	int cpu;
 
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
+		__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
 }
 
 void tracing_reset_current(int cpu)
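The split into __tracing_reset() and tracing_reset() introduces a disable/drain/mutate/enable sequence: stop new commits, let in-flight writers finish via synchronize_sched(), then reset safely. A condensed sketch of the same sequence, mirroring the code above (the wrapper name is hypothetical):

static void quiesced_reset(struct ring_buffer *buffer, int cpu)
{
	ring_buffer_record_disable(buffer);	/* no new commits start */

	synchronize_sched();			/* in-flight commits drain */

	ring_buffer_reset_cpu(buffer, cpu);	/* now safe to reset */

	ring_buffer_record_enable(buffer);	/* resume recording */
}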
@@ -667,8 +814,10 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
-static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+int is_tracing_stopped(void)
+{
+	return trace_stop_count;
+}
 
 /**
  * ftrace_off_permanent - disable all ftrace code permanently
@@ -837,7 +986,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count		= pc & 0xff;
 	entry->pid			= (tsk) ? tsk->pid : 0;
-	entry->tgid			= (tsk) ? tsk->tgid : 0;
+	entry->lock_depth		= (tsk) ? tsk->lock_depth : 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -850,14 +999,15 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
-struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
-						    int type,
-						    unsigned long len,
-						    unsigned long flags, int pc)
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+			  int type,
+			  unsigned long len,
+			  unsigned long flags, int pc)
 {
 	struct ring_buffer_event *event;
 
-	event = ring_buffer_lock_reserve(tr->buffer, len);
+	event = ring_buffer_lock_reserve(buffer, len);
 	if (event != NULL) {
 		struct trace_entry *ent = ring_buffer_event_data(event);
 
@@ -867,58 +1017,60 @@ struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 
 	return event;
 }
-static void ftrace_trace_stack(struct trace_array *tr,
-			       unsigned long flags, int skip, int pc);
-static void ftrace_trace_userstack(struct trace_array *tr,
-				   unsigned long flags, int pc);
 
-static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
-					struct ring_buffer_event *event,
-					unsigned long flags, int pc,
-					int wake)
+static inline void
+__trace_buffer_unlock_commit(struct ring_buffer *buffer,
+			     struct ring_buffer_event *event,
+			     unsigned long flags, int pc,
+			     int wake)
 {
-	ring_buffer_unlock_commit(tr->buffer, event);
+	ring_buffer_unlock_commit(buffer, event);
 
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	ftrace_trace_stack(buffer, flags, 6, pc);
+	ftrace_trace_userstack(buffer, flags, pc);
 
 	if (wake)
 		trace_wake_up();
 }
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc)
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
+				  int type, unsigned long len,
 				  unsigned long flags, int pc)
 {
-	return trace_buffer_lock_reserve(&global_trace,
+	*current_rb = global_trace.buffer;
+	return trace_buffer_lock_reserve(*current_rb,
 					 type, len, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+				       struct ring_buffer_event *event,
+				       unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
 }
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+					 struct ring_buffer_event *event)
 {
-	ring_buffer_discard_commit(global_trace.buffer, event);
+	ring_buffer_discard_commit(buffer, event);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
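Every writer in this file follows the same reserve/fill/commit cycle against the now buffer-centric helpers. A hedged sketch of a caller; TRACE_MY_EVENT and struct my_entry are hypothetical stand-ins for a real event type:

struct my_entry {
	struct trace_entry	ent;
	int			value;
};

static void write_my_event(struct ring_buffer *buffer,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_MY_EVENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;		/* buffer full or recording disabled */

	entry = ring_buffer_event_data(event);
	entry->value = 42;	/* fill in the payload */

	ring_buffer_unlock_commit(buffer, event);
}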
@@ -928,6 +1080,7 @@ trace_function(struct trace_array *tr,
 	       int pc)
 {
 	struct ftrace_event_call *call = &event_function;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
@@ -935,7 +1088,7 @@ trace_function(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 					  flags, pc);
 	if (!event)
 		return;
@@ -943,58 +1096,10 @@ trace_function(struct trace_array *tr,
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-}
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int __trace_graph_entry(struct trace_array *tr,
-				struct ftrace_graph_ent *trace,
-				unsigned long flags,
-				int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_entry;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ent_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return 0;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return 0;
-	entry = ring_buffer_event_data(event);
-	entry->graph_ent			= *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
-
-	return 1;
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
-static void __trace_graph_return(struct trace_array *tr,
-				struct ftrace_graph_ret *trace,
-				unsigned long flags,
-				int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_exit;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ret_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->ret				= *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
-}
-#endif
-
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -1004,17 +1109,17 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 		trace_function(tr, ip, parent_ip, flags, pc);
 }
 
-static void __ftrace_trace_stack(struct trace_array *tr,
+#ifdef CONFIG_STACKTRACE
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc)
 {
-#ifdef CONFIG_STACKTRACE
 	struct ftrace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -1027,32 +1132,28 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries		= entry->caller;
 
 	save_stack_trace(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-#endif
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
-static void ftrace_trace_stack(struct trace_array *tr,
-			       unsigned long flags,
-			       int skip, int pc)
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
+			int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(buffer, flags, skip, pc);
 }
 
-void __trace_stack(struct trace_array *tr,
-		   unsigned long flags,
-		   int skip, int pc)
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+		   int pc)
 {
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
-static void ftrace_trace_userstack(struct trace_array *tr,
-				   unsigned long flags, int pc)
+void
+ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
-#ifdef CONFIG_STACKTRACE
 	struct ftrace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
@@ -1061,12 +1162,13 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
 
+	entry->tgid		= current->tgid;
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
 	trace.nr_entries	= 0;
@@ -1075,9 +1177,8 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries		= entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-#endif
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 #ifdef UNUSED
@@ -1087,16 +1188,20 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 }
 #endif /* UNUSED */
 
+#endif /* CONFIG_STACKTRACE */
+
 static void
 ftrace_trace_special(void *__tr,
 		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
 		     int pc)
 {
+	struct ftrace_event_call *call = &event_special;
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
+	struct ring_buffer *buffer = tr->buffer;
 	struct special_entry *entry;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
 					  sizeof(*entry), 0, pc);
 	if (!event)
 		return;
@@ -1104,7 +1209,9 @@ ftrace_trace_special(void *__tr,
 	entry->arg1			= arg1;
 	entry->arg2			= arg2;
 	entry->arg3			= arg3;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void
@@ -1115,62 +1222,6 @@ __trace_special(void *__tr, void *__data,
 }
 
 void
-tracing_sched_switch_trace(struct trace_array *tr,
-			   struct task_struct *prev,
-			   struct task_struct *next,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_context_switch;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-
-	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry	= ring_buffer_event_data(event);
-	entry->prev_pid			= prev->pid;
-	entry->prev_prio		= prev->prio;
-	entry->prev_state		= prev->state;
-	entry->next_pid			= next->pid;
-	entry->next_prio		= next->prio;
-	entry->next_state		= next->state;
-	entry->next_cpu			= task_cpu(next);
-
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, flags, pc);
-}
-
-void
-tracing_sched_wakeup_trace(struct trace_array *tr,
-			   struct task_struct *wakee,
-			   struct task_struct *curr,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_wakeup;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-
-	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry	= ring_buffer_event_data(event);
-	entry->prev_pid			= curr->pid;
-	entry->prev_prio		= curr->prio;
-	entry->prev_state		= curr->state;
-	entry->next_pid			= wakee->pid;
-	entry->next_prio		= wakee->prio;
-	entry->next_state		= wakee->state;
-	entry->next_cpu			= task_cpu(wakee);
-
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
-}
-
-void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 	struct trace_array *tr = &global_trace;
@@ -1194,68 +1245,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-int trace_graph_entry(struct ftrace_graph_ent *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int ret;
-	int cpu;
-	int pc;
-
-	if (!ftrace_trace_task(current))
-		return 0;
-
-	if (!ftrace_graph_addr(trace->func))
-		return 0;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		ret = __trace_graph_entry(tr, trace, flags, pc);
-	} else {
-		ret = 0;
-	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-
-	return ret;
-}
-
-void trace_graph_return(struct ftrace_graph_ret *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		__trace_graph_return(tr, trace, flags, pc);
-	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
@@ -1268,6 +1257,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
@@ -1300,7 +1290,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		goto out_unlock;
 
 	size = sizeof(*entry) + sizeof(u32) * len;
-	event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+					  flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1308,8 +1300,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt			= fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1324,14 +1316,30 @@ out:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_array_printk(struct trace_array *tr,
+		       unsigned long ip, const char *fmt, ...)
+{
+	int ret;
+	va_list ap;
+
+	if (!(trace_flags & TRACE_ITER_PRINTK))
+		return 0;
+
+	va_start(ap, fmt);
+	ret = trace_array_vprintk(tr, ip, fmt, ap);
+	va_end(ap);
+	return ret;
+}
+
+int trace_array_vprintk(struct trace_array *tr,
+			unsigned long ip, const char *fmt, va_list args)
 {
 	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
-	struct trace_array *tr = &global_trace;
+	struct ring_buffer *buffer;
 	struct trace_array_cpu *data;
 	int cpu, len = 0, size, pc;
 	struct print_entry *entry;
@@ -1353,22 +1361,25 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
 	__raw_spin_lock(&trace_buf_lock);
-	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
-	len = min(len, TRACE_BUF_SIZE-1);
-	trace_buf[len] = 0;
+	if (args == NULL) {
+		strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
+		len = strlen(trace_buf);
+	} else
+		len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
-	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
 	entry->ip			= ip;
 
 	memcpy(&entry->buf, trace_buf, len);
-	entry->buf[len] = 0;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	entry->buf[len] = '\0';
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
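trace_array_printk() gives printk-style output scoped to one trace_array; update_max_tr_single() above already uses it to log the failed -EBUSY swap. A sketch of another call site, with a hypothetical caller-owned array:

static void report_drop(struct trace_array *my_tr, int cpu, long lost)
{
	/* _THIS_IP_ records the caller's instruction pointer */
	trace_array_printk(my_tr, _THIS_IP_,
			   "cpu %d lost %ld events\n", cpu, lost);
}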
| @@ -1380,6 +1391,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
| 1380 | 1391 | ||
| 1381 | return len; | 1392 | return len; |
| 1382 | } | 1393 | } |
| 1394 | |||
| 1395 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | ||
| 1396 | { | ||
| 1397 | return trace_array_vprintk(&global_trace, ip, fmt, args); | ||
| 1398 | } | ||
| 1383 | EXPORT_SYMBOL_GPL(trace_vprintk); | 1399 | EXPORT_SYMBOL_GPL(trace_vprintk); |
| 1384 | 1400 | ||
| 1385 | enum trace_file_type { | 1401 | enum trace_file_type { |
| @@ -1519,6 +1535,37 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 1519 | return ent; | 1535 | return ent; |
| 1520 | } | 1536 | } |
| 1521 | 1537 | ||
| 1538 | static void tracing_iter_reset(struct trace_iterator *iter, int cpu) | ||
| 1539 | { | ||
| 1540 | struct trace_array *tr = iter->tr; | ||
| 1541 | struct ring_buffer_event *event; | ||
| 1542 | struct ring_buffer_iter *buf_iter; | ||
| 1543 | unsigned long entries = 0; | ||
| 1544 | u64 ts; | ||
| 1545 | |||
| 1546 | tr->data[cpu]->skipped_entries = 0; | ||
| 1547 | |||
| 1548 | if (!iter->buffer_iter[cpu]) | ||
| 1549 | return; | ||
| 1550 | |||
| 1551 | buf_iter = iter->buffer_iter[cpu]; | ||
| 1552 | ring_buffer_iter_reset(buf_iter); | ||
| 1553 | |||
| 1554 | /* | ||
| 1555 | * We could have the case with the max latency tracers | ||
| 1556 | * that a reset never took place on a cpu. This is evident | ||
| 1557 | * by the timestamp being before the start of the buffer. | ||
| 1558 | */ | ||
| 1559 | while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { | ||
| 1560 | if (ts >= iter->tr->time_start) | ||
| 1561 | break; | ||
| 1562 | entries++; | ||
| 1563 | ring_buffer_read(buf_iter, NULL); | ||
| 1564 | } | ||
| 1565 | |||
| 1566 | tr->data[cpu]->skipped_entries = entries; | ||
| 1567 | } | ||
| 1568 | |||
| 1522 | /* | 1569 | /* |
| 1523 | * No necessary locking here. The worst thing which can | 1570 | * No necessary locking here. The worst thing which can |
| 1524 | * happen is loosing events consumed at the same time | 1571 | * happen is loosing events consumed at the same time |
| @@ -1557,10 +1604,9 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
| 1557 | 1604 | ||
| 1558 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | 1605 | if (cpu_file == TRACE_PIPE_ALL_CPU) { |
| 1559 | for_each_tracing_cpu(cpu) | 1606 | for_each_tracing_cpu(cpu) |
| 1560 | ring_buffer_iter_reset(iter->buffer_iter[cpu]); | 1607 | tracing_iter_reset(iter, cpu); |
| 1561 | } else | 1608 | } else |
| 1562 | ring_buffer_iter_reset(iter->buffer_iter[cpu_file]); | 1609 | tracing_iter_reset(iter, cpu_file); |
| 1563 | |||
| 1564 | 1610 | ||
| 1565 | ftrace_enable_cpu(); | 1611 | ftrace_enable_cpu(); |
| 1566 | 1612 | ||
| @@ -1589,10 +1635,10 @@ static void print_lat_help_header(struct seq_file *m) | |||
| 1589 | seq_puts(m, "# | / _----=> need-resched \n"); | 1635 | seq_puts(m, "# | / _----=> need-resched \n"); |
| 1590 | seq_puts(m, "# || / _---=> hardirq/softirq \n"); | 1636 | seq_puts(m, "# || / _---=> hardirq/softirq \n"); |
| 1591 | seq_puts(m, "# ||| / _--=> preempt-depth \n"); | 1637 | seq_puts(m, "# ||| / _--=> preempt-depth \n"); |
| 1592 | seq_puts(m, "# |||| / \n"); | 1638 | seq_puts(m, "# |||| /_--=> lock-depth \n"); |
| 1593 | seq_puts(m, "# ||||| delay \n"); | 1639 | seq_puts(m, "# |||||/ delay \n"); |
| 1594 | seq_puts(m, "# cmd pid ||||| time | caller \n"); | 1640 | seq_puts(m, "# cmd pid |||||| time | caller \n"); |
| 1595 | seq_puts(m, "# \\ / ||||| \\ | / \n"); | 1641 | seq_puts(m, "# \\ / |||||| \\ | / \n"); |
| 1596 | } | 1642 | } |
| 1597 | 1643 | ||
| 1598 | static void print_func_help_header(struct seq_file *m) | 1644 | static void print_func_help_header(struct seq_file *m) |
| @@ -1609,16 +1655,32 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
| 1609 | struct trace_array *tr = iter->tr; | 1655 | struct trace_array *tr = iter->tr; |
| 1610 | struct trace_array_cpu *data = tr->data[tr->cpu]; | 1656 | struct trace_array_cpu *data = tr->data[tr->cpu]; |
| 1611 | struct tracer *type = current_trace; | 1657 | struct tracer *type = current_trace; |
| 1612 | unsigned long total; | 1658 | unsigned long entries = 0; |
| 1613 | unsigned long entries; | 1659 | unsigned long total = 0; |
| 1660 | unsigned long count; | ||
| 1614 | const char *name = "preemption"; | 1661 | const char *name = "preemption"; |
| 1662 | int cpu; | ||
| 1615 | 1663 | ||
| 1616 | if (type) | 1664 | if (type) |
| 1617 | name = type->name; | 1665 | name = type->name; |
| 1618 | 1666 | ||
| 1619 | entries = ring_buffer_entries(iter->tr->buffer); | 1667 | |
| 1620 | total = entries + | 1668 | for_each_tracing_cpu(cpu) { |
| 1621 | ring_buffer_overruns(iter->tr->buffer); | 1669 | count = ring_buffer_entries_cpu(tr->buffer, cpu); |
| 1670 | /* | ||
| 1671 | * If this buffer has skipped entries, then it holds all | ||
| 1672 | * entries for the trace, and we must ignore the | ||
| 1673 | * ones before the start time stamp. | ||
| 1674 | */ | ||
| 1675 | if (tr->data[cpu]->skipped_entries) { | ||
| 1676 | count -= tr->data[cpu]->skipped_entries; | ||
| 1677 | /* total is the same as the entries */ | ||
| 1678 | total += count; | ||
| 1679 | } else | ||
| 1680 | total += count + | ||
| 1681 | ring_buffer_overrun_cpu(tr->buffer, cpu); | ||
| 1682 | entries += count; | ||
| 1683 | } | ||
| 1622 | 1684 | ||
| 1623 | seq_printf(m, "# %s latency trace v1.1.5 on %s\n", | 1685 | seq_printf(m, "# %s latency trace v1.1.5 on %s\n", |
| 1624 | name, UTS_RELEASE); | 1686 | name, UTS_RELEASE); |
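The per-cpu loop above replaces the old buffer-wide ring_buffer_entries()/ring_buffer_overruns() totals so that the stale events counted by tracing_iter_reset() can be subtracted. A hedged model of the same arithmetic, with invented per-cpu numbers in place of the ring buffer queries:

    #include <stdio.h>

    #define NR_CPUS 2

    int main(void)
    {
        /* Invented stats standing in for the per-cpu queries. */
        unsigned long buf_entries[NR_CPUS] = { 50, 40 }; /* ring_buffer_entries_cpu() */
        unsigned long overrun[NR_CPUS]     = { 10,  0 }; /* ring_buffer_overrun_cpu() */
        unsigned long skipped[NR_CPUS]     = {  0,  5 }; /* skipped_entries */
        unsigned long entries = 0, total = 0, count;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            count = buf_entries[cpu];
            if (skipped[cpu]) {
                /* Buffer holds the whole trace: drop the stale
                 * head, and total equals the surviving entries. */
                count -= skipped[cpu];
                total += count;
            } else
                total += count + overrun[cpu];
            entries += count;
        }

        printf("entries=%lu total=%lu\n", entries, total); /* 85 and 95 */
        return 0;
    }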
| @@ -1660,7 +1722,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
| 1660 | seq_puts(m, "\n# => ended at: "); | 1722 | seq_puts(m, "\n# => ended at: "); |
| 1661 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); | 1723 | seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); |
| 1662 | trace_print_seq(m, &iter->seq); | 1724 | trace_print_seq(m, &iter->seq); |
| 1663 | seq_puts(m, "#\n"); | 1725 | seq_puts(m, "\n#\n"); |
| 1664 | } | 1726 | } |
| 1665 | 1727 | ||
| 1666 | seq_puts(m, "#\n"); | 1728 | seq_puts(m, "#\n"); |
| @@ -1679,6 +1741,9 @@ static void test_cpu_buff_start(struct trace_iterator *iter) | |||
| 1679 | if (cpumask_test_cpu(iter->cpu, iter->started)) | 1741 | if (cpumask_test_cpu(iter->cpu, iter->started)) |
| 1680 | return; | 1742 | return; |
| 1681 | 1743 | ||
| 1744 | if (iter->tr->data[iter->cpu]->skipped_entries) | ||
| 1745 | return; | ||
| 1746 | |||
| 1682 | cpumask_set_cpu(iter->cpu, iter->started); | 1747 | cpumask_set_cpu(iter->cpu, iter->started); |
| 1683 | 1748 | ||
| 1684 | /* Don't print started cpu buffer for the first entry of the trace */ | 1749 | /* Don't print started cpu buffer for the first entry of the trace */ |
| @@ -1885,7 +1950,7 @@ static int s_show(struct seq_file *m, void *v) | |||
| 1885 | return 0; | 1950 | return 0; |
| 1886 | } | 1951 | } |
| 1887 | 1952 | ||
| 1888 | static struct seq_operations tracer_seq_ops = { | 1953 | static const struct seq_operations tracer_seq_ops = { |
| 1889 | .start = s_start, | 1954 | .start = s_start, |
| 1890 | .next = s_next, | 1955 | .next = s_next, |
| 1891 | .stop = s_stop, | 1956 | .stop = s_stop, |
| @@ -1920,11 +1985,9 @@ __tracing_open(struct inode *inode, struct file *file) | |||
| 1920 | if (current_trace) | 1985 | if (current_trace) |
| 1921 | *iter->trace = *current_trace; | 1986 | *iter->trace = *current_trace; |
| 1922 | 1987 | ||
| 1923 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) | 1988 | if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) |
| 1924 | goto fail; | 1989 | goto fail; |
| 1925 | 1990 | ||
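The zalloc_cpumask_var() substitution just above hands back an already-zeroed mask, which is why the explicit cpumask_clear(iter->started) call disappears here; the same swap later removes cpumask_clear(tracing_reader_cpumask) from tracer_alloc_buffers().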
| 1926 | cpumask_clear(iter->started); | ||
| 1927 | |||
| 1928 | if (current_trace && current_trace->print_max) | 1991 | if (current_trace && current_trace->print_max) |
| 1929 | iter->tr = &max_tr; | 1992 | iter->tr = &max_tr; |
| 1930 | else | 1993 | else |
| @@ -1941,19 +2004,23 @@ __tracing_open(struct inode *inode, struct file *file) | |||
| 1941 | if (ring_buffer_overruns(iter->tr->buffer)) | 2004 | if (ring_buffer_overruns(iter->tr->buffer)) |
| 1942 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | 2005 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
| 1943 | 2006 | ||
| 2007 | /* stop the trace while dumping */ | ||
| 2008 | tracing_stop(); | ||
| 2009 | |||
| 1944 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | 2010 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { |
| 1945 | for_each_tracing_cpu(cpu) { | 2011 | for_each_tracing_cpu(cpu) { |
| 1946 | 2012 | ||
| 1947 | iter->buffer_iter[cpu] = | 2013 | iter->buffer_iter[cpu] = |
| 1948 | ring_buffer_read_start(iter->tr->buffer, cpu); | 2014 | ring_buffer_read_start(iter->tr->buffer, cpu); |
| 2015 | tracing_iter_reset(iter, cpu); | ||
| 1949 | } | 2016 | } |
| 1950 | } else { | 2017 | } else { |
| 1951 | cpu = iter->cpu_file; | 2018 | cpu = iter->cpu_file; |
| 1952 | iter->buffer_iter[cpu] = | 2019 | iter->buffer_iter[cpu] = |
| 1953 | ring_buffer_read_start(iter->tr->buffer, cpu); | 2020 | ring_buffer_read_start(iter->tr->buffer, cpu); |
| 2021 | tracing_iter_reset(iter, cpu); | ||
| 1954 | } | 2022 | } |
| 1955 | 2023 | ||
| 1956 | /* TODO stop tracer */ | ||
| 1957 | ret = seq_open(file, &tracer_seq_ops); | 2024 | ret = seq_open(file, &tracer_seq_ops); |
| 1958 | if (ret < 0) { | 2025 | if (ret < 0) { |
| 1959 | fail_ret = ERR_PTR(ret); | 2026 | fail_ret = ERR_PTR(ret); |
| @@ -1963,9 +2030,6 @@ __tracing_open(struct inode *inode, struct file *file) | |||
| 1963 | m = file->private_data; | 2030 | m = file->private_data; |
| 1964 | m->private = iter; | 2031 | m->private = iter; |
| 1965 | 2032 | ||
| 1966 | /* stop the trace while dumping */ | ||
| 1967 | tracing_stop(); | ||
| 1968 | |||
| 1969 | mutex_unlock(&trace_types_lock); | 2033 | mutex_unlock(&trace_types_lock); |
| 1970 | 2034 | ||
| 1971 | return iter; | 2035 | return iter; |
| @@ -1976,6 +2040,7 @@ __tracing_open(struct inode *inode, struct file *file) | |||
| 1976 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | 2040 | ring_buffer_read_finish(iter->buffer_iter[cpu]); |
| 1977 | } | 2041 | } |
| 1978 | free_cpumask_var(iter->started); | 2042 | free_cpumask_var(iter->started); |
| 2043 | tracing_start(); | ||
| 1979 | fail: | 2044 | fail: |
| 1980 | mutex_unlock(&trace_types_lock); | 2045 | mutex_unlock(&trace_types_lock); |
| 1981 | kfree(iter->trace); | 2046 | kfree(iter->trace); |
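Two ordering details in the __tracing_open() hunks above are worth noting: tracing_stop() now runs before the buffer iterators are created and reset, so no events can arrive between iterator setup and the dump (previously the stop happened only after seq_open(), behind a "/* TODO stop tracer */" note), and the error path that unwinds the iterators gains a matching tracing_start() so a failed open no longer leaves tracing stopped.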
| @@ -2097,7 +2162,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 2097 | return 0; | 2162 | return 0; |
| 2098 | } | 2163 | } |
| 2099 | 2164 | ||
| 2100 | static struct seq_operations show_traces_seq_ops = { | 2165 | static const struct seq_operations show_traces_seq_ops = { |
| 2101 | .start = t_start, | 2166 | .start = t_start, |
| 2102 | .next = t_next, | 2167 | .next = t_next, |
| 2103 | .stop = t_stop, | 2168 | .stop = t_stop, |
| @@ -2257,8 +2322,8 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf, | |||
| 2257 | len += 3; /* "no" and newline */ | 2322 | len += 3; /* "no" and newline */ |
| 2258 | } | 2323 | } |
| 2259 | 2324 | ||
| 2260 | /* +2 for \n and \0 */ | 2325 | /* +1 for \0 */ |
| 2261 | buf = kmalloc(len + 2, GFP_KERNEL); | 2326 | buf = kmalloc(len + 1, GFP_KERNEL); |
| 2262 | if (!buf) { | 2327 | if (!buf) { |
| 2263 | mutex_unlock(&trace_types_lock); | 2328 | mutex_unlock(&trace_types_lock); |
| 2264 | return -ENOMEM; | 2329 | return -ENOMEM; |
| @@ -2281,7 +2346,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf, | |||
| 2281 | } | 2346 | } |
| 2282 | mutex_unlock(&trace_types_lock); | 2347 | mutex_unlock(&trace_types_lock); |
| 2283 | 2348 | ||
| 2284 | WARN_ON(r >= len + 2); | 2349 | WARN_ON(r >= len + 1); |
| 2285 | 2350 | ||
| 2286 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2351 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
| 2287 | 2352 | ||
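The kmalloc() trim above works because the sizing pass already counts every byte that will be written: the visible branch adds 3 extra bytes for a cleared flag ("no" plus the newline), and the set-flag branch (outside this hunk) presumably adds 1 for the newline alone, so only the terminating NUL remains to be reserved. A small userspace sketch of the two passes, with invented option names and set bits:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Invented option table; which flags are set is illustrative only. */
    static const char *opts[] = { "print-parent", "sym-offset", "block" };
    static const int is_set[] = { 1, 0, 1 };
    #define NR_OPTS (sizeof(opts) / sizeof(opts[0]))

    int main(void)
    {
        size_t len = 0, r = 0, i;
        char *buf;

        /* Size pass: "name\n" costs strlen+1, "noname\n" strlen+3. */
        for (i = 0; i < NR_OPTS; i++)
            len += strlen(opts[i]) + (is_set[i] ? 1 : 3);

        buf = malloc(len + 1);                  /* +1 for '\0' only */
        if (!buf)
            return 1;

        /* Fill pass: lands on exactly len bytes, which is what the
         * adjusted WARN_ON bound expresses. */
        for (i = 0; i < NR_OPTS; i++)
            r += snprintf(buf + r, len + 1 - r, "%s%s\n",
                          is_set[i] ? "" : "no", opts[i]);

        printf("len=%zu r=%zu\n%s", len, r, buf);
        free(buf);
        return 0;
    }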
| @@ -2292,23 +2357,23 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf, | |||
| 2292 | /* Try to assign a tracer specific option */ | 2357 | /* Try to assign a tracer specific option */ |
| 2293 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | 2358 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) |
| 2294 | { | 2359 | { |
| 2295 | struct tracer_flags *trace_flags = trace->flags; | 2360 | struct tracer_flags *tracer_flags = trace->flags; |
| 2296 | struct tracer_opt *opts = NULL; | 2361 | struct tracer_opt *opts = NULL; |
| 2297 | int ret = 0, i = 0; | 2362 | int ret = 0, i = 0; |
| 2298 | int len; | 2363 | int len; |
| 2299 | 2364 | ||
| 2300 | for (i = 0; trace_flags->opts[i].name; i++) { | 2365 | for (i = 0; tracer_flags->opts[i].name; i++) { |
| 2301 | opts = &trace_flags->opts[i]; | 2366 | opts = &tracer_flags->opts[i]; |
| 2302 | len = strlen(opts->name); | 2367 | len = strlen(opts->name); |
| 2303 | 2368 | ||
| 2304 | if (strncmp(cmp, opts->name, len) == 0) { | 2369 | if (strncmp(cmp, opts->name, len) == 0) { |
| 2305 | ret = trace->set_flag(trace_flags->val, | 2370 | ret = trace->set_flag(tracer_flags->val, |
| 2306 | opts->bit, !neg); | 2371 | opts->bit, !neg); |
| 2307 | break; | 2372 | break; |
| 2308 | } | 2373 | } |
| 2309 | } | 2374 | } |
| 2310 | /* Not found */ | 2375 | /* Not found */ |
| 2311 | if (!trace_flags->opts[i].name) | 2376 | if (!tracer_flags->opts[i].name) |
| 2312 | return -EINVAL; | 2377 | return -EINVAL; |
| 2313 | 2378 | ||
| 2314 | /* Refused to handle */ | 2379 | /* Refused to handle */ |
| @@ -2316,9 +2381,9 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
| 2316 | return ret; | 2381 | return ret; |
| 2317 | 2382 | ||
| 2318 | if (neg) | 2383 | if (neg) |
| 2319 | trace_flags->val &= ~opts->bit; | 2384 | tracer_flags->val &= ~opts->bit; |
| 2320 | else | 2385 | else |
| 2321 | trace_flags->val |= opts->bit; | 2386 | tracer_flags->val |= opts->bit; |
| 2322 | 2387 | ||
| 2323 | return 0; | 2388 | return 0; |
| 2324 | } | 2389 | } |
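set_tracer_option() above is a plain prefix match over the tracer's option table followed by a bit flip, applied only when the tracer's set_flag() callback accepted the change. A self-contained sketch of that pattern, with an invented option table and flags word:

    #include <stdio.h>
    #include <string.h>

    struct opt { const char *name; unsigned int bit; };

    /* Invented per-tracer option table; a NULL name terminates it. */
    static const struct opt opts[] = {
        { "verbose", 0x1 },
        { "raw",     0x2 },
        { NULL,      0   },
    };
    static unsigned int tracer_flags_val = 0x1;

    static int set_option(const char *cmp, int neg)
    {
        int i;

        for (i = 0; opts[i].name; i++)
            if (strncmp(cmp, opts[i].name, strlen(opts[i].name)) == 0)
                break;
        if (!opts[i].name)
            return -1;                  /* not found: -EINVAL upstream */

        if (neg)
            tracer_flags_val &= ~opts[i].bit;
        else
            tracer_flags_val |= opts[i].bit;
        return 0;
    }

    int main(void)
    {
        set_option("raw", 0);           /* "raw" sets bit 0x2 */
        set_option("verbose", 1);       /* "noverbose" arrives as neg=1 */
        printf("flags=0x%x\n", tracer_flags_val);  /* 0x2 */
        return 0;
    }

The rename from trace_flags to tracer_flags in the same hunk also stops the local from shadowing the global trace_flags word that set_tracer_flags() just below manipulates.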
| @@ -2333,22 +2398,6 @@ static void set_tracer_flags(unsigned int mask, int enabled) | |||
| 2333 | trace_flags |= mask; | 2398 | trace_flags |= mask; |
| 2334 | else | 2399 | else |
| 2335 | trace_flags &= ~mask; | 2400 | trace_flags &= ~mask; |
| 2336 | |||
| 2337 | if (mask == TRACE_ITER_GLOBAL_CLK) { | ||
| 2338 | u64 (*func)(void); | ||
| 2339 | |||
| 2340 | if (enabled) | ||
| 2341 | func = trace_clock_global; | ||
| 2342 | else | ||
| 2343 | func = trace_clock_local; | ||
| 2344 | |||
| 2345 | mutex_lock(&trace_types_lock); | ||
| 2346 | ring_buffer_set_clock(global_trace.buffer, func); | ||
| 2347 | |||
| 2348 | if (max_tr.buffer) | ||
| 2349 | ring_buffer_set_clock(max_tr.buffer, func); | ||
| 2350 | mutex_unlock(&trace_types_lock); | ||
| 2351 | } | ||
| 2352 | } | 2401 | } |
| 2353 | 2402 | ||
| 2354 | static ssize_t | 2403 | static ssize_t |
| @@ -2392,7 +2441,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2392 | return ret; | 2441 | return ret; |
| 2393 | } | 2442 | } |
| 2394 | 2443 | ||
| 2395 | filp->f_pos += cnt; | 2444 | *ppos += cnt; |
| 2396 | 2445 | ||
| 2397 | return cnt; | 2446 | return cnt; |
| 2398 | } | 2447 | } |
| @@ -2534,7 +2583,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
| 2534 | } | 2583 | } |
| 2535 | mutex_unlock(&trace_types_lock); | 2584 | mutex_unlock(&trace_types_lock); |
| 2536 | 2585 | ||
| 2537 | filp->f_pos += cnt; | 2586 | *ppos += cnt; |
| 2538 | 2587 | ||
| 2539 | return cnt; | 2588 | return cnt; |
| 2540 | } | 2589 | } |
| @@ -2543,7 +2592,7 @@ static ssize_t | |||
| 2543 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | 2592 | tracing_set_trace_read(struct file *filp, char __user *ubuf, |
| 2544 | size_t cnt, loff_t *ppos) | 2593 | size_t cnt, loff_t *ppos) |
| 2545 | { | 2594 | { |
| 2546 | char buf[max_tracer_type_len+2]; | 2595 | char buf[MAX_TRACER_SIZE+2]; |
| 2547 | int r; | 2596 | int r; |
| 2548 | 2597 | ||
| 2549 | mutex_lock(&trace_types_lock); | 2598 | mutex_lock(&trace_types_lock); |
| @@ -2693,15 +2742,15 @@ static ssize_t | |||
| 2693 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 2742 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, |
| 2694 | size_t cnt, loff_t *ppos) | 2743 | size_t cnt, loff_t *ppos) |
| 2695 | { | 2744 | { |
| 2696 | char buf[max_tracer_type_len+1]; | 2745 | char buf[MAX_TRACER_SIZE+1]; |
| 2697 | int i; | 2746 | int i; |
| 2698 | size_t ret; | 2747 | size_t ret; |
| 2699 | int err; | 2748 | int err; |
| 2700 | 2749 | ||
| 2701 | ret = cnt; | 2750 | ret = cnt; |
| 2702 | 2751 | ||
| 2703 | if (cnt > max_tracer_type_len) | 2752 | if (cnt > MAX_TRACER_SIZE) |
| 2704 | cnt = max_tracer_type_len; | 2753 | cnt = MAX_TRACER_SIZE; |
| 2705 | 2754 | ||
| 2706 | if (copy_from_user(&buf, ubuf, cnt)) | 2755 | if (copy_from_user(&buf, ubuf, cnt)) |
| 2707 | return -EFAULT; | 2756 | return -EFAULT; |
| @@ -2716,7 +2765,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
| 2716 | if (err) | 2765 | if (err) |
| 2717 | return err; | 2766 | return err; |
| 2718 | 2767 | ||
| 2719 | filp->f_pos += ret; | 2768 | *ppos += ret; |
| 2720 | 2769 | ||
| 2721 | return ret; | 2770 | return ret; |
| 2722 | } | 2771 | } |
| @@ -3251,7 +3300,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
| 3251 | } | 3300 | } |
| 3252 | } | 3301 | } |
| 3253 | 3302 | ||
| 3254 | filp->f_pos += cnt; | 3303 | *ppos += cnt; |
| 3255 | 3304 | ||
| 3256 | /* If check pages failed, return ENOMEM */ | 3305 | /* If check pages failed, return ENOMEM */ |
| 3257 | if (tracing_disabled) | 3306 | if (tracing_disabled) |
| @@ -3271,22 +3320,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
| 3271 | return cnt; | 3320 | return cnt; |
| 3272 | } | 3321 | } |
| 3273 | 3322 | ||
| 3274 | static int mark_printk(const char *fmt, ...) | ||
| 3275 | { | ||
| 3276 | int ret; | ||
| 3277 | va_list args; | ||
| 3278 | va_start(args, fmt); | ||
| 3279 | ret = trace_vprintk(0, fmt, args); | ||
| 3280 | va_end(args); | ||
| 3281 | return ret; | ||
| 3282 | } | ||
| 3283 | |||
| 3284 | static ssize_t | 3323 | static ssize_t |
| 3285 | tracing_mark_write(struct file *filp, const char __user *ubuf, | 3324 | tracing_mark_write(struct file *filp, const char __user *ubuf, |
| 3286 | size_t cnt, loff_t *fpos) | 3325 | size_t cnt, loff_t *fpos) |
| 3287 | { | 3326 | { |
| 3288 | char *buf; | 3327 | char *buf; |
| 3289 | char *end; | ||
| 3290 | 3328 | ||
| 3291 | if (tracing_disabled) | 3329 | if (tracing_disabled) |
| 3292 | return -EINVAL; | 3330 | return -EINVAL; |
| @@ -3294,7 +3332,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
| 3294 | if (cnt > TRACE_BUF_SIZE) | 3332 | if (cnt > TRACE_BUF_SIZE) |
| 3295 | cnt = TRACE_BUF_SIZE; | 3333 | cnt = TRACE_BUF_SIZE; |
| 3296 | 3334 | ||
| 3297 | buf = kmalloc(cnt + 1, GFP_KERNEL); | 3335 | buf = kmalloc(cnt + 2, GFP_KERNEL); |
| 3298 | if (buf == NULL) | 3336 | if (buf == NULL) |
| 3299 | return -ENOMEM; | 3337 | return -ENOMEM; |
| 3300 | 3338 | ||
| @@ -3302,20 +3340,75 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
| 3302 | kfree(buf); | 3340 | kfree(buf); |
| 3303 | return -EFAULT; | 3341 | return -EFAULT; |
| 3304 | } | 3342 | } |
| 3343 | if (buf[cnt-1] != '\n') { | ||
| 3344 | buf[cnt] = '\n'; | ||
| 3345 | buf[cnt+1] = '\0'; | ||
| 3346 | } else | ||
| 3347 | buf[cnt] = '\0'; | ||
| 3305 | 3348 | ||
| 3306 | /* Cut from the first nil or newline. */ | 3349 | cnt = trace_vprintk(0, buf, NULL); |
| 3307 | buf[cnt] = '\0'; | ||
| 3308 | end = strchr(buf, '\n'); | ||
| 3309 | if (end) | ||
| 3310 | *end = '\0'; | ||
| 3311 | |||
| 3312 | cnt = mark_printk("%s\n", buf); | ||
| 3313 | kfree(buf); | 3350 | kfree(buf); |
| 3314 | *fpos += cnt; | 3351 | *fpos += cnt; |
| 3315 | 3352 | ||
| 3316 | return cnt; | 3353 | return cnt; |
| 3317 | } | 3354 | } |
| 3318 | 3355 | ||
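The tracing_mark_write() rewrite above drops mark_printk() and the old cut-at-first-newline behaviour: the buffer now carries two spare bytes so a missing trailing newline can be appended in place before the whole string goes to trace_vprintk(). A userspace model of that fix-up, with invented input bytes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char user_data[] = { 'h', 'i' };  /* no trailing '\n' */
        size_t cnt = sizeof(user_data);
        char *buf;

        buf = malloc(cnt + 2);          /* room for '\n' and '\0' */
        if (!buf)
            return 1;
        memcpy(buf, user_data, cnt);    /* copy_from_user() stand-in */

        /* Append a newline only when the writer did not supply one. */
        if (buf[cnt - 1] != '\n') {
            buf[cnt] = '\n';
            buf[cnt + 1] = '\0';
        } else
            buf[cnt] = '\0';

        fputs(buf, stdout);             /* prints "hi" plus newline */
        free(buf);
        return 0;
    }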
| 3356 | static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf, | ||
| 3357 | size_t cnt, loff_t *ppos) | ||
| 3358 | { | ||
| 3359 | char buf[64]; | ||
| 3360 | int bufiter = 0; | ||
| 3361 | int i; | ||
| 3362 | |||
| 3363 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) | ||
| 3364 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, | ||
| 3365 | "%s%s%s%s", i ? " " : "", | ||
| 3366 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, | ||
| 3367 | i == trace_clock_id ? "]" : ""); | ||
| 3368 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n"); | ||
| 3369 | |||
| 3370 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter); | ||
| 3371 | } | ||
| 3372 | |||
| 3373 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | ||
| 3374 | size_t cnt, loff_t *fpos) | ||
| 3375 | { | ||
| 3376 | char buf[64]; | ||
| 3377 | const char *clockstr; | ||
| 3378 | int i; | ||
| 3379 | |||
| 3380 | if (cnt >= sizeof(buf)) | ||
| 3381 | return -EINVAL; | ||
| 3382 | |||
| 3383 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 3384 | return -EFAULT; | ||
| 3385 | |||
| 3386 | buf[cnt] = 0; | ||
| 3387 | |||
| 3388 | clockstr = strstrip(buf); | ||
| 3389 | |||
| 3390 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { | ||
| 3391 | if (strcmp(trace_clocks[i].name, clockstr) == 0) | ||
| 3392 | break; | ||
| 3393 | } | ||
| 3394 | if (i == ARRAY_SIZE(trace_clocks)) | ||
| 3395 | return -EINVAL; | ||
| 3396 | |||
| 3397 | trace_clock_id = i; | ||
| 3398 | |||
| 3399 | mutex_lock(&trace_types_lock); | ||
| 3400 | |||
| 3401 | ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func); | ||
| 3402 | if (max_tr.buffer) | ||
| 3403 | ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func); | ||
| 3404 | |||
| 3405 | mutex_unlock(&trace_types_lock); | ||
| 3406 | |||
| 3407 | *fpos += cnt; | ||
| 3408 | |||
| 3409 | return cnt; | ||
| 3410 | } | ||
| 3411 | |||
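tracing_clock_write() above strips the incoming string and does a linear lookup in trace_clocks[] before pointing both ring buffers at the chosen clock function, replacing the TRACE_ITER_GLOBAL_CLK handling deleted from set_tracer_flags() earlier in this patch. A sketch of the lookup; the names assume the local/global pair implied by the removed trace_clock_local/trace_clock_global switch, and strip() is a minimal stand-in for the kernel's strstrip():

    #include <stdio.h>
    #include <string.h>
    #include <ctype.h>

    static const char *clock_names[] = { "local", "global" };
    #define NR_CLOCKS (sizeof(clock_names) / sizeof(clock_names[0]))

    /* Minimal strstrip() stand-in: trim trailing whitespace, skip
     * leading whitespace. */
    static char *strip(char *s)
    {
        size_t end = strlen(s);

        while (end && isspace((unsigned char)s[end - 1]))
            s[--end] = '\0';
        while (*s && isspace((unsigned char)*s))
            s++;
        return s;
    }

    int main(void)
    {
        char buf[64] = "global\n";      /* what echo(1) would deliver */
        const char *clockstr = strip(buf);
        size_t i;

        for (i = 0; i < NR_CLOCKS; i++)
            if (strcmp(clock_names[i], clockstr) == 0)
                break;
        if (i == NR_CLOCKS) {
            fprintf(stderr, "unknown clock\n");  /* -EINVAL upstream */
            return 1;
        }

        printf("selected clock %zu (%s)\n", i, clock_names[i]);
        return 0;
    }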
| 3319 | static const struct file_operations tracing_max_lat_fops = { | 3412 | static const struct file_operations tracing_max_lat_fops = { |
| 3320 | .open = tracing_open_generic, | 3413 | .open = tracing_open_generic, |
| 3321 | .read = tracing_max_lat_read, | 3414 | .read = tracing_max_lat_read, |
| @@ -3353,6 +3446,12 @@ static const struct file_operations tracing_mark_fops = { | |||
| 3353 | .write = tracing_mark_write, | 3446 | .write = tracing_mark_write, |
| 3354 | }; | 3447 | }; |
| 3355 | 3448 | ||
| 3449 | static const struct file_operations trace_clock_fops = { | ||
| 3450 | .open = tracing_open_generic, | ||
| 3451 | .read = tracing_clock_read, | ||
| 3452 | .write = tracing_clock_write, | ||
| 3453 | }; | ||
| 3454 | |||
| 3356 | struct ftrace_buffer_info { | 3455 | struct ftrace_buffer_info { |
| 3357 | struct trace_array *tr; | 3456 | struct trace_array *tr; |
| 3358 | void *spare; | 3457 | void *spare; |
| @@ -3620,7 +3719,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
| 3620 | 3719 | ||
| 3621 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 3720 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
| 3622 | if (!s) | 3721 | if (!s) |
| 3623 | return ENOMEM; | 3722 | return -ENOMEM; |
| 3624 | 3723 | ||
| 3625 | trace_seq_init(s); | 3724 | trace_seq_init(s); |
| 3626 | 3725 | ||
| @@ -3633,9 +3732,6 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
| 3633 | cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu); | 3732 | cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu); |
| 3634 | trace_seq_printf(s, "commit overrun: %ld\n", cnt); | 3733 | trace_seq_printf(s, "commit overrun: %ld\n", cnt); |
| 3635 | 3734 | ||
| 3636 | cnt = ring_buffer_nmi_dropped_cpu(tr->buffer, cpu); | ||
| 3637 | trace_seq_printf(s, "nmi dropped: %ld\n", cnt); | ||
| 3638 | |||
| 3639 | count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); | 3735 | count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); |
| 3640 | 3736 | ||
| 3641 | kfree(s); | 3737 | kfree(s); |
| @@ -4066,11 +4162,13 @@ static __init int tracer_init_debugfs(void) | |||
| 4066 | trace_create_file("current_tracer", 0644, d_tracer, | 4162 | trace_create_file("current_tracer", 0644, d_tracer, |
| 4067 | &global_trace, &set_tracer_fops); | 4163 | &global_trace, &set_tracer_fops); |
| 4068 | 4164 | ||
| 4165 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
| 4069 | trace_create_file("tracing_max_latency", 0644, d_tracer, | 4166 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
| 4070 | &tracing_max_latency, &tracing_max_lat_fops); | 4167 | &tracing_max_latency, &tracing_max_lat_fops); |
| 4071 | 4168 | ||
| 4072 | trace_create_file("tracing_thresh", 0644, d_tracer, | 4169 | trace_create_file("tracing_thresh", 0644, d_tracer, |
| 4073 | &tracing_thresh, &tracing_max_lat_fops); | 4170 | &tracing_thresh, &tracing_max_lat_fops); |
| 4171 | #endif | ||
| 4074 | 4172 | ||
| 4075 | trace_create_file("README", 0444, d_tracer, | 4173 | trace_create_file("README", 0444, d_tracer, |
| 4076 | NULL, &tracing_readme_fops); | 4174 | NULL, &tracing_readme_fops); |
| @@ -4087,6 +4185,9 @@ static __init int tracer_init_debugfs(void) | |||
| 4087 | trace_create_file("saved_cmdlines", 0444, d_tracer, | 4185 | trace_create_file("saved_cmdlines", 0444, d_tracer, |
| 4088 | NULL, &tracing_saved_cmdlines_fops); | 4186 | NULL, &tracing_saved_cmdlines_fops); |
| 4089 | 4187 | ||
| 4188 | trace_create_file("trace_clock", 0644, d_tracer, NULL, | ||
| 4189 | &trace_clock_fops); | ||
| 4190 | |||
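With the trace_clock file registered here, the clock becomes switchable from user space: reading it lists the available clocks with the active one bracketed (the format tracing_clock_read() builds above), and writing a name selects it, e.g. echo global > /sys/kernel/debug/tracing/trace_clock, assuming debugfs is mounted at its usual location.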
| 4090 | #ifdef CONFIG_DYNAMIC_FTRACE | 4191 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 4091 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 4192 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
| 4092 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 4193 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
| @@ -4265,7 +4366,6 @@ void ftrace_dump(void) | |||
| 4265 | 4366 | ||
| 4266 | __init static int tracer_alloc_buffers(void) | 4367 | __init static int tracer_alloc_buffers(void) |
| 4267 | { | 4368 | { |
| 4268 | struct trace_array_cpu *data; | ||
| 4269 | int ring_buf_size; | 4369 | int ring_buf_size; |
| 4270 | int i; | 4370 | int i; |
| 4271 | int ret = -ENOMEM; | 4371 | int ret = -ENOMEM; |
| @@ -4276,7 +4376,7 @@ __init static int tracer_alloc_buffers(void) | |||
| 4276 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 4376 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) |
| 4277 | goto out_free_buffer_mask; | 4377 | goto out_free_buffer_mask; |
| 4278 | 4378 | ||
| 4279 | if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | 4379 | if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) |
| 4280 | goto out_free_tracing_cpumask; | 4380 | goto out_free_tracing_cpumask; |
| 4281 | 4381 | ||
| 4282 | /* To save memory, keep the ring buffer size to its minimum */ | 4382 | /* To save memory, keep the ring buffer size to its minimum */ |
| @@ -4287,7 +4387,6 @@ __init static int tracer_alloc_buffers(void) | |||
| 4287 | 4387 | ||
| 4288 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 4388 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); |
| 4289 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 4389 | cpumask_copy(tracing_cpumask, cpu_all_mask); |
| 4290 | cpumask_clear(tracing_reader_cpumask); | ||
| 4291 | 4390 | ||
| 4292 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 4391 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
| 4293 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, | 4392 | global_trace.buffer = ring_buffer_alloc(ring_buf_size, |
| @@ -4315,7 +4414,7 @@ __init static int tracer_alloc_buffers(void) | |||
| 4315 | 4414 | ||
| 4316 | /* Allocate the first page for all buffers */ | 4415 | /* Allocate the first page for all buffers */ |
| 4317 | for_each_tracing_cpu(i) { | 4416 | for_each_tracing_cpu(i) { |
| 4318 | data = global_trace.data[i] = &per_cpu(global_trace_cpu, i); | 4417 | global_trace.data[i] = &per_cpu(global_trace_cpu, i); |
| 4319 | max_tr.data[i] = &per_cpu(max_data, i); | 4418 | max_tr.data[i] = &per_cpu(max_data, i); |
| 4320 | } | 4419 | } |
| 4321 | 4420 | ||
