Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	704
1 file changed, 355 insertions(+), 349 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8bc8d8afea6a..5c75deeefe30 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,14 +43,11 @@
 
 #define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)
 
-unsigned long __read_mostly	tracing_max_latency;
-unsigned long __read_mostly	tracing_thresh;
-
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
  */
-static int ring_buffer_expanded;
+int ring_buffer_expanded;
 
 /*
  * We need to change this state when a selftest is running.
@@ -64,7 +61,7 @@ static bool __read_mostly tracing_selftest_running;
 /*
  * If a tracer is running, we do not want to run SELFTEST.
  */
-static bool __read_mostly tracing_selftest_disabled;
+bool __read_mostly tracing_selftest_disabled;
 
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
@@ -89,7 +86,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
@@ -172,10 +169,11 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
-int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+int filter_current_check_discard(struct ring_buffer *buffer,
+				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, global_trace.buffer, event);
+	return filter_check_discard(call, rec, buffer, event);
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
@@ -266,6 +264,9 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
 	TRACE_ITER_GRAPH_TIME;
 
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -323,50 +324,20 @@ static const char *trace_options[] = {
 	"printk-msg-only",
 	"context-info",
 	"latency-format",
-	"global-clock",
 	"sleep-time",
 	"graph-time",
 	NULL
 };
 
-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a raw_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-
-/*
- * Copy the new maximum trace into the separate maximum-trace
- * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
- */
-static void
-__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
-{
-	struct trace_array_cpu *data = tr->data[cpu];
-
-	max_tr.cpu = cpu;
-	max_tr.time_start = data->preempt_timestamp;
-
-	data = max_tr.data[cpu];
-	data->saved_latency = tracing_max_latency;
-
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
-	data->pid = tsk->pid;
-	data->uid = task_uid(tsk);
-	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
-	data->policy = tsk->policy;
-	data->rt_priority = tsk->rt_priority;
+static struct {
+	u64 (*func)(void);
+	const char *name;
+} trace_clocks[] = {
+	{ trace_clock_local,	"local" },
+	{ trace_clock_global,	"global" },
+};
 
-	/* record this tasks comm */
-	tracing_record_cmdline(tsk);
-}
+int trace_clock_id;
 
 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
@@ -411,6 +382,56 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }
 
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ *
+ * It is also used in other places outside of update_max_tr, so
+ * it needs to be defined outside of the CONFIG_TRACER_MAX_TRACE
+ * block.
+ */
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+unsigned long __read_mostly	tracing_max_latency;
+unsigned long __read_mostly	tracing_thresh;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ */
+static void
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *max_data;
+
+	max_tr.cpu = cpu;
+	max_tr.time_start = data->preempt_timestamp;
+
+	max_data = max_tr.data[cpu];
+	max_data->saved_latency = tracing_max_latency;
+	max_data->critical_start = data->critical_start;
+	max_data->critical_end = data->critical_end;
+
+	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
+	max_data->pid = tsk->pid;
+	max_data->uid = task_uid(tsk);
+	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+	max_data->policy = tsk->policy;
+	max_data->rt_priority = tsk->rt_priority;
+
+	/* record this task's comm */
+	tracing_record_cmdline(tsk);
+}
+
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
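The comment above captures the key design point: the snapshot is O(1) because only buffer pointers are exchanged, and that exchange needs one dedicated lock even though each buffer's contents are protected per-cpu. A minimal user-space analogue of the pattern, as a sketch only (plain C with pthreads; the names and two-buffer layout are illustrative, not kernel code):

	#include <pthread.h>

	struct snapshot_buf { long events[1024]; };

	static struct snapshot_buf bufs[2];
	static struct snapshot_buf *live = &bufs[0], *max_snap = &bufs[1];
	static pthread_mutex_t swap_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Take a "max" snapshot: swap the pointers, never copy the data. */
	static void take_snapshot(void)
	{
		pthread_mutex_lock(&swap_lock);	/* the swap needs its own lock */
		struct snapshot_buf *tmp = live;
		live = max_snap;
		max_snap = tmp;		/* the old live buffer is now the snapshot */
		pthread_mutex_unlock(&swap_lock);
	}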
@@ -425,16 +446,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct ring_buffer *buf = tr->buffer;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
-	ftrace_disable_cpu();
-	ring_buffer_reset(tr->buffer);
-	ftrace_enable_cpu();
-
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
@@ -452,21 +472,35 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	int ret;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
-	ring_buffer_reset(max_tr.buffer);
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
+	if (ret == -EBUSY) {
+		/*
+		 * We failed to swap the buffer due to a commit taking
+		 * place on this CPU. We fail to record, but we reset
+		 * the max trace buffer (no one writes directly to it)
+		 * and flag that it failed.
+		 */
+		trace_array_printk(&max_tr, _THIS_IP_,
+			"Failed to swap buffers due to commit in progress\n");
+	}
+
 	ftrace_enable_cpu();
 
-	WARN_ON_ONCE(ret && ret != -EAGAIN);
+	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 /**
  * register_tracer - register a tracer with the ftrace system.
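For context, a hedged sketch of how a latency tracer typically reaches update_max_tr_single(); this is loosely modeled on the irqsoff tracer's check_critical_timing() and omits its bookkeeping:

	static void report_latency_sketch(struct trace_array *tr, int cpu,
					  unsigned long latency)
	{
		if (latency <= tracing_max_latency)
			return;		/* not a new maximum, nothing to save */

		tracing_max_latency = latency;
		/* Swap this CPU's live buffer into max_tr; the swap may be
		 * skipped (and logged) if a commit is in flight, see the
		 * -EBUSY handling above. */
		update_max_tr_single(tr, current, cpu);
	}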
@@ -523,7 +557,6 @@ __acquires(kernel_lock)
 	if (type->selftest && !tracing_selftest_disabled) {
 		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
-		int i;
 
 		/*
 		 * Run a selftest on this tracer.
@@ -532,8 +565,7 @@ __acquires(kernel_lock)
 		 * internal tracing to verify that everything is in order.
 		 * If we fail, we do not register this tracer.
 		 */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		current_trace = type;
 		/* the test is responsible for initializing and enabling */
@@ -546,8 +578,7 @@ __acquires(kernel_lock)
 			goto out;
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -622,21 +653,42 @@ void unregister_tracer(struct tracer *type)
 	mutex_unlock(&trace_types_lock);
 }
 
-void tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct trace_array *tr, int cpu)
 {
 	ftrace_disable_cpu();
 	ring_buffer_reset_cpu(tr->buffer, cpu);
 	ftrace_enable_cpu();
 }
 
+void tracing_reset(struct trace_array *tr, int cpu)
+{
+	struct ring_buffer *buffer = tr->buffer;
+
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+	__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
+}
+
 void tracing_reset_online_cpus(struct trace_array *tr)
 {
+	struct ring_buffer *buffer = tr->buffer;
 	int cpu;
 
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
+		__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
 }
 
 void tracing_reset_current(int cpu)
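A hedged usage sketch of the new reset helpers: a tracer start path that clears the buffers before enabling. register_tracer() above performs exactly this reset step around the selftest; the rest of the function is illustrative:

	static int example_tracer_init(struct trace_array *tr)
	{
		/*
		 * tracing_reset_online_cpus() first disables recording and
		 * waits (synchronize_sched) for in-flight commits to drain,
		 * so no writer can race with the per-cpu resets it performs.
		 */
		tracing_reset_online_cpus(tr);
		tracing_start();
		return 0;
	}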
@@ -667,9 +719,6 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
-static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
-
 /**
  * ftrace_off_permanent - disable all ftrace code permanently
  *
@@ -848,15 +897,17 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
-struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
-						    int type,
-						    unsigned long len,
-						    unsigned long flags, int pc)
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+			  int type,
+			  unsigned long len,
+			  unsigned long flags, int pc)
 {
 	struct ring_buffer_event *event;
 
-	event = ring_buffer_lock_reserve(tr->buffer, len);
+	event = ring_buffer_lock_reserve(buffer, len);
 	if (event != NULL) {
 		struct trace_entry *ent = ring_buffer_event_data(event);
 
@@ -866,58 +917,60 @@ struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 
 	return event;
 }
-static void ftrace_trace_stack(struct trace_array *tr,
-			       unsigned long flags, int skip, int pc);
-static void ftrace_trace_userstack(struct trace_array *tr,
-				   unsigned long flags, int pc);
 
-static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
-					struct ring_buffer_event *event,
-					unsigned long flags, int pc,
-					int wake)
+static inline void
+__trace_buffer_unlock_commit(struct ring_buffer *buffer,
+			     struct ring_buffer_event *event,
+			     unsigned long flags, int pc,
+			     int wake)
 {
-	ring_buffer_unlock_commit(tr->buffer, event);
+	ring_buffer_unlock_commit(buffer, event);
 
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	ftrace_trace_stack(buffer, flags, 6, pc);
+	ftrace_trace_userstack(buffer, flags, pc);
 
 	if (wake)
 		trace_wake_up();
 }
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc)
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
+				  int type, unsigned long len,
 				  unsigned long flags, int pc)
 {
-	return trace_buffer_lock_reserve(&global_trace,
+	*current_rb = global_trace.buffer;
+	return trace_buffer_lock_reserve(*current_rb,
 					 type, len, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+				       struct ring_buffer_event *event,
+				       unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
 }
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+					 struct ring_buffer_event *event)
 {
-	ring_buffer_discard_commit(global_trace.buffer, event);
+	ring_buffer_discard_commit(buffer, event);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
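Taken together, the reserve/commit API now hands the caller the ring buffer it reserved from, instead of every helper assuming global_trace. A hedged sketch of a caller in the style of a TRACE_EVENT() macro expansion; the entry type, event call, and TRACE_EXAMPLE id are hypothetical, but the function signatures match the ones introduced above:

	static void probe_example(unsigned long flags, int pc)
	{
		struct ftrace_event_call *call = &event_example;  /* hypothetical */
		struct ring_buffer *buffer;
		struct ring_buffer_event *event;
		struct example_entry *entry;			  /* hypothetical */

		event = trace_current_buffer_lock_reserve(&buffer, TRACE_EXAMPLE,
							  sizeof(*entry), flags, pc);
		if (!event)
			return;
		entry = ring_buffer_event_data(event);
		entry->value = 42;				  /* fill the record */

		/* The same buffer pointer flows into filtering and commit. */
		if (!filter_current_check_discard(buffer, call, entry, event))
			trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
	}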
@@ -927,6 +980,7 @@ trace_function(struct trace_array *tr,
 	       int pc)
 {
 	struct ftrace_event_call *call = &event_function;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
@@ -934,7 +988,7 @@ trace_function(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 					  flags, pc);
 	if (!event)
 		return;
@@ -942,57 +996,9 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-}
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int __trace_graph_entry(struct trace_array *tr,
-			       struct ftrace_graph_ent *trace,
-			       unsigned long flags,
-			       int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_entry;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ent_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return 0;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return 0;
-	entry = ring_buffer_event_data(event);
-	entry->graph_ent = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
-
-	return 1;
-}
-
-static void __trace_graph_return(struct trace_array *tr,
-				 struct ftrace_graph_ret *trace,
-				 unsigned long flags,
-				 int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_exit;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ret_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->ret = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
-#endif
 
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
@@ -1003,17 +1009,17 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 		trace_function(tr, ip, parent_ip, flags, pc);
 }
 
-static void __ftrace_trace_stack(struct trace_array *tr,
+#ifdef CONFIG_STACKTRACE
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc)
 {
-#ifdef CONFIG_STACKTRACE
 	struct ftrace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -1026,32 +1032,28 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-#endif
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
-static void ftrace_trace_stack(struct trace_array *tr,
-			       unsigned long flags,
-			       int skip, int pc)
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
+			int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(buffer, flags, skip, pc);
 }
 
-void __trace_stack(struct trace_array *tr,
-		   unsigned long flags,
-		   int skip, int pc)
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+		   int pc)
 {
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
-static void ftrace_trace_userstack(struct trace_array *tr,
-				   unsigned long flags, int pc)
+void
+ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
-#ifdef CONFIG_STACKTRACE
 	struct ftrace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
@@ -1060,7 +1062,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -1074,9 +1076,8 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-#endif
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 #ifdef UNUSED
@@ -1086,6 +1087,8 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 }
 #endif /* UNUSED */
 
+#endif /* CONFIG_STACKTRACE */
+
 static void
 ftrace_trace_special(void *__tr,
 		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -1093,9 +1096,10 @@ ftrace_trace_special(void *__tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
+	struct ring_buffer *buffer = tr->buffer;
 	struct special_entry *entry;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
 					  sizeof(*entry), 0, pc);
 	if (!event)
 		return;
@@ -1103,7 +1107,7 @@ ftrace_trace_special(void *__tr,
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+	trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void
@@ -1114,62 +1118,6 @@ __trace_special(void *__tr, void *__data,
 }
 
 void
-tracing_sched_switch_trace(struct trace_array *tr,
-			   struct task_struct *prev,
-			   struct task_struct *next,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_context_switch;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-
-	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->prev_pid = prev->pid;
-	entry->prev_prio = prev->prio;
-	entry->prev_state = prev->state;
-	entry->next_pid = next->pid;
-	entry->next_prio = next->prio;
-	entry->next_state = next->state;
-	entry->next_cpu = task_cpu(next);
-
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, flags, pc);
-}
-
-void
-tracing_sched_wakeup_trace(struct trace_array *tr,
-			   struct task_struct *wakee,
-			   struct task_struct *curr,
-			   unsigned long flags, int pc)
-{
-	struct ftrace_event_call *call = &event_wakeup;
-	struct ring_buffer_event *event;
-	struct ctx_switch_entry *entry;
-
-	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->prev_pid = curr->pid;
-	entry->prev_prio = curr->prio;
-	entry->prev_state = curr->state;
-	entry->next_pid = wakee->pid;
-	entry->next_prio = wakee->prio;
-	entry->next_state = wakee->state;
-	entry->next_cpu = task_cpu(wakee);
-
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
-}
-
-void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 	struct trace_array *tr = &global_trace;
@@ -1193,68 +1141,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-int trace_graph_entry(struct ftrace_graph_ent *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int ret;
-	int cpu;
-	int pc;
-
-	if (!ftrace_trace_task(current))
-		return 0;
-
-	if (!ftrace_graph_addr(trace->func))
-		return 0;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		ret = __trace_graph_entry(tr, trace, flags, pc);
-	} else {
-		ret = 0;
-	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-
-	return ret;
-}
-
-void trace_graph_return(struct ftrace_graph_ret *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		__trace_graph_return(tr, trace, flags, pc);
-	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
@@ -1267,6 +1153,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
@@ -1299,7 +1186,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		goto out_unlock;
 
 	size = sizeof(*entry) + sizeof(u32) * len;
-	event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+					  flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1307,8 +1196,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1323,14 +1212,30 @@ out:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_array_printk(struct trace_array *tr,
+		       unsigned long ip, const char *fmt, ...)
+{
+	int ret;
+	va_list ap;
+
+	if (!(trace_flags & TRACE_ITER_PRINTK))
+		return 0;
+
+	va_start(ap, fmt);
+	ret = trace_array_vprintk(tr, ip, fmt, ap);
+	va_end(ap);
+	return ret;
+}
+
+int trace_array_vprintk(struct trace_array *tr,
+			unsigned long ip, const char *fmt, va_list args)
 {
 	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
-	struct trace_array *tr = &global_trace;
+	struct ring_buffer *buffer;
 	struct trace_array_cpu *data;
 	int cpu, len = 0, size, pc;
 	struct print_entry *entry;
@@ -1358,7 +1263,9 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	trace_buf[len] = 0;
 
 	size = sizeof(*entry) + len + 1;
-	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1366,8 +1273,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
  out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1379,6 +1286,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	return len;
 }
+
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+{
+	return trace_array_vprintk(&global_trace, ip, fmt, args);
+}
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
 enum trace_file_type {
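Note that trace_vprintk() must forward to trace_array_vprintk(), not the variadic trace_array_printk(): passing a va_list where "..." is expected is undefined behavior. With that in place, internal tracers can print into a specific trace_array instead of the global one; update_max_tr_single() above already uses this to flag a failed buffer swap. A minimal hedged example (the function name is illustrative):

	static void note_swap_failure(struct trace_array *tr)
	{
		/* Writes a printk-style message into tr's own ring buffer. */
		trace_array_printk(tr, _THIS_IP_,
				   "example: buffer swap skipped on cpu %d\n",
				   raw_smp_processor_id());
	}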
@@ -1518,6 +1430,37 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	return ent;
 }
 
+static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+{
+	struct trace_array *tr = iter->tr;
+	struct ring_buffer_event *event;
+	struct ring_buffer_iter *buf_iter;
+	unsigned long entries = 0;
+	u64 ts;
+
+	tr->data[cpu]->skipped_entries = 0;
+
+	if (!iter->buffer_iter[cpu])
+		return;
+
+	buf_iter = iter->buffer_iter[cpu];
+	ring_buffer_iter_reset(buf_iter);
+
+	/*
+	 * We could have the case with the max latency tracers
+	 * that a reset never took place on a cpu. This is evident
+	 * by the timestamp being before the start of the buffer.
+	 */
+	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
+		if (ts >= iter->tr->time_start)
+			break;
+		entries++;
+		ring_buffer_read(buf_iter, NULL);
+	}
+
+	tr->data[cpu]->skipped_entries = entries;
+}
+
 /*
  * No necessary locking here. The worst thing which can
  * happen is losing events consumed at the same time
@@ -1556,10 +1499,9 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 	if (cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu)
-			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
+			tracing_iter_reset(iter, cpu);
 	} else
-		ring_buffer_iter_reset(iter->buffer_iter[cpu_file]);
-
+		tracing_iter_reset(iter, cpu_file);
 
 	ftrace_enable_cpu();
 
@@ -1608,16 +1550,32 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	struct trace_array *tr = iter->tr;
 	struct trace_array_cpu *data = tr->data[tr->cpu];
 	struct tracer *type = current_trace;
-	unsigned long total;
-	unsigned long entries;
+	unsigned long entries = 0;
+	unsigned long total = 0;
+	unsigned long count;
 	const char *name = "preemption";
+	int cpu;
 
 	if (type)
 		name = type->name;
 
-	entries = ring_buffer_entries(iter->tr->buffer);
-	total = entries +
-		ring_buffer_overruns(iter->tr->buffer);
+
+	for_each_tracing_cpu(cpu) {
+		count = ring_buffer_entries_cpu(tr->buffer, cpu);
+		/*
+		 * If this buffer has skipped entries, then we hold all
+		 * entries for the trace and we need to ignore the
+		 * ones before the time stamp.
+		 */
+		if (tr->data[cpu]->skipped_entries) {
+			count -= tr->data[cpu]->skipped_entries;
+			/* total is the same as the entries */
+			total += count;
+		} else
+			total += count +
+				ring_buffer_overrun_cpu(tr->buffer, cpu);
+		entries += count;
+	}
 
 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
 		   name, UTS_RELEASE);
@@ -1659,7 +1617,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	seq_puts(m, "\n# => ended at:   ");
 	seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
 	trace_print_seq(m, &iter->seq);
-	seq_puts(m, "#\n");
+	seq_puts(m, "\n#\n");
 	}
 
 	seq_puts(m, "#\n");
@@ -1678,6 +1636,9 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	if (cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
+	if (iter->tr->data[iter->cpu]->skipped_entries)
+		return;
+
 	cpumask_set_cpu(iter->cpu, iter->started);
 
 	/* Don't print started cpu buffer for the first entry of the trace */
@@ -1940,19 +1901,23 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (ring_buffer_overruns(iter->tr->buffer))
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
+	/* stop the trace while dumping */
+	tracing_stop();
+
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
 
 			iter->buffer_iter[cpu] =
 				ring_buffer_read_start(iter->tr->buffer, cpu);
+			tracing_iter_reset(iter, cpu);
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
 			ring_buffer_read_start(iter->tr->buffer, cpu);
+		tracing_iter_reset(iter, cpu);
 	}
 
-	/* TODO stop tracer */
 	ret = seq_open(file, &tracer_seq_ops);
 	if (ret < 0) {
 		fail_ret = ERR_PTR(ret);
@@ -1962,9 +1927,6 @@ __tracing_open(struct inode *inode, struct file *file)
 	m = file->private_data;
 	m->private = iter;
 
-	/* stop the trace while dumping */
-	tracing_stop();
-
 	mutex_unlock(&trace_types_lock);
 
 	return iter;
@@ -1975,6 +1937,7 @@ __tracing_open(struct inode *inode, struct file *file)
 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
 	}
 	free_cpumask_var(iter->started);
+	tracing_start();
 fail:
 	mutex_unlock(&trace_types_lock);
 	kfree(iter->trace);
@@ -2031,7 +1994,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 
 	/* If this file was open for write, then erase contents */
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND)) {
+	    (file->f_flags & O_TRUNC)) {
 		long cpu = (long) inode->i_private;
 
 		if (cpu == TRACE_PIPE_ALL_CPU)
@@ -2256,8 +2219,8 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
 			len += 3; /* "no" and newline */
 	}
 
-	/* +2 for \n and \0 */
-	buf = kmalloc(len + 2, GFP_KERNEL);
+	/* +1 for \0 */
+	buf = kmalloc(len + 1, GFP_KERNEL);
 	if (!buf) {
 		mutex_unlock(&trace_types_lock);
 		return -ENOMEM;
@@ -2280,7 +2243,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
 	}
 	mutex_unlock(&trace_types_lock);
 
-	WARN_ON(r >= len + 2);
+	WARN_ON(r >= len + 1);
 
 	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 
@@ -2291,23 +2254,23 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
 /* Try to assign a tracer specific option */
 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 {
-	struct tracer_flags *trace_flags = trace->flags;
+	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
 	int ret = 0, i = 0;
 	int len;
 
-	for (i = 0; trace_flags->opts[i].name; i++) {
-		opts = &trace_flags->opts[i];
+	for (i = 0; tracer_flags->opts[i].name; i++) {
+		opts = &tracer_flags->opts[i];
 		len = strlen(opts->name);
 
 		if (strncmp(cmp, opts->name, len) == 0) {
-			ret = trace->set_flag(trace_flags->val,
+			ret = trace->set_flag(tracer_flags->val,
 						opts->bit, !neg);
 			break;
 		}
 	}
 	/* Not found */
-	if (!trace_flags->opts[i].name)
+	if (!tracer_flags->opts[i].name)
 		return -EINVAL;
 
 	/* Refused to handle */
@@ -2315,9 +2278,9 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 		return ret;
 
 	if (neg)
-		trace_flags->val &= ~opts->bit;
+		tracer_flags->val &= ~opts->bit;
 	else
-		trace_flags->val |= opts->bit;
+		tracer_flags->val |= opts->bit;
 
 	return 0;
 }
@@ -2332,22 +2295,6 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 		trace_flags |= mask;
 	else
 		trace_flags &= ~mask;
-
-	if (mask == TRACE_ITER_GLOBAL_CLK) {
-		u64 (*func)(void);
-
-		if (enabled)
-			func = trace_clock_global;
-		else
-			func = trace_clock_local;
-
-		mutex_lock(&trace_types_lock);
-		ring_buffer_set_clock(global_trace.buffer, func);
-
-		if (max_tr.buffer)
-			ring_buffer_set_clock(max_tr.buffer, func);
-		mutex_unlock(&trace_types_lock);
-	}
 }
 
 static ssize_t
@@ -3085,7 +3032,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 			break;
 		}
 
-		trace_consume(iter);
+		if (ret != TRACE_TYPE_NO_CONSUME)
+			trace_consume(iter);
 		rem -= count;
 		if (!find_next_entry_inc(iter)) {
 			rem = 0;
@@ -3314,6 +3262,62 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
+				  size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int bufiter = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
+		bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+			"%s%s%s%s", i ? " " : "",
+			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
+			i == trace_clock_id ? "]" : "");
+	bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+}
+
+static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
+				   size_t cnt, loff_t *fpos)
+{
+	char buf[64];
+	const char *clockstr;
+	int i;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	clockstr = strstrip(buf);
+
+	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
+		if (strcmp(trace_clocks[i].name, clockstr) == 0)
+			break;
+	}
+	if (i == ARRAY_SIZE(trace_clocks))
+		return -EINVAL;
+
+	trace_clock_id = i;
+
+	mutex_lock(&trace_types_lock);
+
+	ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
+	if (max_tr.buffer)
+		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
+
+	mutex_unlock(&trace_types_lock);
+
+	*fpos += cnt;
+
+	return cnt;
+}
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
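From user space the new trace_clock file behaves like the other tracing control files: reading it lists the clocks defined in trace_clocks[] with the active one bracketed, and writing a name re-clocks both the live and max buffers. A hedged example (assumes debugfs is mounted at /sys/kernel/debug):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd = open("/sys/kernel/debug/tracing/trace_clock", O_RDWR);

		if (fd < 0)
			return 1;

		/* Prints e.g. "[local] global"; brackets mark the active clock. */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0)
			printf("%.*s", (int)n, buf);

		/* Select the cross-cpu ordered clock. */
		if (write(fd, "global", strlen("global")) < 0)
			perror("write");

		close(fd);
		return 0;
	}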
@@ -3351,6 +3355,12 @@ static const struct file_operations tracing_mark_fops = {
 	.write		= tracing_mark_write,
 };
 
+static const struct file_operations trace_clock_fops = {
+	.open		= tracing_open_generic,
+	.read		= tracing_clock_read,
+	.write		= tracing_clock_write,
+};
+
 struct ftrace_buffer_info {
 	struct trace_array	*tr;
 	void			*spare;
@@ -3631,9 +3641,6 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
 
-	cnt = ring_buffer_nmi_dropped_cpu(tr->buffer, cpu);
-	trace_seq_printf(s, "nmi dropped: %ld\n", cnt);
-
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -3894,17 +3901,9 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
-	switch (val) {
-	case 0:
-		trace_flags &= ~(1 << index);
-		break;
-	case 1:
-		trace_flags |= 1 << index;
-		break;
-
-	default:
+	if (val != 0 && val != 1)
 		return -EINVAL;
-	}
+	set_tracer_flags(1 << index, val);
 
 	*ppos += cnt;
 
@@ -4072,11 +4071,13 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("current_tracer", 0644, d_tracer,
 			&global_trace, &set_tracer_fops);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
@@ -4093,6 +4094,9 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("saved_cmdlines", 0444, d_tracer,
 			NULL, &tracing_saved_cmdlines_fops);
 
+	trace_create_file("trace_clock", 0644, d_tracer, NULL,
+			  &trace_clock_fops);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4233,8 +4237,11 @@ static void __ftrace_dump(bool disable_tracing)
 		iter.pos = -1;
 
 		if (find_next_entry_inc(&iter) != NULL) {
-			print_trace_line(&iter);
-			trace_consume(&iter);
+			int ret;
+
+			ret = print_trace_line(&iter);
+			if (ret != TRACE_TYPE_NO_CONSUME)
+				trace_consume(&iter);
 		}
 
 		trace_printk_seq(&iter.seq);
@@ -4268,7 +4275,6 @@ void ftrace_dump(void)
 
 __init static int tracer_alloc_buffers(void)
 {
-	struct trace_array_cpu *data;
 	int ring_buf_size;
 	int i;
 	int ret = -ENOMEM;
@@ -4318,7 +4324,7 @@ __init static int tracer_alloc_buffers(void)
 
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
-		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
+		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
 		max_tr.data[i] = &per_cpu(max_data, i);
 	}
 