path: root/kernel/trace/trace.c
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	387
1 files changed, 254 insertions, 133 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 63dbc7ff213f..5c75deeefe30 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,9 +43,6 @@
 
 #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
 
-unsigned long __read_mostly tracing_max_latency;
-unsigned long __read_mostly tracing_thresh;
-
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
@@ -172,10 +169,11 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
-int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+int filter_current_check_discard(struct ring_buffer *buffer,
+				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
 {
-	return filter_check_discard(call, rec, global_trace.buffer, event);
+	return filter_check_discard(call, rec, buffer, event);
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
@@ -266,6 +264,9 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
 	TRACE_ITER_GRAPH_TIME;
 
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -338,45 +339,6 @@ static struct {
 
 int trace_clock_id;
 
-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a raw_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-
-/*
- * Copy the new maximum trace into the separate maximum-trace
- * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
- */
-static void
-__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
-{
-	struct trace_array_cpu *data = tr->data[cpu];
-
-	max_tr.cpu = cpu;
-	max_tr.time_start = data->preempt_timestamp;
-
-	data = max_tr.data[cpu];
-	data->saved_latency = tracing_max_latency;
-
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
-	data->pid = tsk->pid;
-	data->uid = task_uid(tsk);
-	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
-	data->policy = tsk->policy;
-	data->rt_priority = tsk->rt_priority;
-
-	/* record this tasks comm */
-	tracing_record_cmdline(tsk);
-}
-
 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
 	int len;
@@ -420,6 +382,56 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }
 
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ *
+ * It is also used in other places outside the update_max_tr
+ * so it needs to be defined outside of the
+ * CONFIG_TRACER_MAX_TRACE.
+ */
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+unsigned long __read_mostly tracing_max_latency;
+unsigned long __read_mostly tracing_thresh;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ */
+static void
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+	struct trace_array_cpu *data = tr->data[cpu];
+	struct trace_array_cpu *max_data = tr->data[cpu];
+
+	max_tr.cpu = cpu;
+	max_tr.time_start = data->preempt_timestamp;
+
+	max_data = max_tr.data[cpu];
+	max_data->saved_latency = tracing_max_latency;
+	max_data->critical_start = data->critical_start;
+	max_data->critical_end = data->critical_end;
+
+	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	max_data->pid = tsk->pid;
+	max_data->uid = task_uid(tsk);
+	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+	max_data->policy = tsk->policy;
+	max_data->rt_priority = tsk->rt_priority;
+
+	/* record this tasks comm */
+	tracing_record_cmdline(tsk);
+}
+
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -434,16 +446,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct ring_buffer *buf = tr->buffer;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
-	ftrace_disable_cpu();
-	ring_buffer_reset(tr->buffer);
-	ftrace_enable_cpu();
-
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
@@ -461,21 +472,35 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	int ret;
 
+	if (trace_stop_count)
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
 
-	ring_buffer_reset(max_tr.buffer);
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
+	if (ret == -EBUSY) {
+		/*
+		 * We failed to swap the buffer due to a commit taking
+		 * place on this CPU. We fail to record, but we reset
+		 * the max trace buffer (no one writes directly to it)
+		 * and flag that it failed.
+		 */
+		trace_array_printk(&max_tr, _THIS_IP_,
+			"Failed to swap buffers due to commit in progress\n");
+	}
+
 	ftrace_enable_cpu();
 
-	WARN_ON_ONCE(ret && ret != -EAGAIN);
+	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 /**
  * register_tracer - register a tracer with the ftrace system.
@@ -532,7 +557,6 @@ __acquires(kernel_lock)
 	if (type->selftest && !tracing_selftest_disabled) {
 		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
-		int i;
 
 		/*
 		 * Run a selftest on this tracer.
@@ -541,8 +565,7 @@ __acquires(kernel_lock)
 		 * internal tracing to verify that everything is in order.
 		 * If we fail, we do not register this tracer.
 		 */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		current_trace = type;
 		/* the test is responsible for initializing and enabling */
@@ -555,8 +578,7 @@ __acquires(kernel_lock)
 			goto out;
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
-		for_each_tracing_cpu(i)
-			tracing_reset(tr, i);
+		tracing_reset_online_cpus(tr);
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -631,21 +653,42 @@ void unregister_tracer(struct tracer *type)
 	mutex_unlock(&trace_types_lock);
 }
 
-void tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct trace_array *tr, int cpu)
 {
 	ftrace_disable_cpu();
 	ring_buffer_reset_cpu(tr->buffer, cpu);
 	ftrace_enable_cpu();
 }
 
+void tracing_reset(struct trace_array *tr, int cpu)
+{
+	struct ring_buffer *buffer = tr->buffer;
+
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+	__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
+}
+
 void tracing_reset_online_cpus(struct trace_array *tr)
 {
+	struct ring_buffer *buffer = tr->buffer;
 	int cpu;
 
+	ring_buffer_record_disable(buffer);
+
+	/* Make sure all commits have finished */
+	synchronize_sched();
+
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		tracing_reset(tr, cpu);
+		__tracing_reset(tr, cpu);
+
+	ring_buffer_record_enable(buffer);
 }
 
 void tracing_reset_current(int cpu)
@@ -676,9 +719,6 @@ static void trace_init_cmdlines(void)
 	cmdline_idx = 0;
 }
 
-static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
-
 /**
  * ftrace_off_permanent - disable all ftrace code permanently
  *
@@ -859,14 +899,15 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
-struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
-						    int type,
-						    unsigned long len,
-						    unsigned long flags, int pc)
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+			  int type,
+			  unsigned long len,
+			  unsigned long flags, int pc)
 {
 	struct ring_buffer_event *event;
 
-	event = ring_buffer_lock_reserve(tr->buffer, len);
+	event = ring_buffer_lock_reserve(buffer, len);
 	if (event != NULL) {
 		struct trace_entry *ent = ring_buffer_event_data(event);
 
@@ -877,53 +918,59 @@ struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 	return event;
 }
 
-static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
-						struct ring_buffer_event *event,
-						unsigned long flags, int pc,
-						int wake)
+static inline void
+__trace_buffer_unlock_commit(struct ring_buffer *buffer,
+			     struct ring_buffer_event *event,
+			     unsigned long flags, int pc,
+			     int wake)
 {
-	ring_buffer_unlock_commit(tr->buffer, event);
+	ring_buffer_unlock_commit(buffer, event);
 
-	ftrace_trace_stack(tr, flags, 6, pc);
-	ftrace_trace_userstack(tr, flags, pc);
+	ftrace_trace_stack(buffer, flags, 6, pc);
+	ftrace_trace_userstack(buffer, flags, pc);
 
 	if (wake)
 		trace_wake_up();
 }
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 
 struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
+				  int type, unsigned long len,
 				  unsigned long flags, int pc)
 {
-	return trace_buffer_lock_reserve(&global_trace,
+	*current_rb = global_trace.buffer;
+	return trace_buffer_lock_reserve(*current_rb,
 					 type, len, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
 
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+				       struct ring_buffer_event *event,
+				       unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
 }
 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+					 struct ring_buffer_event *event)
 {
-	ring_buffer_discard_commit(global_trace.buffer, event);
+	ring_buffer_discard_commit(buffer, event);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
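The hunk above reworks the trace_current_buffer_*() helpers so that a call site obtains the ring buffer from the reserve call and hands that same buffer back to the filter, commit and discard helpers. A minimal sketch of a hypothetical call site under the new API (illustrative only, not part of this commit; event_call, MY_EVENT_TYPE, struct my_entry, irq_flags and pc are placeholder names):

	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct my_entry *entry;

	/* Reserve space; the helper reports back which buffer it used. */
	event = trace_current_buffer_lock_reserve(&buffer, MY_EVENT_TYPE,
						  sizeof(*entry), irq_flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* ... fill in the event-specific fields of *entry here ... */

	/* Commit only if the event filter does not discard the record. */
	if (!filter_current_check_discard(buffer, event_call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);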
@@ -933,6 +980,7 @@ trace_function(struct trace_array *tr,
 		   int pc)
 {
 	struct ftrace_event_call *call = &event_function;
+	struct ring_buffer *buffer = tr->buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
@@ -940,7 +988,7 @@ trace_function(struct trace_array *tr,
 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 					  flags, pc);
 	if (!event)
 		return;
@@ -948,8 +996,8 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -962,7 +1010,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct trace_array *tr,
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc)
 {
@@ -971,7 +1019,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	struct stack_entry *entry;
 	struct stack_trace trace;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -984,26 +1032,27 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
-void ftrace_trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-			int pc)
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
+			int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(buffer, flags, skip, pc);
 }
 
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc)
 {
-	__ftrace_trace_stack(tr, flags, skip, pc);
+	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
-void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
+void
+ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
 	struct ftrace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
@@ -1013,7 +1062,7 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		return;
@@ -1027,8 +1076,8 @@ void ftrace_trace_userstack(struct trace_array *tr, unsigned long flags, int pc)
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 }
 
 #ifdef UNUSED
@@ -1047,9 +1096,10 @@ ftrace_trace_special(void *__tr,
 {
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
+	struct ring_buffer *buffer = tr->buffer;
 	struct special_entry *entry;
 
-	event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
 					  sizeof(*entry), 0, pc);
 	if (!event)
 		return;
@@ -1057,7 +1107,7 @@ ftrace_trace_special(void *__tr,
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
-	trace_buffer_unlock_commit(tr, event, 0, pc);
+	trace_buffer_unlock_commit(buffer, event, 0, pc);
 }
 
 void
@@ -1103,6 +1153,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
 	struct bprint_entry *entry;
@@ -1135,7 +1186,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		goto out_unlock;
 
 	size = sizeof(*entry) + sizeof(u32) * len;
-	event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+					  flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1143,8 +1196,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1159,14 +1212,30 @@ out:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_array_printk(struct trace_array *tr,
+		       unsigned long ip, const char *fmt, ...)
+{
+	int ret;
+	va_list ap;
+
+	if (!(trace_flags & TRACE_ITER_PRINTK))
+		return 0;
+
+	va_start(ap, fmt);
+	ret = trace_array_vprintk(tr, ip, fmt, ap);
+	va_end(ap);
+	return ret;
+}
+
+int trace_array_vprintk(struct trace_array *tr,
+			unsigned long ip, const char *fmt, va_list args)
 {
 	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
-	struct trace_array *tr = &global_trace;
+	struct ring_buffer *buffer;
 	struct trace_array_cpu *data;
 	int cpu, len = 0, size, pc;
 	struct print_entry *entry;
@@ -1194,7 +1263,9 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	trace_buf[len] = 0;
 
 	size = sizeof(*entry) + len + 1;
-	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					  irq_flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1202,8 +1273,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
  out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1215,6 +1286,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	return len;
 }
+
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+{
+	return trace_array_printk(&global_trace, ip, fmt, args);
+}
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
 enum trace_file_type {
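trace_array_printk(), added above, gives internal tracers a printk-style way to write into a specific trace_array instead of the global buffer; update_max_tr_single() earlier in this patch uses it to record a failed per-CPU swap in max_tr. A hypothetical call, with the message text and the dropped/cpu values as illustrative placeholders:

	/* Write a formatted message into max_tr rather than global_trace. */
	trace_array_printk(&max_tr, _THIS_IP_,
			   "dropped %ld events on CPU %d\n", dropped, cpu);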
@@ -1354,6 +1430,37 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	return ent;
 }
 
+static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+{
+	struct trace_array *tr = iter->tr;
+	struct ring_buffer_event *event;
+	struct ring_buffer_iter *buf_iter;
+	unsigned long entries = 0;
+	u64 ts;
+
+	tr->data[cpu]->skipped_entries = 0;
+
+	if (!iter->buffer_iter[cpu])
+		return;
+
+	buf_iter = iter->buffer_iter[cpu];
+	ring_buffer_iter_reset(buf_iter);
+
+	/*
+	 * We could have the case with the max latency tracers
+	 * that a reset never took place on a cpu. This is evident
+	 * by the timestamp being before the start of the buffer.
+	 */
+	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
+		if (ts >= iter->tr->time_start)
+			break;
+		entries++;
+		ring_buffer_read(buf_iter, NULL);
+	}
+
+	tr->data[cpu]->skipped_entries = entries;
+}
+
 /*
  * No necessary locking here. The worst thing which can
  * happen is loosing events consumed at the same time
@@ -1392,10 +1499,9 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 	if (cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu)
-			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
+			tracing_iter_reset(iter, cpu);
 	} else
-		ring_buffer_iter_reset(iter->buffer_iter[cpu_file]);
-
+		tracing_iter_reset(iter, cpu_file);
 
 	ftrace_enable_cpu();
 
@@ -1444,16 +1550,32 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	struct trace_array *tr = iter->tr;
 	struct trace_array_cpu *data = tr->data[tr->cpu];
 	struct tracer *type = current_trace;
-	unsigned long total;
-	unsigned long entries;
+	unsigned long entries = 0;
+	unsigned long total = 0;
+	unsigned long count;
 	const char *name = "preemption";
+	int cpu;
 
 	if (type)
 		name = type->name;
 
-	entries = ring_buffer_entries(iter->tr->buffer);
-	total = entries +
-		ring_buffer_overruns(iter->tr->buffer);
+
+	for_each_tracing_cpu(cpu) {
+		count = ring_buffer_entries_cpu(tr->buffer, cpu);
+		/*
+		 * If this buffer has skipped entries, then we hold all
+		 * entries for the trace and we need to ignore the
+		 * ones before the time stamp.
+		 */
+		if (tr->data[cpu]->skipped_entries) {
+			count -= tr->data[cpu]->skipped_entries;
+			/* total is the same as the entries */
+			total += count;
+		} else
+			total += count +
+				ring_buffer_overrun_cpu(tr->buffer, cpu);
+		entries += count;
+	}
 
 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
 		   name, UTS_RELEASE);
@@ -1495,7 +1617,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 		seq_puts(m, "\n# => ended at: ");
 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
 		trace_print_seq(m, &iter->seq);
-		seq_puts(m, "#\n");
+		seq_puts(m, "\n#\n");
 	}
 
 	seq_puts(m, "#\n");
@@ -1514,6 +1636,9 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	if (cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
+	if (iter->tr->data[iter->cpu]->skipped_entries)
+		return;
+
 	cpumask_set_cpu(iter->cpu, iter->started);
 
 	/* Don't print started cpu buffer for the first entry of the trace */
@@ -1776,19 +1901,23 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (ring_buffer_overruns(iter->tr->buffer))
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
+	/* stop the trace while dumping */
+	tracing_stop();
+
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
 
 			iter->buffer_iter[cpu] =
 				ring_buffer_read_start(iter->tr->buffer, cpu);
+			tracing_iter_reset(iter, cpu);
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
 			ring_buffer_read_start(iter->tr->buffer, cpu);
+		tracing_iter_reset(iter, cpu);
 	}
 
-	/* TODO stop tracer */
 	ret = seq_open(file, &tracer_seq_ops);
 	if (ret < 0) {
 		fail_ret = ERR_PTR(ret);
@@ -1798,9 +1927,6 @@ __tracing_open(struct inode *inode, struct file *file)
 	m = file->private_data;
 	m->private = iter;
 
-	/* stop the trace while dumping */
-	tracing_stop();
-
 	mutex_unlock(&trace_types_lock);
 
 	return iter;
@@ -1811,6 +1937,7 @@ __tracing_open(struct inode *inode, struct file *file)
 		ring_buffer_read_finish(iter->buffer_iter[cpu]);
 	}
 	free_cpumask_var(iter->started);
+	tracing_start();
 fail:
 	mutex_unlock(&trace_types_lock);
 	kfree(iter->trace);
@@ -3774,17 +3901,9 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
-	switch (val) {
-	case 0:
-		trace_flags &= ~(1 << index);
-		break;
-	case 1:
-		trace_flags |= 1 << index;
-		break;
-
-	default:
+	if (val != 0 && val != 1)
 		return -EINVAL;
-	}
+	set_tracer_flags(1 << index, val);
 
 	*ppos += cnt;
 
@@ -3952,11 +4071,13 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("current_tracer", 0644, d_tracer,
 			&global_trace, &set_tracer_fops);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);