-rw-r--r--  include/linux/ftrace_event.h            2
-rw-r--r--  kernel/trace/blktrace.c                  4
-rw-r--r--  kernel/trace/trace.c                   486
-rw-r--r--  kernel/trace/trace.h                    37
-rw-r--r--  kernel/trace/trace_functions.c           8
-rw-r--r--  kernel/trace/trace_functions_graph.c    12
-rw-r--r--  kernel/trace/trace_irqsoff.c            10
-rw-r--r--  kernel/trace/trace_kdb.c                 8
-rw-r--r--  kernel/trace/trace_mmiotrace.c          12
-rw-r--r--  kernel/trace/trace_output.c              2
-rw-r--r--  kernel/trace/trace_sched_switch.c        8
-rw-r--r--  kernel/trace/trace_sched_wakeup.c       16
-rw-r--r--  kernel/trace/trace_selftest.c           42
-rw-r--r--  kernel/trace/trace_syscalls.c            4
14 files changed, 365 insertions(+), 286 deletions(-)
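
A note on the shape of this change: the struct definitions behind it live in
kernel/trace/trace.h (listed in the diffstat above but not shown in this
excerpt). Inferred from the accesses in the hunks below -- tr->trace_buffer.buffer,
per_cpu_ptr(tr->trace_buffer.data, cpu), buf->cpu, buf->time_start -- the new
layout is roughly the sketch here; the field order and the back pointer are
assumptions, not quoted from the patch:

/* Sketch only -- field set inferred from the accesses in this patch. */
struct trace_buffer {
        struct trace_array              *tr;            /* assumed back pointer */
        struct ring_buffer              *buffer;        /* was trace_array::buffer */
        struct trace_array_cpu __percpu *data;          /* was trace_array::data */
        cycle_t                         time_start;     /* was trace_array::time_start */
        int                             cpu;            /* was trace_array::cpu */
};

struct trace_array {
        /* ... */
        struct trace_buffer     trace_buffer;   /* the live trace */
#ifdef CONFIG_TRACER_MAX_TRACE
        struct trace_buffer     max_buffer;     /* replaces the static max_tr */
#endif
        /* ... */
};

With that in place, most of the patch is mechanical: tr->buffer becomes
tr->trace_buffer.buffer, per_cpu_ptr(tr->data, cpu) becomes
per_cpu_ptr(tr->trace_buffer.data, cpu), and uses of the old static max_tr
become tr->max_buffer, guarded by CONFIG_TRACER_MAX_TRACE.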
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index d6964244e567..d84c4a575514 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -8,6 +8,7 @@
 #include <linux/perf_event.h>
 
 struct trace_array;
+struct trace_buffer;
 struct tracer;
 struct dentry;
 
@@ -67,6 +68,7 @@ struct trace_entry {
 struct trace_iterator {
        struct trace_array      *tr;
        struct tracer           *trace;
+       struct trace_buffer     *trace_buffer;
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 71259e2b6b61..90a55054744c 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -72,7 +72,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
        bool blk_tracer = blk_tracer_enabled;
 
        if (blk_tracer) {
-               buffer = blk_tr->buffer;
+               buffer = blk_tr->trace_buffer.buffer;
                pc = preempt_count();
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                  sizeof(*t) + len,
@@ -218,7 +218,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
        if (blk_tracer) {
                tracing_record_cmdline(current);
 
-               buffer = blk_tr->buffer;
+               buffer = blk_tr->trace_buffer.buffer;
                pc = preempt_count();
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                  sizeof(*t) + pdu_len,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c8a852a55db4..a08c127db865 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -195,27 +195,15 @@ cycle_t ftrace_now(int cpu)
        u64 ts;
 
        /* Early boot up does not have a buffer yet */
-       if (!global_trace.buffer)
+       if (!global_trace.trace_buffer.buffer)
                return trace_clock_local();
 
-       ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
-       ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
+       ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
+       ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
 
        return ts;
 }
 
-/*
- * The max_tr is used to snapshot the global_trace when a maximum
- * latency is reached. Some tracers will use this to store a maximum
- * trace while it continues examining live traces.
- *
- * The buffers for the max_tr are set up the same as the global_trace.
- * When a snapshot is taken, the link list of the max_tr is swapped
- * with the link list of the global_trace and the buffers are reset for
- * the global_trace so the tracing can continue.
- */
-static struct trace_array      max_tr;
-
 int tracing_is_enabled(void)
 {
        return tracing_is_on();
@@ -339,8 +327,8 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
  */
 void tracing_on(void)
 {
-       if (global_trace.buffer)
-               ring_buffer_record_on(global_trace.buffer);
+       if (global_trace.trace_buffer.buffer)
+               ring_buffer_record_on(global_trace.trace_buffer.buffer);
        /*
         * This flag is only looked at when buffers haven't been
         * allocated yet. We don't really care about the race
@@ -361,8 +349,8 @@ EXPORT_SYMBOL_GPL(tracing_on);
  */
 void tracing_off(void)
 {
-       if (global_trace.buffer)
-               ring_buffer_record_off(global_trace.buffer);
+       if (global_trace.trace_buffer.buffer)
+               ring_buffer_record_off(global_trace.trace_buffer.buffer);
        /*
         * This flag is only looked at when buffers haven't been
         * allocated yet. We don't really care about the race
@@ -378,8 +366,8 @@ EXPORT_SYMBOL_GPL(tracing_off);
  */
 int tracing_is_on(void)
 {
-       if (global_trace.buffer)
-               return ring_buffer_record_is_on(global_trace.buffer);
+       if (global_trace.trace_buffer.buffer)
+               return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
        return !global_trace.buffer_disabled;
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
@@ -637,13 +625,14 @@ unsigned long __read_mostly tracing_max_latency;
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-       struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);
-       struct trace_array_cpu *max_data;
+       struct trace_buffer *trace_buf = &tr->trace_buffer;
+       struct trace_buffer *max_buf = &tr->max_buffer;
+       struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
+       struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 
-       max_tr.cpu = cpu;
-       max_tr.time_start = data->preempt_timestamp;
+       max_buf->cpu = cpu;
+       max_buf->time_start = data->preempt_timestamp;
 
-       max_data = per_cpu_ptr(max_tr.data, cpu);
        max_data->saved_latency = tracing_max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;
@@ -686,9 +675,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&ftrace_max_lock);
 
-       buf = tr->buffer;
-       tr->buffer = max_tr.buffer;
-       max_tr.buffer = buf;
+       buf = tr->trace_buffer.buffer;
+       tr->trace_buffer.buffer = tr->max_buffer.buffer;
+       tr->max_buffer.buffer = buf;
 
        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&ftrace_max_lock);
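
The three-line swap above is the whole snapshot mechanism: recording a new
maximum-latency trace is an O(1) exchange of ring buffer pointers under
ftrace_max_lock, with no events copied, and tracing continues seamlessly into
what used to be the max buffer. A self-contained sketch of the idea, using
stand-in types rather than the kernel's:

/* Illustrative only -- mimics the swap in update_max_tr() above. */
struct ring_buffer;                     /* opaque, as in the kernel */

struct buf_slot {
        struct ring_buffer *buffer;
};

static void snapshot_swap(struct buf_slot *live, struct buf_slot *max)
{
        struct ring_buffer *buf = live->buffer;

        live->buffer = max->buffer;     /* tracing continues here */
        max->buffer = buf;              /* old live buffer is frozen as the snapshot */
}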
@@ -716,7 +705,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&ftrace_max_lock);
 
-       ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
+       ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
 
        if (ret == -EBUSY) {
                /*
@@ -725,7 +714,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
-               trace_array_printk(&max_tr, _THIS_IP_,
+               trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }
 
@@ -742,7 +731,7 @@ static void default_wait_pipe(struct trace_iterator *iter)
        if (trace_buffer_iter(iter, iter->cpu_file))
                return;
 
-       ring_buffer_wait(iter->tr->buffer, iter->cpu_file);
+       ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
 }
 
 /**
@@ -803,17 +792,19 @@ int register_tracer(struct tracer *type)
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
-       tracing_reset_online_cpus(tr);
+       tracing_reset_online_cpus(&tr->trace_buffer);
 
        tr->current_trace = type;
 
+#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
-                       ring_buffer_resize(max_tr.buffer, trace_buf_size,
+                       ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                type->allocated_snapshot = true;
        }
+#endif
 
        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
@@ -827,16 +818,18 @@ int register_tracer(struct tracer *type)
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
-               tracing_reset_online_cpus(tr);
+               tracing_reset_online_cpus(&tr->trace_buffer);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
                if (type->use_max_tr) {
                        type->allocated_snapshot = false;
 
                        /* Shrink the max buffer again */
                        if (ring_buffer_expanded)
-                               ring_buffer_resize(max_tr.buffer, 1,
+                               ring_buffer_resize(tr->max_buffer.buffer, 1,
                                                   RING_BUFFER_ALL_CPUS);
                }
+#endif
 
                printk(KERN_CONT "PASSED\n");
        }
@@ -870,9 +863,9 @@ int register_tracer(struct tracer *type)
        return ret;
 }
 
-void tracing_reset(struct trace_array *tr, int cpu)
+void tracing_reset(struct trace_buffer *buf, int cpu)
 {
-       struct ring_buffer *buffer = tr->buffer;
+       struct ring_buffer *buffer = buf->buffer;
 
        if (!buffer)
                return;
@@ -886,9 +879,9 @@ void tracing_reset(struct trace_array *tr, int cpu)
        ring_buffer_record_enable(buffer);
 }
 
-void tracing_reset_online_cpus(struct trace_array *tr)
+void tracing_reset_online_cpus(struct trace_buffer *buf)
 {
-       struct ring_buffer *buffer = tr->buffer;
+       struct ring_buffer *buffer = buf->buffer;
        int cpu;
 
        if (!buffer)
@@ -899,7 +892,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
        /* Make sure all commits have finished */
        synchronize_sched();
 
-       tr->time_start = ftrace_now(tr->cpu);
+       buf->time_start = ftrace_now(buf->cpu);
 
        for_each_online_cpu(cpu)
                ring_buffer_reset_cpu(buffer, cpu);
@@ -909,7 +902,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 
 void tracing_reset_current(int cpu)
 {
-       tracing_reset(&global_trace, cpu);
+       tracing_reset(&global_trace.trace_buffer, cpu);
 }
 
 void tracing_reset_all_online_cpus(void)
@@ -918,7 +911,10 @@ void tracing_reset_all_online_cpus(void)
 
        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-               tracing_reset_online_cpus(tr);
+               tracing_reset_online_cpus(&tr->trace_buffer);
+#ifdef CONFIG_TRACER_MAX_TRACE
+               tracing_reset_online_cpus(&tr->max_buffer);
+#endif
        }
        mutex_unlock(&trace_types_lock);
 }
@@ -988,13 +984,15 @@ void tracing_start(void)
        /* Prevent the buffers from switching */
        arch_spin_lock(&ftrace_max_lock);
 
-       buffer = global_trace.buffer;
+       buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
 
-       buffer = max_tr.buffer;
+#ifdef CONFIG_TRACER_MAX_TRACE
+       buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
+#endif
 
        arch_spin_unlock(&ftrace_max_lock);
 
@@ -1026,7 +1024,7 @@ static void tracing_start_tr(struct trace_array *tr)
                goto out;
        }
 
-       buffer = tr->buffer;
+       buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
 
@@ -1053,13 +1051,15 @@ void tracing_stop(void)
        /* Prevent the buffers from switching */
        arch_spin_lock(&ftrace_max_lock);
 
-       buffer = global_trace.buffer;
+       buffer = global_trace.trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
 
-       buffer = max_tr.buffer;
+#ifdef CONFIG_TRACER_MAX_TRACE
+       buffer = global_trace.max_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
+#endif
 
        arch_spin_unlock(&ftrace_max_lock);
 
@@ -1080,7 +1080,7 @@ static void tracing_stop_tr(struct trace_array *tr)
        if (tr->stop_count++)
                goto out;
 
-       buffer = tr->buffer;
+       buffer = tr->trace_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
 
@@ -1246,7 +1246,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
                          int type, unsigned long len,
                          unsigned long flags, int pc)
 {
-       *current_rb = ftrace_file->tr->buffer;
+       *current_rb = ftrace_file->tr->trace_buffer.buffer;
        return trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
 }
@@ -1257,7 +1257,7 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
                          int type, unsigned long len,
                          unsigned long flags, int pc)
 {
-       *current_rb = global_trace.buffer;
+       *current_rb = global_trace.trace_buffer.buffer;
        return trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
 }
@@ -1296,7 +1296,7 @@ trace_function(struct trace_array *tr,
               int pc)
 {
        struct ftrace_event_call *call = &event_function;
-       struct ring_buffer *buffer = tr->buffer;
+       struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
 
@@ -1437,7 +1437,7 @@ void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
                   int pc)
 {
-       __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
+       __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
 }
 
 /**
@@ -1453,7 +1453,8 @@ void trace_dump_stack(void)
        local_save_flags(flags);
 
        /* skipping 3 traces, seems to get us at the caller of this function */
-       __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
+       __ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, 3,
+                            preempt_count(), NULL);
 }
 
 static DEFINE_PER_CPU(int, user_stack_count);
@@ -1623,7 +1624,7 @@ void trace_printk_init_buffers(void)
         * directly here. If the global_trace.buffer is already
         * allocated here, then this was called by module code.
         */
-       if (global_trace.buffer)
+       if (global_trace.trace_buffer.buffer)
                tracing_start_cmdline_record();
 }
 
@@ -1683,7 +1684,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
        local_save_flags(flags);
        size = sizeof(*entry) + sizeof(u32) * len;
-       buffer = tr->buffer;
+       buffer = tr->trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
                                          flags, pc);
        if (!event)
@@ -1706,27 +1707,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
-int trace_array_printk(struct trace_array *tr,
-                      unsigned long ip, const char *fmt, ...)
-{
-       int ret;
-       va_list ap;
-
-       if (!(trace_flags & TRACE_ITER_PRINTK))
-               return 0;
-
-       va_start(ap, fmt);
-       ret = trace_array_vprintk(tr, ip, fmt, ap);
-       va_end(ap);
-       return ret;
-}
-
-int trace_array_vprintk(struct trace_array *tr,
-                       unsigned long ip, const char *fmt, va_list args)
+static int
+__trace_array_vprintk(struct ring_buffer *buffer,
+                     unsigned long ip, const char *fmt, va_list args)
 {
        struct ftrace_event_call *call = &event_print;
        struct ring_buffer_event *event;
-       struct ring_buffer *buffer;
        int len = 0, size, pc;
        struct print_entry *entry;
        unsigned long flags;
@@ -1754,7 +1740,6 @@ int trace_array_vprintk(struct trace_array *tr,
 
        local_save_flags(flags);
        size = sizeof(*entry) + len + 1;
-       buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                                          flags, pc);
        if (!event)
@@ -1775,6 +1760,42 @@ int trace_array_vprintk(struct trace_array *tr,
        return len;
 }
 
+int trace_array_vprintk(struct trace_array *tr,
+                       unsigned long ip, const char *fmt, va_list args)
+{
+       return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
+}
+
+int trace_array_printk(struct trace_array *tr,
+                      unsigned long ip, const char *fmt, ...)
+{
+       int ret;
+       va_list ap;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       va_start(ap, fmt);
+       ret = trace_array_vprintk(tr, ip, fmt, ap);
+       va_end(ap);
+       return ret;
+}
+
+int trace_array_printk_buf(struct ring_buffer *buffer,
+                          unsigned long ip, const char *fmt, ...)
+{
+       int ret;
+       va_list ap;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       va_start(ap, fmt);
+       ret = __trace_array_vprintk(buffer, ip, fmt, ap);
+       va_end(ap);
+       return ret;
+}
+
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
        return trace_array_vprintk(&global_trace, ip, fmt, args);
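
The reshuffle above splits the old trace_array_vprintk() into a buffer-keyed
core plus wrappers, adding trace_array_printk_buf() so callers can print into a
bare ring buffer now that the max buffer is no longer wrapped in a trace_array
of its own. Its one user in this patch is the swap-failure path of
update_max_tr_single() earlier, which now reads:

        trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                "Failed to swap buffers due to commit in progress\n");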
@@ -1800,7 +1821,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
        if (buf_iter)
                event = ring_buffer_iter_peek(buf_iter, ts);
        else
-               event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
+               event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
                                         lost_events);
 
        if (event) {
@@ -1815,7 +1836,7 @@ static struct trace_entry *
 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
                  unsigned long *missing_events, u64 *ent_ts)
 {
-       struct ring_buffer *buffer = iter->tr->buffer;
+       struct ring_buffer *buffer = iter->trace_buffer->buffer;
        struct trace_entry *ent, *next = NULL;
        unsigned long lost_events = 0, next_lost = 0;
        int cpu_file = iter->cpu_file;
@@ -1892,7 +1913,7 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-       ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
+       ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
                            &iter->lost_events);
 }
 
@@ -1925,13 +1946,12 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
-       struct trace_array *tr = iter->tr;
        struct ring_buffer_event *event;
        struct ring_buffer_iter *buf_iter;
        unsigned long entries = 0;
        u64 ts;
 
-       per_cpu_ptr(tr->data, cpu)->skipped_entries = 0;
+       per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
 
        buf_iter = trace_buffer_iter(iter, cpu);
        if (!buf_iter)
@@ -1945,13 +1965,13 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
         * by the timestamp being before the start of the buffer.
         */
        while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
-               if (ts >= iter->tr->time_start)
+               if (ts >= iter->trace_buffer->time_start)
                        break;
                entries++;
                ring_buffer_read(buf_iter, NULL);
        }
 
-       per_cpu_ptr(tr->data, cpu)->skipped_entries = entries;
+       per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
 }
 
 /*
@@ -1978,8 +1998,10 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                *iter->trace = *tr->current_trace;
        mutex_unlock(&trace_types_lock);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
        if (iter->snapshot && iter->trace->use_max_tr)
                return ERR_PTR(-EBUSY);
+#endif
 
        if (!iter->snapshot)
                atomic_inc(&trace_record_cmdline_disabled);
@@ -2021,17 +2043,21 @@ static void s_stop(struct seq_file *m, void *p)
 {
        struct trace_iterator *iter = m->private;
 
+#ifdef CONFIG_TRACER_MAX_TRACE
        if (iter->snapshot && iter->trace->use_max_tr)
                return;
+#endif
 
        if (!iter->snapshot)
                atomic_dec(&trace_record_cmdline_disabled);
+
        trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();
 }
 
 static void
-get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
+get_total_entries(struct trace_buffer *buf,
+                 unsigned long *total, unsigned long *entries)
 {
        unsigned long count;
        int cpu;
@@ -2040,19 +2066,19 @@ get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *e
        *entries = 0;
 
        for_each_tracing_cpu(cpu) {
-               count = ring_buffer_entries_cpu(tr->buffer, cpu);
+               count = ring_buffer_entries_cpu(buf->buffer, cpu);
                /*
                 * If this buffer has skipped entries, then we hold all
                 * entries for the trace and we need to ignore the
                 * ones before the time stamp.
                 */
-               if (per_cpu_ptr(tr->data, cpu)->skipped_entries) {
-                       count -= per_cpu_ptr(tr->data, cpu)->skipped_entries;
+               if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
+                       count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
                        /* total is the same as the entries */
                        *total += count;
                } else
                        *total += count +
-                               ring_buffer_overrun_cpu(tr->buffer, cpu);
+                               ring_buffer_overrun_cpu(buf->buffer, cpu);
                *entries += count;
        }
 }
@@ -2069,27 +2095,27 @@ static void print_lat_help_header(struct seq_file *m)
        seq_puts(m, "#  \\   /      |||||  \\    |   /           \n");
 }
 
-static void print_event_info(struct trace_array *tr, struct seq_file *m)
+static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
 {
        unsigned long total;
        unsigned long entries;
 
-       get_total_entries(tr, &total, &entries);
+       get_total_entries(buf, &total, &entries);
        seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
                   entries, total, num_online_cpus());
        seq_puts(m, "#\n");
 }
 
-static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
+static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
 {
-       print_event_info(tr, m);
+       print_event_info(buf, m);
        seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |       |          |         |\n");
 }
 
-static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
+static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
 {
-       print_event_info(tr, m);
+       print_event_info(buf, m);
        seq_puts(m, "#                              _-----=> irqs-off\n");
        seq_puts(m, "#                             / _----=> need-resched\n");
        seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
@@ -2103,8 +2129,8 @@ void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
-       struct trace_array *tr = iter->tr;
-       struct trace_array_cpu *data = per_cpu_ptr(tr->data, tr->cpu);
+       struct trace_buffer *buf = iter->trace_buffer;
+       struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
        struct tracer *type = iter->trace;
        unsigned long entries;
        unsigned long total;
@@ -2112,7 +2138,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 
        name = type->name;
 
-       get_total_entries(tr, &total, &entries);
+       get_total_entries(buf, &total, &entries);
 
        seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
@@ -2123,7 +2149,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
                   nsecs_to_usecs(data->saved_latency),
                   entries,
                   total,
-                  tr->cpu,
+                  buf->cpu,
 #if defined(CONFIG_PREEMPT_NONE)
                   "server",
 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
@@ -2174,7 +2200,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
        if (cpumask_test_cpu(iter->cpu, iter->started))
                return;
 
-       if (per_cpu_ptr(iter->tr->data, iter->cpu)->skipped_entries)
+       if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
                return;
 
        cpumask_set_cpu(iter->cpu, iter->started);
@@ -2304,7 +2330,7 @@ int trace_empty(struct trace_iterator *iter)
                if (!ring_buffer_iter_empty(buf_iter))
                        return 0;
        } else {
-               if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+               if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
                        return 0;
        }
        return 1;
@@ -2316,7 +2342,7 @@ int trace_empty(struct trace_iterator *iter)
                        if (!ring_buffer_iter_empty(buf_iter))
                                return 0;
                } else {
-                       if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+                       if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
                                return 0;
                }
        }
@@ -2394,9 +2420,9 @@ void trace_default_header(struct seq_file *m)
        } else {
                if (!(trace_flags & TRACE_ITER_VERBOSE)) {
                        if (trace_flags & TRACE_ITER_IRQ_INFO)
-                               print_func_help_header_irq(iter->tr, m);
+                               print_func_help_header_irq(iter->trace_buffer, m);
                        else
-                               print_func_help_header(iter->tr, m);
+                               print_func_help_header(iter->trace_buffer, m);
                }
        }
 }
@@ -2515,11 +2541,15 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
        if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
                goto fail;
 
+       iter->tr = tr;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
        /* Currently only the top directory has a snapshot */
        if (tr->current_trace->print_max || snapshot)
-               iter->tr = &max_tr;
+               iter->trace_buffer = &tr->max_buffer;
        else
-               iter->tr = tr;
+#endif
+               iter->trace_buffer = &tr->trace_buffer;
        iter->snapshot = snapshot;
        iter->pos = -1;
        mutex_init(&iter->mutex);
@@ -2530,7 +2560,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
                iter->trace->open(iter);
 
        /* Annotate start of buffers if we had overruns */
-       if (ring_buffer_overruns(iter->tr->buffer))
+       if (ring_buffer_overruns(iter->trace_buffer->buffer))
                iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -2544,7 +2574,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
        if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
                for_each_tracing_cpu(cpu) {
                        iter->buffer_iter[cpu] =
-                               ring_buffer_read_prepare(iter->tr->buffer, cpu);
+                               ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
                }
                ring_buffer_read_prepare_sync();
                for_each_tracing_cpu(cpu) {
@@ -2554,7 +2584,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
        } else {
                cpu = iter->cpu_file;
                iter->buffer_iter[cpu] =
-                       ring_buffer_read_prepare(iter->tr->buffer, cpu);
+                       ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
                ring_buffer_read_prepare_sync();
                ring_buffer_read_start(iter->buffer_iter[cpu]);
                tracing_iter_reset(iter, cpu);
@@ -2593,12 +2623,7 @@ static int tracing_release(struct inode *inode, struct file *file)
                return 0;
 
        iter = m->private;
-
-       /* Only the global tracer has a matching max_tr */
-       if (iter->tr == &max_tr)
-               tr = &global_trace;
-       else
-               tr = iter->tr;
+       tr = iter->tr;
 
        mutex_lock(&trace_types_lock);
        for_each_tracing_cpu(cpu) {
@@ -2634,9 +2659,9 @@ static int tracing_open(struct inode *inode, struct file *file)
                struct trace_array *tr = tc->tr;
 
                if (tc->cpu == RING_BUFFER_ALL_CPUS)
-                       tracing_reset_online_cpus(tr);
+                       tracing_reset_online_cpus(&tr->trace_buffer);
                else
-                       tracing_reset(tr, tc->cpu);
+                       tracing_reset(&tr->trace_buffer, tc->cpu);
        }
 
        if (file->f_mode & FMODE_READ) {
@@ -2805,13 +2830,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                 */
                if (cpumask_test_cpu(cpu, tracing_cpumask) &&
                                !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-                       atomic_inc(&per_cpu_ptr(tr->data, cpu)->disabled);
-                       ring_buffer_record_disable_cpu(tr->buffer, cpu);
+                       atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
+                       ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
                }
                if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
                                cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-                       atomic_dec(&per_cpu_ptr(tr->data, cpu)->disabled);
-                       ring_buffer_record_enable_cpu(tr->buffer, cpu);
+                       atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
+                       ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
                }
        }
        arch_spin_unlock(&ftrace_max_lock);
@@ -2930,9 +2955,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
                trace_event_enable_cmd_record(enabled);
 
        if (mask == TRACE_ITER_OVERWRITE) {
-               ring_buffer_change_overwrite(global_trace.buffer, enabled);
+               ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
 #ifdef CONFIG_TRACER_MAX_TRACE
-               ring_buffer_change_overwrite(max_tr.buffer, enabled);
+               ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
 #endif
        }
 
@@ -3116,42 +3141,44 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
 
 int tracer_init(struct tracer *t, struct trace_array *tr)
 {
-       tracing_reset_online_cpus(tr);
+       tracing_reset_online_cpus(&tr->trace_buffer);
        return t->init(tr);
 }
 
-static void set_buffer_entries(struct trace_array *tr, unsigned long val)
+static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
 {
        int cpu;
        for_each_tracing_cpu(cpu)
-               per_cpu_ptr(tr->data, cpu)->entries = val;
+               per_cpu_ptr(buf->data, cpu)->entries = val;
 }
 
+#ifdef CONFIG_TRACER_MAX_TRACE
 /* resize @tr's buffer to the size of @size_tr's entries */
-static int resize_buffer_duplicate_size(struct trace_array *tr,
-                                       struct trace_array *size_tr, int cpu_id)
+static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
+                                       struct trace_buffer *size_buf, int cpu_id)
 {
        int cpu, ret = 0;
 
        if (cpu_id == RING_BUFFER_ALL_CPUS) {
                for_each_tracing_cpu(cpu) {
-                       ret = ring_buffer_resize(tr->buffer,
-                                per_cpu_ptr(size_tr->data, cpu)->entries, cpu);
+                       ret = ring_buffer_resize(trace_buf->buffer,
+                                per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
                        if (ret < 0)
                                break;
-                       per_cpu_ptr(tr->data, cpu)->entries =
-                               per_cpu_ptr(size_tr->data, cpu)->entries;
+                       per_cpu_ptr(trace_buf->data, cpu)->entries =
+                               per_cpu_ptr(size_buf->data, cpu)->entries;
                }
        } else {
-               ret = ring_buffer_resize(tr->buffer,
-                        per_cpu_ptr(size_tr->data, cpu_id)->entries, cpu_id);
+               ret = ring_buffer_resize(trace_buf->buffer,
+                        per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
                if (ret == 0)
-                       per_cpu_ptr(tr->data, cpu_id)->entries =
-                               per_cpu_ptr(size_tr->data, cpu_id)->entries;
+                       per_cpu_ptr(trace_buf->data, cpu_id)->entries =
+                               per_cpu_ptr(size_buf->data, cpu_id)->entries;
        }
 
        return ret;
 }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 static int __tracing_resize_ring_buffer(struct trace_array *tr,
                                        unsigned long size, int cpu)
@@ -3166,20 +3193,22 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
        ring_buffer_expanded = 1;
 
        /* May be called before buffers are initialized */
-       if (!tr->buffer)
+       if (!tr->trace_buffer.buffer)
                return 0;
 
-       ret = ring_buffer_resize(tr->buffer, size, cpu);
+       ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
        if (ret < 0)
                return ret;
 
+#ifdef CONFIG_TRACER_MAX_TRACE
        if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
            !tr->current_trace->use_max_tr)
                goto out;
 
-       ret = ring_buffer_resize(max_tr.buffer, size, cpu);
+       ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
        if (ret < 0) {
-               int r = resize_buffer_duplicate_size(tr, tr, cpu);
+               int r = resize_buffer_duplicate_size(&tr->trace_buffer,
+                                                    &tr->trace_buffer, cpu);
                if (r < 0) {
                        /*
                         * AARGH! We are left with different
@@ -3202,15 +3231,17 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
        }
 
        if (cpu == RING_BUFFER_ALL_CPUS)
-               set_buffer_entries(&max_tr, size);
+               set_buffer_entries(&tr->max_buffer, size);
        else
-               per_cpu_ptr(max_tr.data, cpu)->entries = size;
+               per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
 
  out:
+#endif /* CONFIG_TRACER_MAX_TRACE */
+
        if (cpu == RING_BUFFER_ALL_CPUS)
-               set_buffer_entries(tr, size);
+               set_buffer_entries(&tr->trace_buffer, size);
        else
-               per_cpu_ptr(tr->data, cpu)->entries = size;
+               per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
 
        return ret;
 }
@@ -3277,7 +3308,9 @@ static int tracing_set_tracer(const char *buf)
        static struct trace_option_dentry *topts;
        struct trace_array *tr = &global_trace;
        struct tracer *t;
+#ifdef CONFIG_TRACER_MAX_TRACE
        bool had_max_tr;
+#endif
        int ret = 0;
 
        mutex_lock(&trace_types_lock);
@@ -3308,7 +3341,10 @@ static int tracing_set_tracer(const char *buf)
        if (tr->current_trace->reset)
                tr->current_trace->reset(tr);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
        had_max_tr = tr->current_trace->allocated_snapshot;
+
+       /* Current trace needs to be nop_trace before synchronize_sched */
        tr->current_trace = &nop_trace;
 
        if (had_max_tr && !t->use_max_tr) {
@@ -3325,22 +3361,28 @@ static int tracing_set_tracer(const char *buf)
                 * The max_tr ring buffer has some state (e.g. ring->clock) and
                 * we want preserve it.
                 */
-               ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
-               set_buffer_entries(&max_tr, 1);
-               tracing_reset_online_cpus(&max_tr);
+               ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
+               set_buffer_entries(&tr->max_buffer, 1);
+               tracing_reset_online_cpus(&tr->max_buffer);
                tr->current_trace->allocated_snapshot = false;
        }
+#else
+       tr->current_trace = &nop_trace;
+#endif
        destroy_trace_option_files(topts);
 
        topts = create_trace_option_files(tr, t);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
        if (t->use_max_tr && !had_max_tr) {
                /* we need to make per cpu buffer sizes equivalent */
-               ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
-                                                  RING_BUFFER_ALL_CPUS);
+               ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer,
+                                                  RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        goto out;
                t->allocated_snapshot = true;
        }
+#endif
 
        if (t->init) {
                ret = tracer_init(t, tr);
@@ -3468,6 +3510,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 
        iter->cpu_file = tc->cpu;
        iter->tr = tc->tr;
+       iter->trace_buffer = &tc->tr->trace_buffer;
        mutex_init(&iter->mutex);
        filp->private_data = iter;
 
@@ -3518,7 +3561,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
                 */
                return POLLIN | POLLRDNORM;
        else
-               return ring_buffer_poll_wait(iter->tr->buffer, iter->cpu_file,
+               return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
                                             filp, poll_table);
 }
 
@@ -3857,8 +3900,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
                for_each_tracing_cpu(cpu) {
                        /* fill in the size from first enabled cpu */
                        if (size == 0)
-                               size = per_cpu_ptr(tr->data, cpu)->entries;
-                       if (size != per_cpu_ptr(tr->data, cpu)->entries) {
+                               size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
+                       if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
                                buf_size_same = 0;
                                break;
                        }
@@ -3874,7 +3917,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
                } else
                        r = sprintf(buf, "X\n");
        } else
-               r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->data, tc->cpu)->entries >> 10);
+               r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
 
        mutex_unlock(&trace_types_lock);
 
@@ -3921,7 +3964,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 
        mutex_lock(&trace_types_lock);
        for_each_tracing_cpu(cpu) {
-               size += per_cpu_ptr(tr->data, cpu)->entries >> 10;
+               size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
                if (!ring_buffer_expanded)
                        expanded_size += trace_buf_size >> 10;
        }
@@ -4026,7 +4069,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 
        local_save_flags(irq_flags);
        size = sizeof(*entry) + cnt + 2; /* possible \n added */
-       buffer = global_trace.buffer;
+       buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                                          irq_flags, preempt_count());
        if (!event) {
@@ -4111,16 +4154,19 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 
        tr->clock_id = i;
 
-       ring_buffer_set_clock(tr->buffer, trace_clocks[i].func);
-       if (tr->flags & TRACE_ARRAY_FL_GLOBAL && max_tr.buffer)
-               ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
+       ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
 
        /*
         * New clock may not be consistent with the previous clock.
         * Reset the buffer so that it doesn't have incomparable timestamps.
         */
-       tracing_reset_online_cpus(&global_trace);
-       tracing_reset_online_cpus(&max_tr);
+       tracing_reset_online_cpus(&global_trace.trace_buffer);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+       if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+               ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
+       tracing_reset_online_cpus(&global_trace.max_buffer);
+#endif
 
        mutex_unlock(&trace_types_lock);
 
@@ -4160,6 +4206,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
                        return -ENOMEM;
                }
                iter->tr = tc->tr;
+               iter->trace_buffer = &tc->tr->max_buffer;
                m->private = iter;
                file->private_data = m;
        }
@@ -4196,18 +4243,18 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
        case 0:
                if (tr->current_trace->allocated_snapshot) {
                        /* free spare buffer */
-                       ring_buffer_resize(max_tr.buffer, 1,
+                       ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
-                       set_buffer_entries(&max_tr, 1);
-                       tracing_reset_online_cpus(&max_tr);
+                       set_buffer_entries(&tr->max_buffer, 1);
+                       tracing_reset_online_cpus(&tr->max_buffer);
                        tr->current_trace->allocated_snapshot = false;
                }
                break;
        case 1:
                if (!tr->current_trace->allocated_snapshot) {
                        /* allocate spare buffer */
-                       ret = resize_buffer_duplicate_size(&max_tr,
-                                       &global_trace, RING_BUFFER_ALL_CPUS);
+                       ret = resize_buffer_duplicate_size(&tr->max_buffer,
+                                       &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                        if (ret < 0)
                                break;
                        tr->current_trace->allocated_snapshot = true;
@@ -4220,7 +4267,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                break;
        default:
                if (tr->current_trace->allocated_snapshot)
-                       tracing_reset_online_cpus(&max_tr);
+                       tracing_reset_online_cpus(&tr->max_buffer);
                break;
        }
 
@@ -4338,6 +4385,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
        info->iter.tr = tr;
        info->iter.cpu_file = tc->cpu;
        info->iter.trace = tr->current_trace;
+       info->iter.trace_buffer = &tr->trace_buffer;
        info->spare = NULL;
        /* Force reading ring buffer for first read */
        info->read = (unsigned int)-1;
@@ -4369,7 +4417,8 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                return 0;
 
        if (!info->spare)
-               info->spare = ring_buffer_alloc_read_page(iter->tr->buffer, iter->cpu_file);
+               info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
+                                                         iter->cpu_file);
        if (!info->spare)
                return -ENOMEM;
 
@@ -4379,7 +4428,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
  again:
        trace_access_lock(iter->cpu_file);
-       ret = ring_buffer_read_page(iter->tr->buffer,
+       ret = ring_buffer_read_page(iter->trace_buffer->buffer,
                                    &info->spare,
                                    count,
                                    iter->cpu_file, 0);
@@ -4421,7 +4470,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
        struct trace_iterator *iter = &info->iter;
 
        if (info->spare)
-               ring_buffer_free_read_page(iter->tr->buffer, info->spare);
+               ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
        kfree(info);
 
        return 0;
@@ -4521,7 +4570,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
  again:
        trace_access_lock(iter->cpu_file);
-       entries = ring_buffer_entries_cpu(iter->tr->buffer, iter->cpu_file);
+       entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
 
        for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
                struct page *page;
@@ -4532,7 +4581,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                        break;
 
                ref->ref = 1;
-               ref->buffer = iter->tr->buffer;
+               ref->buffer = iter->trace_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (!ref->page) {
                        kfree(ref);
@@ -4564,7 +4613,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                spd.nr_pages++;
                *ppos += PAGE_SIZE;
 
-               entries = ring_buffer_entries_cpu(iter->tr->buffer, iter->cpu_file);
+               entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
        }
 
        trace_access_unlock(iter->cpu_file);
@@ -4605,6 +4654,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 {
        struct trace_cpu *tc = filp->private_data;
        struct trace_array *tr = tc->tr;
+       struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_seq *s;
        unsigned long cnt;
        unsigned long long t;
@@ -4617,41 +4667,41 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 
        trace_seq_init(s);
 
-       cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
+       cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
        trace_seq_printf(s, "entries: %ld\n", cnt);
 
-       cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
+       cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
        trace_seq_printf(s, "overrun: %ld\n", cnt);
 
-       cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
+       cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
        trace_seq_printf(s, "commit overrun: %ld\n", cnt);
 
-       cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
+       cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
        trace_seq_printf(s, "bytes: %ld\n", cnt);
 
        if (trace_clocks[trace_clock_id].in_ns) {
                /* local or global for trace_clock */
-               t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+               t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
                usec_rem = do_div(t, USEC_PER_SEC);
                trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
                                 t, usec_rem);
 
-               t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+               t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
                usec_rem = do_div(t, USEC_PER_SEC);
                trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
        } else {
                /* counter or tsc mode for trace_clock */
                trace_seq_printf(s, "oldest event ts: %llu\n",
-                                ring_buffer_oldest_event_ts(tr->buffer, cpu));
+                                ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
 
                trace_seq_printf(s, "now ts: %llu\n",
-                                ring_buffer_time_stamp(tr->buffer, cpu));
+                                ring_buffer_time_stamp(trace_buf->buffer, cpu));
        }
 
-       cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
+       cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
        trace_seq_printf(s, "dropped events: %ld\n", cnt);
 
-       cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
+       cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
        trace_seq_printf(s, "read events: %ld\n", cnt);
 
        count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
@@ -4754,7 +4804,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
4754static void 4804static void
4755tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) 4805tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
4756{ 4806{
4757 struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu); 4807 struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
4758 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 4808 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
4759 struct dentry *d_cpu; 4809 struct dentry *d_cpu;
4760 char cpu_dir[30]; /* 30 characters should be more than enough */ 4810 char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5038,7 +5088,7 @@ rb_simple_read(struct file *filp, char __user *ubuf,
5038 size_t cnt, loff_t *ppos) 5088 size_t cnt, loff_t *ppos)
5039{ 5089{
5040 struct trace_array *tr = filp->private_data; 5090 struct trace_array *tr = filp->private_data;
5041 struct ring_buffer *buffer = tr->buffer; 5091 struct ring_buffer *buffer = tr->trace_buffer.buffer;
5042 char buf[64]; 5092 char buf[64];
5043 int r; 5093 int r;
5044 5094
@@ -5057,7 +5107,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
5057 size_t cnt, loff_t *ppos) 5107 size_t cnt, loff_t *ppos)
5058{ 5108{
5059 struct trace_array *tr = filp->private_data; 5109 struct trace_array *tr = filp->private_data;
5060 struct ring_buffer *buffer = tr->buffer; 5110 struct ring_buffer *buffer = tr->trace_buffer.buffer;
5061 unsigned long val; 5111 unsigned long val;
5062 int ret; 5112 int ret;
5063 5113
@@ -5129,18 +5179,18 @@ static int new_instance_create(const char *name)
5129 5179
5130 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 5180 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5131 5181
5132 tr->buffer = ring_buffer_alloc(trace_buf_size, rb_flags); 5182 tr->trace_buffer.buffer = ring_buffer_alloc(trace_buf_size, rb_flags);
5133 if (!tr->buffer) 5183 if (!tr->trace_buffer.buffer)
5134 goto out_free_tr; 5184 goto out_free_tr;
5135 5185
5136 tr->data = alloc_percpu(struct trace_array_cpu); 5186 tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
5137 if (!tr->data) 5187 if (!tr->trace_buffer.data)
5138 goto out_free_tr; 5188 goto out_free_tr;
5139 5189
5140 for_each_tracing_cpu(i) { 5190 for_each_tracing_cpu(i) {
5141 memset(per_cpu_ptr(tr->data, i), 0, sizeof(struct trace_array_cpu)); 5191 memset(per_cpu_ptr(tr->trace_buffer.data, i), 0, sizeof(struct trace_array_cpu));
5142 per_cpu_ptr(tr->data, i)->trace_cpu.cpu = i; 5192 per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.cpu = i;
5143 per_cpu_ptr(tr->data, i)->trace_cpu.tr = tr; 5193 per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.tr = tr;
5144 } 5194 }
5145 5195
5146 /* Holder for file callbacks */ 5196 /* Holder for file callbacks */
@@ -5164,8 +5214,8 @@ static int new_instance_create(const char *name)
5164 return 0; 5214 return 0;
5165 5215
5166 out_free_tr: 5216 out_free_tr:
5167 if (tr->buffer) 5217 if (tr->trace_buffer.buffer)
5168 ring_buffer_free(tr->buffer); 5218 ring_buffer_free(tr->trace_buffer.buffer);
5169 kfree(tr->name); 5219 kfree(tr->name);
5170 kfree(tr); 5220 kfree(tr);
5171 5221
@@ -5198,8 +5248,8 @@ static int instance_delete(const char *name)
5198 5248
5199 event_trace_del_tracer(tr); 5249 event_trace_del_tracer(tr);
5200 debugfs_remove_recursive(tr->dir); 5250 debugfs_remove_recursive(tr->dir);
5201 free_percpu(tr->data); 5251 free_percpu(tr->trace_buffer.data);
5202 ring_buffer_free(tr->buffer); 5252 ring_buffer_free(tr->trace_buffer.buffer);
5203 5253
5204 kfree(tr->name); 5254 kfree(tr->name);
5205 kfree(tr); 5255 kfree(tr);
@@ -5439,6 +5489,7 @@ void trace_init_global_iter(struct trace_iterator *iter)
5439 iter->tr = &global_trace; 5489 iter->tr = &global_trace;
5440 iter->trace = iter->tr->current_trace; 5490 iter->trace = iter->tr->current_trace;
5441 iter->cpu_file = RING_BUFFER_ALL_CPUS; 5491 iter->cpu_file = RING_BUFFER_ALL_CPUS;
5492 iter->trace_buffer = &global_trace.trace_buffer;
5442} 5493}
5443 5494
5444static void 5495static void
@@ -5476,7 +5527,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
5476 trace_init_global_iter(&iter); 5527 trace_init_global_iter(&iter);
5477 5528
5478 for_each_tracing_cpu(cpu) { 5529 for_each_tracing_cpu(cpu) {
5479 atomic_inc(&per_cpu_ptr(iter.tr->data, cpu)->disabled); 5530 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
5480 } 5531 }
5481 5532
5482 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; 5533 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -5544,7 +5595,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
5544 trace_flags |= old_userobj; 5595 trace_flags |= old_userobj;
5545 5596
5546 for_each_tracing_cpu(cpu) { 5597 for_each_tracing_cpu(cpu) {
5547 atomic_dec(&per_cpu_ptr(iter.tr->data, cpu)->disabled); 5598 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
5548 } 5599 }
5549 tracing_on(); 5600 tracing_on();
5550 } 5601 }
@@ -5594,58 +5645,59 @@ __init static int tracer_alloc_buffers(void)
5594 raw_spin_lock_init(&global_trace.start_lock); 5645 raw_spin_lock_init(&global_trace.start_lock);
5595 5646
5596 /* TODO: make the number of buffers hot pluggable with CPUS */ 5647 /* TODO: make the number of buffers hot pluggable with CPUS */
5597 global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags); 5648 global_trace.trace_buffer.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
5598 if (!global_trace.buffer) { 5649 if (!global_trace.trace_buffer.buffer) {
5599 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 5650 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
5600 WARN_ON(1); 5651 WARN_ON(1);
5601 goto out_free_cpumask; 5652 goto out_free_cpumask;
5602 } 5653 }
5603 5654
5604 global_trace.data = alloc_percpu(struct trace_array_cpu); 5655 global_trace.trace_buffer.data = alloc_percpu(struct trace_array_cpu);
5605 5656
5606 if (!global_trace.data) { 5657 if (!global_trace.trace_buffer.data) {
5607 printk(KERN_ERR "tracer: failed to allocate percpu memory!\n"); 5658 printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
5608 WARN_ON(1); 5659 WARN_ON(1);
5609 goto out_free_cpumask; 5660 goto out_free_cpumask;
5610 } 5661 }
5611 5662
5612 for_each_tracing_cpu(i) { 5663 for_each_tracing_cpu(i) {
5613 memset(per_cpu_ptr(global_trace.data, i), 0, sizeof(struct trace_array_cpu)); 5664 memset(per_cpu_ptr(global_trace.trace_buffer.data, i), 0,
5614 per_cpu_ptr(global_trace.data, i)->trace_cpu.cpu = i; 5665 sizeof(struct trace_array_cpu));
5615 per_cpu_ptr(global_trace.data, i)->trace_cpu.tr = &global_trace; 5666 per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.cpu = i;
5667 per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.tr = &global_trace;
5616 } 5668 }
5617 5669
5618 if (global_trace.buffer_disabled) 5670 if (global_trace.buffer_disabled)
5619 tracing_off(); 5671 tracing_off();
5620 5672
5621#ifdef CONFIG_TRACER_MAX_TRACE 5673#ifdef CONFIG_TRACER_MAX_TRACE
5622 max_tr.data = alloc_percpu(struct trace_array_cpu); 5674 global_trace.max_buffer.data = alloc_percpu(struct trace_array_cpu);
5623 if (!max_tr.data) { 5675 if (!global_trace.max_buffer.data) {
5624 printk(KERN_ERR "tracer: failed to allocate percpu memory!\n"); 5676 printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
5625 WARN_ON(1); 5677 WARN_ON(1);
5626 goto out_free_cpumask; 5678 goto out_free_cpumask;
5627 } 5679 }
5628 max_tr.buffer = ring_buffer_alloc(1, rb_flags); 5680 global_trace.max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
5629 raw_spin_lock_init(&max_tr.start_lock); 5681 if (!global_trace.max_buffer.buffer) {
5630 if (!max_tr.buffer) {
5631 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); 5682 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
5632 WARN_ON(1); 5683 WARN_ON(1);
5633 ring_buffer_free(global_trace.buffer); 5684 ring_buffer_free(global_trace.trace_buffer.buffer);
5634 goto out_free_cpumask; 5685 goto out_free_cpumask;
5635 } 5686 }
5636 5687
5637 for_each_tracing_cpu(i) { 5688 for_each_tracing_cpu(i) {
5638 memset(per_cpu_ptr(max_tr.data, i), 0, sizeof(struct trace_array_cpu)); 5689 memset(per_cpu_ptr(global_trace.max_buffer.data, i), 0,
5639 per_cpu_ptr(max_tr.data, i)->trace_cpu.cpu = i; 5690 sizeof(struct trace_array_cpu));
5640 per_cpu_ptr(max_tr.data, i)->trace_cpu.tr = &max_tr; 5691 per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.cpu = i;
5692 per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.tr = &global_trace;
5641 } 5693 }
5642#endif 5694#endif
5643 5695
5644 /* Allocate the first page for all buffers */ 5696 /* Allocate the first page for all buffers */
5645 set_buffer_entries(&global_trace, 5697 set_buffer_entries(&global_trace.trace_buffer,
5646 ring_buffer_size(global_trace.buffer, 0)); 5698 ring_buffer_size(global_trace.trace_buffer.buffer, 0));
5647#ifdef CONFIG_TRACER_MAX_TRACE 5699#ifdef CONFIG_TRACER_MAX_TRACE
5648 set_buffer_entries(&max_tr, 1); 5700 set_buffer_entries(&global_trace.max_buffer, 1);
5649#endif 5701#endif
5650 5702
5651 trace_init_cmdlines(); 5703 trace_init_cmdlines();
@@ -5682,8 +5734,10 @@ __init static int tracer_alloc_buffers(void)
5682 return 0; 5734 return 0;
5683 5735
5684out_free_cpumask: 5736out_free_cpumask:
5685 free_percpu(global_trace.data); 5737 free_percpu(global_trace.trace_buffer.data);
5686 free_percpu(max_tr.data); 5738#ifdef CONFIG_TRACER_MAX_TRACE
5739 free_percpu(global_trace.max_buffer.data);
5740#endif
5687 free_cpumask_var(tracing_cpumask); 5741 free_cpumask_var(tracing_cpumask);
5688out_free_buffer_mask: 5742out_free_buffer_mask:
5689 free_cpumask_var(tracing_buffer_mask); 5743 free_cpumask_var(tracing_buffer_mask);
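
The trace.c hunks above funnel every allocation and error path through the embedded trace_buffer: tracer_alloc_buffers() and new_instance_create() now set up a ring buffer plus a per-CPU data area for each trace_buffer, and unwind both on failure (note the new CONFIG_TRACER_MAX_TRACE guard around the max_buffer cleanup at out_free_cpumask). A minimal userspace sketch of that setup/unwind pattern, with malloc()/calloc() standing in for ring_buffer_alloc()/alloc_percpu() (illustration only, not the kernel API):

/*
 * Sketch of the trace_buffer setup/unwind pattern seen in
 * tracer_alloc_buffers() and new_instance_create() above.
 * Userspace stand-ins throughout; not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring_buffer { size_t size; };            /* opaque in the kernel */
struct trace_array_cpu { int disabled; };       /* per-CPU state */

struct trace_buffer {
        struct ring_buffer *buffer;             /* the ring buffer proper */
        struct trace_array_cpu *data;           /* stand-in for __percpu data */
};

static struct ring_buffer *rb_alloc(size_t size)
{
        struct ring_buffer *rb = malloc(sizeof(*rb));
        if (rb)
                rb->size = size;
        return rb;
}

static int trace_buffer_init(struct trace_buffer *buf, size_t size, int ncpus)
{
        buf->buffer = rb_alloc(size);
        if (!buf->buffer)
                return -1;
        buf->data = calloc(ncpus, sizeof(*buf->data)); /* like alloc_percpu() */
        if (!buf->data) {
                free(buf->buffer);              /* unwind, as in out_free_tr */
                buf->buffer = NULL;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct trace_buffer tb;

        if (trace_buffer_init(&tb, 1 << 12, 4))
                return 1;
        printf("buffer=%zu bytes, per-cpu data ok\n", tb.buffer->size);
        free(tb.data);
        free(tb.buffer);
        return 0;
}
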
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fa60b2977524..986834f1f4dd 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -167,16 +167,37 @@ struct trace_array_cpu {
167 167
168struct tracer; 168struct tracer;
169 169
170struct trace_buffer {
171 struct trace_array *tr;
172 struct ring_buffer *buffer;
173 struct trace_array_cpu __percpu *data;
174 cycle_t time_start;
175 int cpu;
176};
177
170/* 178/*
171 * The trace array - an array of per-CPU trace arrays. This is the 179 * The trace array - an array of per-CPU trace arrays. This is the
172 * highest level data structure that individual tracers deal with. 180 * highest level data structure that individual tracers deal with.
173 * They have on/off state as well: 181 * They have on/off state as well:
174 */ 182 */
175struct trace_array { 183struct trace_array {
176 struct ring_buffer *buffer;
177 struct list_head list; 184 struct list_head list;
178 char *name; 185 char *name;
179 int cpu; 186 struct trace_buffer trace_buffer;
187#ifdef CONFIG_TRACER_MAX_TRACE
188 /*
189 * The max_buffer is used to snapshot the trace when a maximum
190 * latency is reached, or when the user initiates a snapshot.
191 * Some tracers will use this to store a maximum trace while
192 * they continue examining live traces.
193 *
194 * The buffers for the max_buffer are set up the same as those of the trace_buffer.
195 * When a snapshot is taken, the buffer of the max_buffer is swapped
196 * with the buffer of the trace_buffer and the buffers are reset for
197 * the trace_buffer so the tracing can continue.
198 */
199 struct trace_buffer max_buffer;
200#endif
180 int buffer_disabled; 201 int buffer_disabled;
181 struct trace_cpu trace_cpu; /* place holder */ 202 struct trace_cpu trace_cpu; /* place holder */
182#ifdef CONFIG_FTRACE_SYSCALLS 203#ifdef CONFIG_FTRACE_SYSCALLS
@@ -189,7 +210,6 @@ struct trace_array {
189 int clock_id; 210 int clock_id;
190 struct tracer *current_trace; 211 struct tracer *current_trace;
191 unsigned int flags; 212 unsigned int flags;
192 cycle_t time_start;
193 raw_spinlock_t start_lock; 213 raw_spinlock_t start_lock;
194 struct dentry *dir; 214 struct dentry *dir;
195 struct dentry *options; 215 struct dentry *options;
@@ -198,7 +218,6 @@ struct trace_array {
198 struct list_head systems; 218 struct list_head systems;
199 struct list_head events; 219 struct list_head events;
200 struct task_struct *waiter; 220 struct task_struct *waiter;
201 struct trace_array_cpu __percpu *data;
202}; 221};
203 222
204enum { 223enum {
@@ -345,9 +364,11 @@ struct tracer {
345 struct tracer *next; 364 struct tracer *next;
346 struct tracer_flags *flags; 365 struct tracer_flags *flags;
347 bool print_max; 366 bool print_max;
367 bool enabled;
368#ifdef CONFIG_TRACER_MAX_TRACE
348 bool use_max_tr; 369 bool use_max_tr;
349 bool allocated_snapshot; 370 bool allocated_snapshot;
350 bool enabled; 371#endif
351}; 372};
352 373
353 374
@@ -493,8 +514,8 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
493 514
494int tracer_init(struct tracer *t, struct trace_array *tr); 515int tracer_init(struct tracer *t, struct trace_array *tr);
495int tracing_is_enabled(void); 516int tracing_is_enabled(void);
496void tracing_reset(struct trace_array *tr, int cpu); 517void tracing_reset(struct trace_buffer *buf, int cpu);
497void tracing_reset_online_cpus(struct trace_array *tr); 518void tracing_reset_online_cpus(struct trace_buffer *buf);
498void tracing_reset_current(int cpu); 519void tracing_reset_current(int cpu);
499void tracing_reset_all_online_cpus(void); 520void tracing_reset_all_online_cpus(void);
500int tracing_open_generic(struct inode *inode, struct file *filp); 521int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -674,6 +695,8 @@ trace_array_vprintk(struct trace_array *tr,
674 unsigned long ip, const char *fmt, va_list args); 695 unsigned long ip, const char *fmt, va_list args);
675int trace_array_printk(struct trace_array *tr, 696int trace_array_printk(struct trace_array *tr,
676 unsigned long ip, const char *fmt, ...); 697 unsigned long ip, const char *fmt, ...);
698int trace_array_printk_buf(struct ring_buffer *buffer,
699 unsigned long ip, const char *fmt, ...);
677void trace_printk_seq(struct trace_seq *s); 700void trace_printk_seq(struct trace_seq *s);
678enum print_line_t print_trace_line(struct trace_iterator *iter); 701enum print_line_t print_trace_line(struct trace_iterator *iter);
679 702
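
The new struct trace_buffer in trace.h groups the fields that exist once per buffer (ring buffer pointer, per-CPU data, time_start), so a trace_array can carry two of them: the live trace_buffer and, under CONFIG_TRACER_MAX_TRACE, the max_buffer. A toy userspace model of the snapshot swap the comment describes, with locking, per-CPU state, and the post-swap reset elided (an assumption-laden sketch, not update_max_tr() itself):

/*
 * Toy model of the snapshot swap described in the trace.h comment
 * above: only the buffer member of max_buffer and trace_buffer is
 * exchanged. Locking, per-CPU data, and the post-swap reset of the
 * live buffer are elided; this is not update_max_tr() itself.
 */
#include <stdio.h>

struct ring_buffer { const char *tag; };

struct trace_buffer {
        struct ring_buffer *buffer;
        unsigned long long time_start;     /* moved here from trace_array */
};

struct trace_array {
        struct trace_buffer trace_buffer;  /* live trace */
        struct trace_buffer max_buffer;    /* max-latency snapshot */
};

static void snapshot_swap(struct trace_array *tr)
{
        struct ring_buffer *tmp = tr->trace_buffer.buffer;

        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = tmp;
        /* the kernel then resets the (new) live buffer and carries on */
}

int main(void)
{
        struct ring_buffer live = { "live" }, max = { "max" };
        struct trace_array tr = {
                .trace_buffer = { &live, 0 },
                .max_buffer   = { &max, 0 },
        };

        snapshot_swap(&tr);
        printf("live side now holds: %s\n", tr.trace_buffer.buffer->tag);
        return 0;
}

As the comment puts it, only the ring buffers change hands; the rest of each trace_buffer stays where it is.
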
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 9d73861efc6a..e467c0c7bdd5 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -28,7 +28,7 @@ static void tracing_stop_function_trace(void);
28static int function_trace_init(struct trace_array *tr) 28static int function_trace_init(struct trace_array *tr)
29{ 29{
30 func_trace = tr; 30 func_trace = tr;
31 tr->cpu = get_cpu(); 31 tr->trace_buffer.cpu = get_cpu();
32 put_cpu(); 32 put_cpu();
33 33
34 tracing_start_cmdline_record(); 34 tracing_start_cmdline_record();
@@ -44,7 +44,7 @@ static void function_trace_reset(struct trace_array *tr)
44 44
45static void function_trace_start(struct trace_array *tr) 45static void function_trace_start(struct trace_array *tr)
46{ 46{
47 tracing_reset_online_cpus(tr); 47 tracing_reset_online_cpus(&tr->trace_buffer);
48} 48}
49 49
50/* Our option */ 50/* Our option */
@@ -76,7 +76,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
76 goto out; 76 goto out;
77 77
78 cpu = smp_processor_id(); 78 cpu = smp_processor_id();
79 data = per_cpu_ptr(tr->data, cpu); 79 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
80 if (!atomic_read(&data->disabled)) { 80 if (!atomic_read(&data->disabled)) {
81 local_save_flags(flags); 81 local_save_flags(flags);
82 trace_function(tr, ip, parent_ip, flags, pc); 82 trace_function(tr, ip, parent_ip, flags, pc);
@@ -107,7 +107,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
107 */ 107 */
108 local_irq_save(flags); 108 local_irq_save(flags);
109 cpu = raw_smp_processor_id(); 109 cpu = raw_smp_processor_id();
110 data = per_cpu_ptr(tr->data, cpu); 110 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
111 disabled = atomic_inc_return(&data->disabled); 111 disabled = atomic_inc_return(&data->disabled);
112 112
113 if (likely(disabled == 1)) { 113 if (likely(disabled == 1)) {
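
trace_functions.c (like the irqsoff, sched_switch, and wakeup tracers further down) keeps the same recursion guard, only now reached through tr->trace_buffer.data: bump this CPU's disabled counter and record only when the increment returns 1. A userspace sketch of that discipline using C11 atomics, with a plain array standing in for per_cpu_ptr() (names are stand-ins, not kernel API):

/*
 * Sketch of the recursion guard used by the tracers above: record
 * only when the increment of this CPU's disabled counter returns 1
 * (the kernel's atomic_inc_return(&data->disabled) == 1 test).
 * Userspace stand-in: per_cpu_ptr() becomes plain array indexing.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

struct trace_array_cpu { atomic_int disabled; };

static struct trace_array_cpu cpu_data[NCPUS];  /* tr->trace_buffer.data */

static void function_trace_call_stub(int cpu)
{
        struct trace_array_cpu *data = &cpu_data[cpu];

        /* ++disabled; only the first, non-nested entry records */
        if (atomic_fetch_add(&data->disabled, 1) + 1 == 1)
                printf("cpu %d: event recorded\n", cpu);

        atomic_fetch_sub(&data->disabled, 1);   /* --disabled */
}

int main(void)
{
        function_trace_call_stub(0);                /* records */
        atomic_fetch_add(&cpu_data[1].disabled, 1); /* simulate tracing off */
        function_trace_call_stub(1);                /* skipped: disabled != 1 */
        return 0;
}
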
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index ca986d61a282..8388bc99f2ee 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -218,7 +218,7 @@ int __trace_graph_entry(struct trace_array *tr,
218{ 218{
219 struct ftrace_event_call *call = &event_funcgraph_entry; 219 struct ftrace_event_call *call = &event_funcgraph_entry;
220 struct ring_buffer_event *event; 220 struct ring_buffer_event *event;
221 struct ring_buffer *buffer = tr->buffer; 221 struct ring_buffer *buffer = tr->trace_buffer.buffer;
222 struct ftrace_graph_ent_entry *entry; 222 struct ftrace_graph_ent_entry *entry;
223 223
224 if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) 224 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
@@ -265,7 +265,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
265 265
266 local_irq_save(flags); 266 local_irq_save(flags);
267 cpu = raw_smp_processor_id(); 267 cpu = raw_smp_processor_id();
268 data = per_cpu_ptr(tr->data, cpu); 268 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
269 disabled = atomic_inc_return(&data->disabled); 269 disabled = atomic_inc_return(&data->disabled);
270 if (likely(disabled == 1)) { 270 if (likely(disabled == 1)) {
271 pc = preempt_count(); 271 pc = preempt_count();
@@ -323,7 +323,7 @@ void __trace_graph_return(struct trace_array *tr,
323{ 323{
324 struct ftrace_event_call *call = &event_funcgraph_exit; 324 struct ftrace_event_call *call = &event_funcgraph_exit;
325 struct ring_buffer_event *event; 325 struct ring_buffer_event *event;
326 struct ring_buffer *buffer = tr->buffer; 326 struct ring_buffer *buffer = tr->trace_buffer.buffer;
327 struct ftrace_graph_ret_entry *entry; 327 struct ftrace_graph_ret_entry *entry;
328 328
329 if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) 329 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
@@ -350,7 +350,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
350 350
351 local_irq_save(flags); 351 local_irq_save(flags);
352 cpu = raw_smp_processor_id(); 352 cpu = raw_smp_processor_id();
353 data = per_cpu_ptr(tr->data, cpu); 353 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
354 disabled = atomic_inc_return(&data->disabled); 354 disabled = atomic_inc_return(&data->disabled);
355 if (likely(disabled == 1)) { 355 if (likely(disabled == 1)) {
356 pc = preempt_count(); 356 pc = preempt_count();
@@ -560,9 +560,9 @@ get_return_for_leaf(struct trace_iterator *iter,
560 * We need to consume the current entry to see 560 * We need to consume the current entry to see
561 * the next one. 561 * the next one.
562 */ 562 */
563 ring_buffer_consume(iter->tr->buffer, iter->cpu, 563 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
564 NULL, NULL); 564 NULL, NULL);
565 event = ring_buffer_peek(iter->tr->buffer, iter->cpu, 565 event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
566 NULL, NULL); 566 NULL, NULL);
567 } 567 }
568 568
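
In get_return_for_leaf() above, the iterator now reads through iter->trace_buffer->buffer for its consume-then-peek step: discard the current entry so the following one becomes visible, then inspect it without consuming it. A tiny array-based sketch of that step (the ring_buffer API itself is not reproduced here):

/*
 * Array-based sketch of the consume-then-peek step in
 * get_return_for_leaf(). Pure illustration of the access pattern.
 */
#include <stdio.h>

struct entry { int val; };

static struct entry ring[3] = { {1}, {2}, {3} };
static int head;

static struct entry *consume(void) { return head < 3 ? &ring[head++] : NULL; }
static struct entry *peek(void)    { return head < 3 ? &ring[head]   : NULL; }

int main(void)
{
        consume();                      /* discard the current entry */
        struct entry *next = peek();    /* inspect the following one */
        if (next)
                printf("next entry: %d\n", next->val);
        return 0;
}
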
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 9b52f9cf7a0d..5aa40ab72b57 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -121,7 +121,7 @@ static int func_prolog_dec(struct trace_array *tr,
121 if (!irqs_disabled_flags(*flags)) 121 if (!irqs_disabled_flags(*flags))
122 return 0; 122 return 0;
123 123
124 *data = per_cpu_ptr(tr->data, cpu); 124 *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
125 disabled = atomic_inc_return(&(*data)->disabled); 125 disabled = atomic_inc_return(&(*data)->disabled);
126 126
127 if (likely(disabled == 1)) 127 if (likely(disabled == 1))
@@ -175,7 +175,7 @@ static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
175 per_cpu(tracing_cpu, cpu) = 0; 175 per_cpu(tracing_cpu, cpu) = 0;
176 176
177 tracing_max_latency = 0; 177 tracing_max_latency = 0;
178 tracing_reset_online_cpus(irqsoff_trace); 178 tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
179 179
180 return start_irqsoff_tracer(irqsoff_trace, set); 180 return start_irqsoff_tracer(irqsoff_trace, set);
181} 181}
@@ -380,7 +380,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
380 if (per_cpu(tracing_cpu, cpu)) 380 if (per_cpu(tracing_cpu, cpu))
381 return; 381 return;
382 382
383 data = per_cpu_ptr(tr->data, cpu); 383 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
384 384
385 if (unlikely(!data) || atomic_read(&data->disabled)) 385 if (unlikely(!data) || atomic_read(&data->disabled))
386 return; 386 return;
@@ -418,7 +418,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
418 if (!tracer_enabled) 418 if (!tracer_enabled)
419 return; 419 return;
420 420
421 data = per_cpu_ptr(tr->data, cpu); 421 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
422 422
423 if (unlikely(!data) || 423 if (unlikely(!data) ||
424 !data->critical_start || atomic_read(&data->disabled)) 424 !data->critical_start || atomic_read(&data->disabled))
@@ -568,7 +568,7 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
568 irqsoff_trace = tr; 568 irqsoff_trace = tr;
569 /* make sure that the tracer is visible */ 569 /* make sure that the tracer is visible */
570 smp_wmb(); 570 smp_wmb();
571 tracing_reset_online_cpus(tr); 571 tracing_reset_online_cpus(&tr->trace_buffer);
572 572
573 if (start_irqsoff_tracer(tr, is_graph())) 573 if (start_irqsoff_tracer(tr, is_graph()))
574 printk(KERN_ERR "failed to start irqsoff tracer\n"); 574 printk(KERN_ERR "failed to start irqsoff tracer\n");
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 349f6941e8f2..bd90e1b06088 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -26,7 +26,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
26 trace_init_global_iter(&iter); 26 trace_init_global_iter(&iter);
27 27
28 for_each_tracing_cpu(cpu) { 28 for_each_tracing_cpu(cpu) {
29 atomic_inc(&per_cpu_ptr(iter.tr->data, cpu)->disabled); 29 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
30 } 30 }
31 31
32 old_userobj = trace_flags; 32 old_userobj = trace_flags;
@@ -46,14 +46,14 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
46 if (cpu_file == RING_BUFFER_ALL_CPUS) { 46 if (cpu_file == RING_BUFFER_ALL_CPUS) {
47 for_each_tracing_cpu(cpu) { 47 for_each_tracing_cpu(cpu) {
48 iter.buffer_iter[cpu] = 48 iter.buffer_iter[cpu] =
49 ring_buffer_read_prepare(iter.tr->buffer, cpu); 49 ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
50 ring_buffer_read_start(iter.buffer_iter[cpu]); 50 ring_buffer_read_start(iter.buffer_iter[cpu]);
51 tracing_iter_reset(&iter, cpu); 51 tracing_iter_reset(&iter, cpu);
52 } 52 }
53 } else { 53 } else {
54 iter.cpu_file = cpu_file; 54 iter.cpu_file = cpu_file;
55 iter.buffer_iter[cpu_file] = 55 iter.buffer_iter[cpu_file] =
56 ring_buffer_read_prepare(iter.tr->buffer, cpu_file); 56 ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
57 ring_buffer_read_start(iter.buffer_iter[cpu_file]); 57 ring_buffer_read_start(iter.buffer_iter[cpu_file]);
58 tracing_iter_reset(&iter, cpu_file); 58 tracing_iter_reset(&iter, cpu_file);
59 } 59 }
@@ -83,7 +83,7 @@ out:
83 trace_flags = old_userobj; 83 trace_flags = old_userobj;
84 84
85 for_each_tracing_cpu(cpu) { 85 for_each_tracing_cpu(cpu) {
86 atomic_dec(&per_cpu_ptr(iter.tr->data, cpu)->disabled); 86 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
87 } 87 }
88 88
89 for_each_tracing_cpu(cpu) 89 for_each_tracing_cpu(cpu)
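
The kdb dump follows the same shape as __ftrace_dump() earlier: quiesce recording on every CPU through the iterator's trace_buffer (which trace_init_global_iter() now populates), read the entries, then re-enable. A userspace sketch of that quiesce/re-enable bracket (stand-in types, not kernel code):

/*
 * Sketch of the dump bracket in ftrace_dump_buf(): quiesce every
 * CPU's recording through the iterator's trace_buffer, read, then
 * re-enable. Stand-in types; not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 2

struct trace_array_cpu { atomic_int disabled; };
struct trace_buffer   { struct trace_array_cpu data[NCPUS]; };
struct trace_iterator { struct trace_buffer *trace_buffer; };

int main(void)
{
        struct trace_buffer buf = { 0 };
        struct trace_iterator iter = { .trace_buffer = &buf };
        int cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)       /* quiesce all CPUs */
                atomic_fetch_add(&iter.trace_buffer->data[cpu].disabled, 1);

        printf("buffers quiesced; dump entries here\n");

        for (cpu = 0; cpu < NCPUS; cpu++)       /* re-enable recording */
                atomic_fetch_sub(&iter.trace_buffer->data[cpu].disabled, 1);
        return 0;
}
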
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 2472f6f76b50..a5e8f4878bfa 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -31,7 +31,7 @@ static void mmio_reset_data(struct trace_array *tr)
31 overrun_detected = false; 31 overrun_detected = false;
32 prev_overruns = 0; 32 prev_overruns = 0;
33 33
34 tracing_reset_online_cpus(tr); 34 tracing_reset_online_cpus(&tr->trace_buffer);
35} 35}
36 36
37static int mmio_trace_init(struct trace_array *tr) 37static int mmio_trace_init(struct trace_array *tr)
@@ -128,7 +128,7 @@ static void mmio_close(struct trace_iterator *iter)
128static unsigned long count_overruns(struct trace_iterator *iter) 128static unsigned long count_overruns(struct trace_iterator *iter)
129{ 129{
130 unsigned long cnt = atomic_xchg(&dropped_count, 0); 130 unsigned long cnt = atomic_xchg(&dropped_count, 0);
131 unsigned long over = ring_buffer_overruns(iter->tr->buffer); 131 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
132 132
133 if (over > prev_overruns) 133 if (over > prev_overruns)
134 cnt += over - prev_overruns; 134 cnt += over - prev_overruns;
@@ -309,7 +309,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
309 struct mmiotrace_rw *rw) 309 struct mmiotrace_rw *rw)
310{ 310{
311 struct ftrace_event_call *call = &event_mmiotrace_rw; 311 struct ftrace_event_call *call = &event_mmiotrace_rw;
312 struct ring_buffer *buffer = tr->buffer; 312 struct ring_buffer *buffer = tr->trace_buffer.buffer;
313 struct ring_buffer_event *event; 313 struct ring_buffer_event *event;
314 struct trace_mmiotrace_rw *entry; 314 struct trace_mmiotrace_rw *entry;
315 int pc = preempt_count(); 315 int pc = preempt_count();
@@ -330,7 +330,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
330void mmio_trace_rw(struct mmiotrace_rw *rw) 330void mmio_trace_rw(struct mmiotrace_rw *rw)
331{ 331{
332 struct trace_array *tr = mmio_trace_array; 332 struct trace_array *tr = mmio_trace_array;
333 struct trace_array_cpu *data = per_cpu_ptr(tr->data, smp_processor_id()); 333 struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
334 __trace_mmiotrace_rw(tr, data, rw); 334 __trace_mmiotrace_rw(tr, data, rw);
335} 335}
336 336
@@ -339,7 +339,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
339 struct mmiotrace_map *map) 339 struct mmiotrace_map *map)
340{ 340{
341 struct ftrace_event_call *call = &event_mmiotrace_map; 341 struct ftrace_event_call *call = &event_mmiotrace_map;
342 struct ring_buffer *buffer = tr->buffer; 342 struct ring_buffer *buffer = tr->trace_buffer.buffer;
343 struct ring_buffer_event *event; 343 struct ring_buffer_event *event;
344 struct trace_mmiotrace_map *entry; 344 struct trace_mmiotrace_map *entry;
345 int pc = preempt_count(); 345 int pc = preempt_count();
@@ -363,7 +363,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
363 struct trace_array_cpu *data; 363 struct trace_array_cpu *data;
364 364
365 preempt_disable(); 365 preempt_disable();
366 data = per_cpu_ptr(tr->data, smp_processor_id()); 366 data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
367 __trace_mmiotrace_map(tr, data, map); 367 __trace_mmiotrace_map(tr, data, map);
368 preempt_enable(); 368 preempt_enable();
369} 369}
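
mmiotrace's write path shows the pattern repeated throughout the patch: fetch the ring buffer from tr->trace_buffer (formerly tr->buffer), reserve space for an entry, fill it in, commit. A minimal userspace sketch with a byte array standing in for the ring buffer; trace_buffer_lock_reserve()'s locking and the separate commit step are collapsed into one reserve (illustration only):

/*
 * Sketch of the event write path: buffer comes from the embedded
 * trace_buffer, then reserve/fill. Userspace stand-in; the real
 * reserve and commit are distinct, locked operations.
 */
#include <stddef.h>
#include <stdio.h>

struct ring_buffer { char data[256]; size_t used; };

struct trace_buffer { struct ring_buffer *buffer; };
struct trace_array  { struct trace_buffer trace_buffer; };

struct mmio_entry { unsigned long addr; int width; };

static void *reserve(struct ring_buffer *rb, size_t len)
{
        void *p;

        if (rb->used + len > sizeof(rb->data))
                return NULL;                    /* buffer full */
        p = rb->data + rb->used;
        rb->used += len;                        /* real code commits separately */
        return p;
}

int main(void)
{
        struct ring_buffer rb = { .used = 0 };
        struct trace_array tr = { .trace_buffer = { &rb } };
        struct ring_buffer *buffer = tr.trace_buffer.buffer; /* was tr->buffer */
        struct mmio_entry *entry = reserve(buffer, sizeof(*entry));

        if (!entry)
                return 1;
        entry->addr = 0xd000;
        entry->width = 4;
        printf("logged mmio rw at %#lx, width %d\n", entry->addr, entry->width);
        return 0;
}
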
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index aa92ac322ba2..2edc7220d017 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -643,7 +643,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
643{ 643{
644 unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE; 644 unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
645 unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS; 645 unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
646 unsigned long long abs_ts = iter->ts - iter->tr->time_start; 646 unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
647 unsigned long long rel_ts = next_ts - iter->ts; 647 unsigned long long rel_ts = next_ts - iter->ts;
648 struct trace_seq *s = &iter->seq; 648 struct trace_seq *s = &iter->seq;
649 649
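
lat_print_timestamp() in trace_output.c now takes its epoch from the buffer itself: absolute time is the event timestamp minus iter->trace_buffer->time_start (previously iter->tr->time_start), and relative time is the delta to the next event. A trivial worked example with made-up values:

/*
 * Worked example of the timestamp math in lat_print_timestamp().
 * All values are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long time_start = 1000; /* iter->trace_buffer->time_start */
        unsigned long long ts = 1500;         /* iter->ts */
        unsigned long long next_ts = 1720;

        unsigned long long abs_ts = ts - time_start; /* 500: since buffer start */
        unsigned long long rel_ts = next_ts - ts;    /* 220: to the next event */

        printf("abs=%llu rel=%llu\n", abs_ts, rel_ts);
        return 0;
}
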
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 1ffe39abd6fc..4e98e3b257a3 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -28,7 +28,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
28 unsigned long flags, int pc) 28 unsigned long flags, int pc)
29{ 29{
30 struct ftrace_event_call *call = &event_context_switch; 30 struct ftrace_event_call *call = &event_context_switch;
31 struct ring_buffer *buffer = tr->buffer; 31 struct ring_buffer *buffer = tr->trace_buffer.buffer;
32 struct ring_buffer_event *event; 32 struct ring_buffer_event *event;
33 struct ctx_switch_entry *entry; 33 struct ctx_switch_entry *entry;
34 34
@@ -69,7 +69,7 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
69 pc = preempt_count(); 69 pc = preempt_count();
70 local_irq_save(flags); 70 local_irq_save(flags);
71 cpu = raw_smp_processor_id(); 71 cpu = raw_smp_processor_id();
72 data = per_cpu_ptr(ctx_trace->data, cpu); 72 data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
73 73
74 if (likely(!atomic_read(&data->disabled))) 74 if (likely(!atomic_read(&data->disabled)))
75 tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); 75 tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
@@ -86,7 +86,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
86 struct ftrace_event_call *call = &event_wakeup; 86 struct ftrace_event_call *call = &event_wakeup;
87 struct ring_buffer_event *event; 87 struct ring_buffer_event *event;
88 struct ctx_switch_entry *entry; 88 struct ctx_switch_entry *entry;
89 struct ring_buffer *buffer = tr->buffer; 89 struct ring_buffer *buffer = tr->trace_buffer.buffer;
90 90
91 event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, 91 event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
92 sizeof(*entry), flags, pc); 92 sizeof(*entry), flags, pc);
@@ -123,7 +123,7 @@ probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
123 pc = preempt_count(); 123 pc = preempt_count();
124 local_irq_save(flags); 124 local_irq_save(flags);
125 cpu = raw_smp_processor_id(); 125 cpu = raw_smp_processor_id();
126 data = per_cpu_ptr(ctx_trace->data, cpu); 126 data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
127 127
128 if (likely(!atomic_read(&data->disabled))) 128 if (likely(!atomic_read(&data->disabled)))
129 tracing_sched_wakeup_trace(ctx_trace, wakee, current, 129 tracing_sched_wakeup_trace(ctx_trace, wakee, current,
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index f9ceb75a95b7..c16f8cd63c3c 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -89,7 +89,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
89 if (cpu != wakeup_current_cpu) 89 if (cpu != wakeup_current_cpu)
90 goto out_enable; 90 goto out_enable;
91 91
92 *data = per_cpu_ptr(tr->data, cpu); 92 *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
93 disabled = atomic_inc_return(&(*data)->disabled); 93 disabled = atomic_inc_return(&(*data)->disabled);
94 if (unlikely(disabled != 1)) 94 if (unlikely(disabled != 1))
95 goto out; 95 goto out;
@@ -353,7 +353,7 @@ probe_wakeup_sched_switch(void *ignore,
353 353
354 /* disable local data, not wakeup_cpu data */ 354 /* disable local data, not wakeup_cpu data */
355 cpu = raw_smp_processor_id(); 355 cpu = raw_smp_processor_id();
356 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled); 356 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
357 if (likely(disabled != 1)) 357 if (likely(disabled != 1))
358 goto out; 358 goto out;
359 359
@@ -365,7 +365,7 @@ probe_wakeup_sched_switch(void *ignore,
365 goto out_unlock; 365 goto out_unlock;
366 366
367 /* The task we are waiting for is waking up */ 367 /* The task we are waiting for is waking up */
368 data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu); 368 data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
369 369
370 __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); 370 __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
371 tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); 371 tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -387,7 +387,7 @@ out_unlock:
387 arch_spin_unlock(&wakeup_lock); 387 arch_spin_unlock(&wakeup_lock);
388 local_irq_restore(flags); 388 local_irq_restore(flags);
389out: 389out:
390 atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled); 390 atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
391} 391}
392 392
393static void __wakeup_reset(struct trace_array *tr) 393static void __wakeup_reset(struct trace_array *tr)
@@ -405,7 +405,7 @@ static void wakeup_reset(struct trace_array *tr)
405{ 405{
406 unsigned long flags; 406 unsigned long flags;
407 407
408 tracing_reset_online_cpus(tr); 408 tracing_reset_online_cpus(&tr->trace_buffer);
409 409
410 local_irq_save(flags); 410 local_irq_save(flags);
411 arch_spin_lock(&wakeup_lock); 411 arch_spin_lock(&wakeup_lock);
@@ -435,7 +435,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
435 return; 435 return;
436 436
437 pc = preempt_count(); 437 pc = preempt_count();
438 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled); 438 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
439 if (unlikely(disabled != 1)) 439 if (unlikely(disabled != 1))
440 goto out; 440 goto out;
441 441
@@ -458,7 +458,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
458 458
459 local_save_flags(flags); 459 local_save_flags(flags);
460 460
461 data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu); 461 data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
462 data->preempt_timestamp = ftrace_now(cpu); 462 data->preempt_timestamp = ftrace_now(cpu);
463 tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); 463 tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
464 464
@@ -472,7 +472,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
472out_locked: 472out_locked:
473 arch_spin_unlock(&wakeup_lock); 473 arch_spin_unlock(&wakeup_lock);
474out: 474out:
475 atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled); 475 atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
476} 476}
477 477
478static void start_wakeup_tracer(struct trace_array *tr) 478static void start_wakeup_tracer(struct trace_array *tr)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 51c819c12c29..8672c40cb153 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -21,13 +21,13 @@ static inline int trace_valid_entry(struct trace_entry *entry)
21 return 0; 21 return 0;
22} 22}
23 23
24static int trace_test_buffer_cpu(struct trace_array *tr, int cpu) 24static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
25{ 25{
26 struct ring_buffer_event *event; 26 struct ring_buffer_event *event;
27 struct trace_entry *entry; 27 struct trace_entry *entry;
28 unsigned int loops = 0; 28 unsigned int loops = 0;
29 29
30 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) { 30 while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
31 entry = ring_buffer_event_data(event); 31 entry = ring_buffer_event_data(event);
32 32
33 /* 33 /*
@@ -58,7 +58,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
58 * Test the trace buffer to see if all the elements 58 * Test the trace buffer to see if all the elements
59 * are still sane. 59 * are still sane.
60 */ 60 */
61static int trace_test_buffer(struct trace_array *tr, unsigned long *count) 61static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
62{ 62{
63 unsigned long flags, cnt = 0; 63 unsigned long flags, cnt = 0;
64 int cpu, ret = 0; 64 int cpu, ret = 0;
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
67 local_irq_save(flags); 67 local_irq_save(flags);
68 arch_spin_lock(&ftrace_max_lock); 68 arch_spin_lock(&ftrace_max_lock);
69 69
70 cnt = ring_buffer_entries(tr->buffer); 70 cnt = ring_buffer_entries(buf->buffer);
71 71
72 /* 72 /*
73 * The trace_test_buffer_cpu runs a while loop to consume all data. 73 * The trace_test_buffer_cpu runs a while loop to consume all data.
@@ -78,7 +78,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
78 */ 78 */
79 tracing_off(); 79 tracing_off();
80 for_each_possible_cpu(cpu) { 80 for_each_possible_cpu(cpu) {
81 ret = trace_test_buffer_cpu(tr, cpu); 81 ret = trace_test_buffer_cpu(buf, cpu);
82 if (ret) 82 if (ret)
83 break; 83 break;
84 } 84 }
@@ -355,7 +355,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
355 msleep(100); 355 msleep(100);
356 356
357 /* we should have nothing in the buffer */ 357 /* we should have nothing in the buffer */
358 ret = trace_test_buffer(tr, &count); 358 ret = trace_test_buffer(&tr->trace_buffer, &count);
359 if (ret) 359 if (ret)
360 goto out; 360 goto out;
361 361
@@ -376,7 +376,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
376 ftrace_enabled = 0; 376 ftrace_enabled = 0;
377 377
378 /* check the trace buffer */ 378 /* check the trace buffer */
379 ret = trace_test_buffer(tr, &count); 379 ret = trace_test_buffer(&tr->trace_buffer, &count);
380 tracing_start(); 380 tracing_start();
381 381
382 /* we should only have one item */ 382 /* we should only have one item */
@@ -666,7 +666,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
666 ftrace_enabled = 0; 666 ftrace_enabled = 0;
667 667
668 /* check the trace buffer */ 668 /* check the trace buffer */
669 ret = trace_test_buffer(tr, &count); 669 ret = trace_test_buffer(&tr->trace_buffer, &count);
670 trace->reset(tr); 670 trace->reset(tr);
671 tracing_start(); 671 tracing_start();
672 672
@@ -737,7 +737,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
737 * Simulate the init() callback, but attach a watchdog callback 737 * Simulate the init() callback, but attach a watchdog callback
738 * to detect and recover from possible hangs 738 * to detect and recover from possible hangs
739 */ 739 */
740 tracing_reset_online_cpus(tr); 740 tracing_reset_online_cpus(&tr->trace_buffer);
741 set_graph_array(tr); 741 set_graph_array(tr);
742 ret = register_ftrace_graph(&trace_graph_return, 742 ret = register_ftrace_graph(&trace_graph_return,
743 &trace_graph_entry_watchdog); 743 &trace_graph_entry_watchdog);
@@ -760,7 +760,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
760 tracing_stop(); 760 tracing_stop();
761 761
762 /* check the trace buffer */ 762 /* check the trace buffer */
763 ret = trace_test_buffer(tr, &count); 763 ret = trace_test_buffer(&tr->trace_buffer, &count);
764 764
765 trace->reset(tr); 765 trace->reset(tr);
766 tracing_start(); 766 tracing_start();
@@ -815,9 +815,9 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
815 /* stop the tracing. */ 815 /* stop the tracing. */
816 tracing_stop(); 816 tracing_stop();
817 /* check both trace buffers */ 817 /* check both trace buffers */
818 ret = trace_test_buffer(tr, NULL); 818 ret = trace_test_buffer(&tr->trace_buffer, NULL);
819 if (!ret) 819 if (!ret)
820 ret = trace_test_buffer(&max_tr, &count); 820 ret = trace_test_buffer(&tr->max_buffer, &count);
821 trace->reset(tr); 821 trace->reset(tr);
822 tracing_start(); 822 tracing_start();
823 823
@@ -877,9 +877,9 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
877 /* stop the tracing. */ 877 /* stop the tracing. */
878 tracing_stop(); 878 tracing_stop();
879 /* check both trace buffers */ 879 /* check both trace buffers */
880 ret = trace_test_buffer(tr, NULL); 880 ret = trace_test_buffer(&tr->trace_buffer, NULL);
881 if (!ret) 881 if (!ret)
882 ret = trace_test_buffer(&max_tr, &count); 882 ret = trace_test_buffer(&tr->max_buffer, &count);
883 trace->reset(tr); 883 trace->reset(tr);
884 tracing_start(); 884 tracing_start();
885 885
@@ -943,11 +943,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
943 /* stop the tracing. */ 943 /* stop the tracing. */
944 tracing_stop(); 944 tracing_stop();
945 /* check both trace buffers */ 945 /* check both trace buffers */
946 ret = trace_test_buffer(tr, NULL); 946 ret = trace_test_buffer(&tr->trace_buffer, NULL);
947 if (ret) 947 if (ret)
948 goto out; 948 goto out;
949 949
950 ret = trace_test_buffer(&max_tr, &count); 950 ret = trace_test_buffer(&tr->max_buffer, &count);
951 if (ret) 951 if (ret)
952 goto out; 952 goto out;
953 953
@@ -973,11 +973,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
973 /* stop the tracing. */ 973 /* stop the tracing. */
974 tracing_stop(); 974 tracing_stop();
975 /* check both trace buffers */ 975 /* check both trace buffers */
976 ret = trace_test_buffer(tr, NULL); 976 ret = trace_test_buffer(&tr->trace_buffer, NULL);
977 if (ret) 977 if (ret)
978 goto out; 978 goto out;
979 979
980 ret = trace_test_buffer(&max_tr, &count); 980 ret = trace_test_buffer(&tr->max_buffer, &count);
981 981
982 if (!ret && !count) { 982 if (!ret && !count) {
983 printk(KERN_CONT ".. no entries found .."); 983 printk(KERN_CONT ".. no entries found ..");
@@ -1084,10 +1084,10 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1084 /* stop the tracing. */ 1084 /* stop the tracing. */
1085 tracing_stop(); 1085 tracing_stop();
1086 /* check both trace buffers */ 1086 /* check both trace buffers */
1087 ret = trace_test_buffer(tr, NULL); 1087 ret = trace_test_buffer(&tr->trace_buffer, NULL);
1088 printk("ret = %d\n", ret); 1088 printk("ret = %d\n", ret);
1089 if (!ret) 1089 if (!ret)
1090 ret = trace_test_buffer(&max_tr, &count); 1090 ret = trace_test_buffer(&tr->max_buffer, &count);
1091 1091
1092 1092
1093 trace->reset(tr); 1093 trace->reset(tr);
@@ -1126,7 +1126,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
1126 /* stop the tracing. */ 1126 /* stop the tracing. */
1127 tracing_stop(); 1127 tracing_stop();
1128 /* check the trace buffer */ 1128 /* check the trace buffer */
1129 ret = trace_test_buffer(tr, &count); 1129 ret = trace_test_buffer(&tr->trace_buffer, &count);
1130 trace->reset(tr); 1130 trace->reset(tr);
1131 tracing_start(); 1131 tracing_start();
1132 1132
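
The selftest refactor is what makes the max_tr removal pay off: trace_test_buffer() takes a struct trace_buffer *, so one validator serves both &tr->trace_buffer and &tr->max_buffer. A userspace sketch of that shape, with a trivial entry count in place of the real per-CPU consume loop (the names simply mirror the diff):

/*
 * Sketch of the selftest refactor: trace_test_buffer() now takes a
 * struct trace_buffer *, so one validator covers the live buffer
 * and, under CONFIG_TRACER_MAX_TRACE, the max_buffer. A trivial
 * entry count stands in for the real per-CPU consume loop.
 */
#include <stdio.h>

struct trace_buffer { unsigned long entries; };

struct trace_array {
        struct trace_buffer trace_buffer;
        struct trace_buffer max_buffer;
};

static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
        if (count)
                *count = buf->entries;
        return 0;                       /* 0: buffer looks sane */
}

int main(void)
{
        struct trace_array tr = { { 42 }, { 7 } };
        unsigned long count = 0;
        int ret = trace_test_buffer(&tr.trace_buffer, NULL);

        if (!ret)
                ret = trace_test_buffer(&tr.max_buffer, &count);
        printf("ret=%d, max_buffer entries=%lu\n", ret, count);
        return 0;
}
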
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 1cd37ffb4093..68f3f344be65 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -321,7 +321,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
321 321
322 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; 322 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
323 323
324 buffer = tr->buffer; 324 buffer = tr->trace_buffer.buffer;
325 event = trace_buffer_lock_reserve(buffer, 325 event = trace_buffer_lock_reserve(buffer,
326 sys_data->enter_event->event.type, size, 0, 0); 326 sys_data->enter_event->event.type, size, 0, 0);
327 if (!event) 327 if (!event)
@@ -355,7 +355,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
355 if (!sys_data) 355 if (!sys_data)
356 return; 356 return;
357 357
358 buffer = tr->buffer; 358 buffer = tr->trace_buffer.buffer;
359 event = trace_buffer_lock_reserve(buffer, 359 event = trace_buffer_lock_reserve(buffer,
360 sys_data->exit_event->event.type, sizeof(*entry), 0, 0); 360 sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
361 if (!event) 361 if (!event)