 kernel/trace/trace.c | 1890 +++++++++++++++++++++++++++----------------------
 1 file changed, 976 insertions(+), 914 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8f3fb3db61c3..9f3b478f9171 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -14,6 +14,7 @@
 #include <linux/utsrelease.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/notifier.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -22,6 +23,7 @@
 #include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/kdebug.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
@@ -31,25 +33,37 @@
 #include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
+#include <linux/ring_buffer.h>
+#include <linux/irqflags.h>
 
 #include "trace.h"
 
+#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
+
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly tracing_thresh;
 
-static unsigned long __read_mostly tracing_nr_buffers;
+static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+
+static inline void ftrace_disable_cpu(void)
+{
+	preempt_disable();
+	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+}
+
+static inline void ftrace_enable_cpu(void)
+{
+	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	preempt_enable();
+}
+
 static cpumask_t __read_mostly tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu) \
 	for_each_cpu_mask(cpu, tracing_buffer_mask)
 
-static int trace_alloc_page(void);
-static int trace_free_page(void);
-
 static int tracing_disabled = 1;
 
-static unsigned long tracing_pages_allocated;
-
 long
 ns2usecs(cycle_t nsec)
 {
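Note on the hunk above: the per-CPU counter and the ftrace_disable_cpu()/ftrace_enable_cpu() pair form a recursion guard — while a reader is walking a ring buffer on a CPU, writers on that CPU must not add events. A minimal sketch of how a writer is expected to honor the guard (the function name here is hypothetical; the real consumers are trace_function() and ftrace_trace_stack() later in this patch):

	static void hypothetical_writer(struct trace_array *tr)
	{
		/* Bail out while a reader has this CPU's buffer pinned */
		if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
			return;
		/* ... reserve, fill and commit a ring_buffer_event ... */
	}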
@@ -60,7 +74,9 @@ ns2usecs(cycle_t nsec)
 
 cycle_t ftrace_now(int cpu)
 {
-	return cpu_clock(cpu);
+	u64 ts = ring_buffer_time_stamp(cpu);
+	ring_buffer_normalize_time_stamp(cpu, &ts);
+	return ts;
 }
 
 /*
@@ -100,11 +116,18 @@ static int tracer_enabled = 1;
 int ftrace_function_enabled;
 
 /*
- * trace_nr_entries is the number of entries that is allocated
- * for a buffer. Note, the number of entries is always rounded
- * to ENTRIES_PER_PAGE.
+ * trace_buf_size is the size in bytes that is allocated
+ * for a buffer. Note, the number of bytes is always rounded
+ * to page size.
+ *
+ * This number is purposely set to a low number of 16384.
+ * If the dump on oops happens, it will be much appreciated
+ * to not have to wait for all that output. Anyway this can be
+ * boot time and run time configurable.
  */
-static unsigned long trace_nr_entries = 65536UL;
+#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
+
+static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 
 /* trace_types holds a link list of available tracers. */
 static struct tracer *trace_types __read_mostly;
@@ -133,24 +156,6 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds iter_ctrl options */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
 
-static notrace void no_trace_init(struct trace_array *tr)
-{
-	int cpu;
-
-	ftrace_function_enabled = 0;
-	if(tr->ctrl)
-		for_each_online_cpu(cpu)
-			tracing_reset(tr->data[cpu]);
-	tracer_enabled = 0;
-}
-
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly = {
-	.name		= "none",
-	.init		= no_trace_init
-};
-
-
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -167,23 +172,21 @@ void trace_wake_up(void)
 	wake_up(&trace_wait);
 }
 
-#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
-
-static int __init set_nr_entries(char *str)
+static int __init set_buf_size(char *str)
 {
-	unsigned long nr_entries;
+	unsigned long buf_size;
 	int ret;
 
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &nr_entries);
+	ret = strict_strtoul(str, 0, &buf_size);
 	/* nr_entries can not be zero */
-	if (ret < 0 || nr_entries == 0)
+	if (ret < 0 || buf_size == 0)
 		return 0;
-	trace_nr_entries = nr_entries;
+	trace_buf_size = buf_size;
 	return 1;
 }
-__setup("trace_entries=", set_nr_entries);
+__setup("trace_buf_size=", set_buf_size);
 
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
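For context on the rename above: the boot parameter now takes a size in bytes (rounded to page size, per the comment earlier in this patch) instead of an entry count. A hypothetical kernel command line, using the default size this patch defines:

	trace_buf_size=1441792

strict_strtoul() is called with base 0, so a hexadecimal value such as 0x160000 should parse as well.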
@@ -191,21 +194,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
 }
 
 /*
- * trace_flag_type is an enumeration that holds different
- * states when a trace occurs. These are:
- *  IRQS_OFF	- interrupts were disabled
- *  NEED_RESCED	- reschedule is requested
- *  HARDIRQ	- inside an interrupt handler
- *  SOFTIRQ	- inside a softirq handler
- */
-enum trace_flag_type {
-	TRACE_FLAG_IRQS_OFF	= 0x01,
-	TRACE_FLAG_NEED_RESCHED	= 0x02,
-	TRACE_FLAG_HARDIRQ	= 0x04,
-	TRACE_FLAG_SOFTIRQ	= 0x08,
-};
-
-/*
  * TRACE_ITER_SYM_MASK masks the options in trace_flags that
  * control the output of kernel symbols.
  */
@@ -224,6 +212,7 @@ static const char *trace_options[] = {
 	"block",
 	"stacktrace",
 	"sched-tree",
+	"ftrace_printk",
 	NULL
 };
 
@@ -266,54 +255,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	tracing_record_cmdline(current);
 }
 
-#define CHECK_COND(cond)		\
-	if (unlikely(cond)) {		\
-		tracing_disabled = 1;	\
-		WARN_ON(1);		\
-		return -1;		\
-	}
-
-/**
- * check_pages - integrity check of trace buffers
- *
- * As a safty measure we check to make sure the data pages have not
- * been corrupted.
- */
-int check_pages(struct trace_array_cpu *data)
-{
-	struct page *page, *tmp;
-
-	CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
-	CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
-
-	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
-		CHECK_COND(page->lru.next->prev != &page->lru);
-		CHECK_COND(page->lru.prev->next != &page->lru);
-	}
-
-	return 0;
-}
-
-/**
- * head_page - page address of the first page in per_cpu buffer.
- *
- * head_page returns the page address of the first page in
- * a per_cpu buffer. This also preforms various consistency
- * checks to make sure the buffer has not been corrupted.
- */
-void *head_page(struct trace_array_cpu *data)
-{
-	struct page *page;
-
-	if (list_empty(&data->trace_pages))
-		return NULL;
-
-	page = list_entry(data->trace_pages.next, struct page, lru);
-	BUG_ON(&page->lru == &data->trace_pages);
-
-	return page_address(page);
-}
-
 /**
  * trace_seq_printf - sequence printing of trace information
  * @s: trace sequence descriptor
@@ -395,28 +336,23 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
 	return len;
 }
 
-#define HEX_CHARS 17
-static const char hex2asc[] = "0123456789abcdef";
+#define MAX_MEMHEX_BYTES	8
+#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
 
 static int
 trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
 {
 	unsigned char hex[HEX_CHARS];
 	unsigned char *data = mem;
-	unsigned char byte;
 	int i, j;
 
-	BUG_ON(len >= HEX_CHARS);
-
 #ifdef __BIG_ENDIAN
 	for (i = 0, j = 0; i < len; i++) {
 #else
 	for (i = len-1, j = 0; i >= 0; i--) {
 #endif
-		byte = data[i];
-
-		hex[j++] = hex2asc[byte & 0x0f];
-		hex[j++] = hex2asc[byte >> 4];
+		hex[j++] = hex_asc_hi(data[i]);
+		hex[j++] = hex_asc_lo(data[i]);
 	}
 	hex[j++] = ' ';
 
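A worked example of the rewritten loop above, assuming a little-endian machine: a two-byte value 0x1234 is stored in memory as {0x34, 0x12}; the loop walks the bytes from the highest address down and emits hex_asc_hi() then hex_asc_lo() for each byte, so the buffer receives "1234 " and the number reads naturally. HEX_CHARS is now derived from MAX_MEMHEX_BYTES (8), a limit that the SEQ_PUT_HEX_FIELD_RET() macro later in this patch enforces at compile time via BUILD_BUG_ON().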
@@ -460,34 +396,6 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
 	trace_seq_reset(s);
 }
 
-/*
- * flip the trace buffers between two trace descriptors.
- * This usually is the buffers between the global_trace and
- * the max_tr to record a snapshot of a current trace.
- *
- * The ftrace_max_lock must be held.
- */
-static void
-flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
-{
-	struct list_head flip_pages;
-
-	INIT_LIST_HEAD(&flip_pages);
-
-	memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
-		sizeof(struct trace_array_cpu) -
-		offsetof(struct trace_array_cpu, trace_head_idx));
-
-	check_pages(tr1);
-	check_pages(tr2);
-	list_splice_init(&tr1->trace_pages, &flip_pages);
-	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
-	list_splice_init(&flip_pages, &tr2->trace_pages);
-	BUG_ON(!list_empty(&flip_pages));
-	check_pages(tr1);
-	check_pages(tr2);
-}
-
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -500,17 +408,17 @@ flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct trace_array_cpu *data;
-	int i;
+	struct ring_buffer *buf = tr->buffer;
 
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
-	/* clear out all the previous traces */
-	for_each_tracing_cpu(i) {
-		data = tr->data[i];
-		flip_trace(max_tr.data[i], data);
-		tracing_reset(data);
-	}
+
+	tr->buffer = max_tr.buffer;
+	max_tr.buffer = buf;
+
+	ftrace_disable_cpu();
+	ring_buffer_reset(tr->buffer);
+	ftrace_enable_cpu();
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
@@ -527,16 +435,19 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct trace_array_cpu *data = tr->data[cpu];
-	int i;
+	int ret;
 
 	WARN_ON_ONCE(!irqs_disabled());
 	__raw_spin_lock(&ftrace_max_lock);
-	for_each_tracing_cpu(i)
-		tracing_reset(max_tr.data[i]);
 
-	flip_trace(max_tr.data[cpu], data);
-	tracing_reset(data);
+	ftrace_disable_cpu();
+
+	ring_buffer_reset(max_tr.buffer);
+	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
+
+	ftrace_enable_cpu();
+
+	WARN_ON_ONCE(ret);
 
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
@@ -573,7 +484,6 @@ int register_tracer(struct tracer *type)
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 	if (type->selftest) {
 		struct tracer *saved_tracer = current_trace;
-		struct trace_array_cpu *data;
 		struct trace_array *tr = &global_trace;
 		int saved_ctrl = tr->ctrl;
 		int i;
@@ -585,10 +495,7 @@
 		 * If we fail, we do not register this tracer.
 		 */
 		for_each_tracing_cpu(i) {
-			data = tr->data[i];
-			if (!head_page(data))
-				continue;
-			tracing_reset(data);
+			tracing_reset(tr, i);
 		}
 		current_trace = type;
 		tr->ctrl = 0;
@@ -604,10 +511,7 @@
 		}
 		/* Only reset on passing, to avoid touching corrupted buffers */
 		for_each_tracing_cpu(i) {
-			data = tr->data[i];
-			if (!head_page(data))
-				continue;
-			tracing_reset(data);
+			tracing_reset(tr, i);
 		}
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -653,13 +557,11 @@ void unregister_tracer(struct tracer *type)
 	mutex_unlock(&trace_types_lock);
 }
 
-void tracing_reset(struct trace_array_cpu *data)
+void tracing_reset(struct trace_array *tr, int cpu)
 {
-	data->trace_idx = 0;
-	data->overrun = 0;
-	data->trace_head = data->trace_tail = head_page(data);
-	data->trace_head_idx = 0;
-	data->trace_tail_idx = 0;
+	ftrace_disable_cpu();
+	ring_buffer_reset_cpu(tr->buffer, cpu);
+	ftrace_enable_cpu();
 }
 
 #define SAVED_CMDLINES 128
@@ -745,82 +647,20 @@ void tracing_record_cmdline(struct task_struct *tsk)
 	trace_save_cmdline(tsk);
 }
 
-static inline struct list_head *
-trace_next_list(struct trace_array_cpu *data, struct list_head *next)
-{
-	/*
-	 * Roundrobin - but skip the head (which is not a real page):
-	 */
-	next = next->next;
-	if (unlikely(next == &data->trace_pages))
-		next = next->next;
-	BUG_ON(next == &data->trace_pages);
-
-	return next;
-}
-
-static inline void *
-trace_next_page(struct trace_array_cpu *data, void *addr)
-{
-	struct list_head *next;
-	struct page *page;
-
-	page = virt_to_page(addr);
-
-	next = trace_next_list(data, &page->lru);
-	page = list_entry(next, struct page, lru);
-
-	return page_address(page);
-}
-
-static inline struct trace_entry *
-tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
-{
-	unsigned long idx, idx_next;
-	struct trace_entry *entry;
-
-	data->trace_idx++;
-	idx = data->trace_head_idx;
-	idx_next = idx + 1;
-
-	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
-
-	entry = data->trace_head + idx * TRACE_ENTRY_SIZE;
-
-	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
-		data->trace_head = trace_next_page(data, data->trace_head);
-		idx_next = 0;
-	}
-
-	if (data->trace_head == data->trace_tail &&
-	    idx_next == data->trace_tail_idx) {
-		/* overrun */
-		data->overrun++;
-		data->trace_tail_idx++;
-		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
-			data->trace_tail =
-				trace_next_page(data, data->trace_tail);
-			data->trace_tail_idx = 0;
-		}
-	}
-
-	data->trace_head_idx = idx_next;
-
-	return entry;
-}
-
-static inline void
-tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
+void
+tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+			     int pc)
 {
 	struct task_struct *tsk = current;
-	unsigned long pc;
 
-	pc = preempt_count();
-
-	entry->preempt_count = pc & 0xff;
-	entry->pid = (tsk) ? tsk->pid : 0;
-	entry->t = ftrace_now(raw_smp_processor_id());
-	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+	entry->preempt_count = pc & 0xff;
+	entry->pid = (tsk) ? tsk->pid : 0;
+	entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+#else
+		TRACE_FLAG_IRQS_NOSUPPORT |
+#endif
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -828,145 +668,141 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 
 void
 trace_function(struct trace_array *tr, struct trace_array_cpu *data,
-	       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+	       int pc)
 {
-	struct trace_entry *entry;
+	struct ring_buffer_event *event;
+	struct ftrace_entry *entry;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
-	entry->type = TRACE_FN;
-	entry->fn.ip = ip;
-	entry->fn.parent_ip = parent_ip;
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
+	/* If we are reading the ring buffer, don't trace */
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_FN;
+	entry->ip = ip;
+	entry->parent_ip = parent_ip;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+       int pc)
 {
 	if (likely(!atomic_read(&data->disabled)))
-		trace_function(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags, pc);
 }
 
-#ifdef CONFIG_MMIOTRACE
-void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data,
-					struct mmiotrace_rw *rw)
+static void ftrace_trace_stack(struct trace_array *tr,
+			       struct trace_array_cpu *data,
+			       unsigned long flags,
+			       int skip, int pc)
 {
-	struct trace_entry *entry;
+#ifdef CONFIG_STACKTRACE
+	struct ring_buffer_event *event;
+	struct stack_entry *entry;
+	struct stack_trace trace;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-
-	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, 0);
-	entry->type = TRACE_MMIO_RW;
-	entry->mmiorw = *rw;
-
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
-
-	trace_wake_up();
-}
-
-void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
-					struct mmiotrace_map *map)
-{
-	struct trace_entry *entry;
-	unsigned long irq_flags;
+	if (!(trace_flags & TRACE_ITER_STACKTRACE))
+		return;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_STACK;
 
-	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, 0);
-	entry->type = TRACE_MMIO_MAP;
-	entry->mmiomap = *map;
+	memset(&entry->caller, 0, sizeof(entry->caller));
 
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
+	trace.nr_entries = 0;
+	trace.max_entries = FTRACE_STACK_ENTRIES;
+	trace.skip = skip;
+	trace.entries = entry->caller;
 
-	trace_wake_up();
-}
+	save_stack_trace(&trace);
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 #endif
+}
 
 void __trace_stack(struct trace_array *tr,
 		   struct trace_array_cpu *data,
 		   unsigned long flags,
 		   int skip)
 {
-	struct trace_entry *entry;
-	struct stack_trace trace;
-
-	if (!(trace_flags & TRACE_ITER_STACKTRACE))
-		return;
-
-	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
-	entry->type = TRACE_STACK;
-
-	memset(&entry->stack, 0, sizeof(entry->stack));
-
-	trace.nr_entries = 0;
-	trace.max_entries = FTRACE_STACK_ENTRIES;
-	trace.skip = skip;
-	trace.entries = entry->stack.caller;
-
-	save_stack_trace(&trace);
+	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
 }
 
-void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
+static void
+ftrace_trace_special(void *__tr, void *__data,
+		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
+		     int pc)
 {
+	struct ring_buffer_event *event;
 	struct trace_array_cpu *data = __data;
 	struct trace_array *tr = __tr;
-	struct trace_entry *entry;
+	struct special_entry *entry;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, 0);
-	entry->type = TRACE_SPECIAL;
-	entry->special.arg1 = arg1;
-	entry->special.arg2 = arg2;
-	entry->special.arg3 = arg3;
-	__trace_stack(tr, data, irq_flags, 4);
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					 &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, pc);
+	entry->ent.type = TRACE_SPECIAL;
+	entry->arg1 = arg1;
+	entry->arg2 = arg2;
+	entry->arg3 = arg3;
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
 
 	trace_wake_up();
 }
 
 void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+}
+
+void
 tracing_sched_switch_trace(struct trace_array *tr,
 			   struct trace_array_cpu *data,
 			   struct task_struct *prev,
 			   struct task_struct *next,
-			   unsigned long flags)
+			   unsigned long flags, int pc)
 {
-	struct trace_entry *entry;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
-	entry->type = TRACE_CTX;
-	entry->ctx.prev_pid = prev->pid;
-	entry->ctx.prev_prio = prev->prio;
-	entry->ctx.prev_state = prev->state;
-	entry->ctx.next_pid = next->pid;
-	entry->ctx.next_prio = next->prio;
-	entry->ctx.next_state = next->state;
-	__trace_stack(tr, data, flags, 5);
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					   &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_CTX;
+	entry->prev_pid = prev->pid;
+	entry->prev_prio = prev->prio;
+	entry->prev_state = prev->state;
+	entry->next_pid = next->pid;
+	entry->next_prio = next->prio;
+	entry->next_state = next->state;
+	entry->next_cpu = task_cpu(next);
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ftrace_trace_stack(tr, data, flags, 5, pc);
 }
 
 void
@@ -974,25 +810,28 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 			   struct trace_array_cpu *data,
 			   struct task_struct *wakee,
 			   struct task_struct *curr,
-			   unsigned long flags)
+			   unsigned long flags, int pc)
 {
-	struct trace_entry *entry;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
 	unsigned long irq_flags;
 
-	raw_local_irq_save(irq_flags);
-	__raw_spin_lock(&data->lock);
-	entry = tracing_get_trace_entry(tr, data);
-	tracing_generic_entry_update(entry, flags);
-	entry->type = TRACE_WAKE;
-	entry->ctx.prev_pid = curr->pid;
-	entry->ctx.prev_prio = curr->prio;
-	entry->ctx.prev_state = curr->state;
-	entry->ctx.next_pid = wakee->pid;
-	entry->ctx.next_prio = wakee->prio;
-	entry->ctx.next_state = wakee->state;
-	__trace_stack(tr, data, flags, 6);
-	__raw_spin_unlock(&data->lock);
-	raw_local_irq_restore(irq_flags);
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+					   &irq_flags);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, flags, pc);
+	entry->ent.type = TRACE_WAKE;
+	entry->prev_pid = curr->pid;
+	entry->prev_prio = curr->prio;
+	entry->prev_state = curr->state;
+	entry->next_pid = wakee->pid;
+	entry->next_prio = wakee->prio;
+	entry->next_state = wakee->state;
+	entry->next_cpu = task_cpu(wakee);
+	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+	ftrace_trace_stack(tr, data, flags, 6, pc);
 
 	trace_wake_up();
 }
@@ -1002,26 +841,24 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
 	int cpu;
+	int pc;
 
-	if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl)
+	if (tracing_disabled || !tr->ctrl)
 		return;
 
-	local_irq_save(flags);
+	pc = preempt_count();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
-		__trace_special(tr, data, arg1, arg2, arg3);
+	if (likely(!atomic_read(&data->disabled)))
+		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	preempt_enable_notrace();
 }
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
@@ -1029,24 +866,28 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
-	int cpu;
+	int cpu, resched;
+	int pc;
 
 	if (unlikely(!ftrace_function_enabled))
 		return;
 
-	if (skip_trace(ip))
-		return;
-
-	local_irq_save(flags);
+	pc = preempt_count();
+	resched = need_resched();
+	preempt_disable_notrace();
+	local_save_flags(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags);
+		trace_function(tr, data, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	if (resched)
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -1073,111 +914,96 @@ enum trace_file_type {
 	TRACE_FILE_LAT_FMT = 1,
 };
 
-static struct trace_entry *
-trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
-		struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
 {
-	struct page *page;
-	struct trace_entry *array;
+	/* Don't allow ftrace to trace into the ring buffers */
+	ftrace_disable_cpu();
 
-	if (iter->next_idx[cpu] >= tr->entries ||
-	    iter->next_idx[cpu] >= data->trace_idx ||
-	    (data->trace_head == data->trace_tail &&
-	     data->trace_head_idx == data->trace_tail_idx))
-		return NULL;
+	iter->idx++;
+	if (iter->buffer_iter[iter->cpu])
+		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
 
-	if (!iter->next_page[cpu]) {
-		/* Initialize the iterator for this cpu trace buffer */
-		WARN_ON(!data->trace_tail);
-		page = virt_to_page(data->trace_tail);
-		iter->next_page[cpu] = &page->lru;
-		iter->next_page_idx[cpu] = data->trace_tail_idx;
-	}
+	ftrace_enable_cpu();
+}
+
+static struct trace_entry *
+peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
+{
+	struct ring_buffer_event *event;
+	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
 
-	page = list_entry(iter->next_page[cpu], struct page, lru);
-	BUG_ON(&data->trace_pages == &page->lru);
+	/* Don't allow ftrace to trace into the ring buffers */
+	ftrace_disable_cpu();
 
-	array = page_address(page);
+	if (buf_iter)
+		event = ring_buffer_iter_peek(buf_iter, ts);
+	else
+		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
+
+	ftrace_enable_cpu();
 
-	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
-	return &array[iter->next_page_idx[cpu]];
+	return event ? ring_buffer_event_data(event) : NULL;
 }
 
 static struct trace_entry *
-find_next_entry(struct trace_iterator *iter, int *ent_cpu)
+__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 {
-	struct trace_array *tr = iter->tr;
+	struct ring_buffer *buffer = iter->tr->buffer;
 	struct trace_entry *ent, *next = NULL;
+	u64 next_ts = 0, ts;
 	int next_cpu = -1;
 	int cpu;
 
 	for_each_tracing_cpu(cpu) {
-		if (!head_page(tr->data[cpu]))
+
+		if (ring_buffer_empty_cpu(buffer, cpu))
 			continue;
-		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
+
+		ent = peek_next_entry(iter, cpu, &ts);
+
 		/*
 		 * Pick the entry with the smallest timestamp:
 		 */
-		if (ent && (!next || ent->t < next->t)) {
+		if (ent && (!next || ts < next_ts)) {
 			next = ent;
 			next_cpu = cpu;
+			next_ts = ts;
 		}
 	}
 
 	if (ent_cpu)
 		*ent_cpu = next_cpu;
 
+	if (ent_ts)
+		*ent_ts = next_ts;
+
 	return next;
 }
 
-static void trace_iterator_increment(struct trace_iterator *iter)
+/* Find the next real entry, without updating the iterator itself */
+static struct trace_entry *
+find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 {
-	iter->idx++;
-	iter->next_idx[iter->cpu]++;
-	iter->next_page_idx[iter->cpu]++;
-
-	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
-		struct trace_array_cpu *data = iter->tr->data[iter->cpu];
-
-		iter->next_page_idx[iter->cpu] = 0;
-		iter->next_page[iter->cpu] =
-			trace_next_list(data, iter->next_page[iter->cpu]);
-	}
+	return __find_next_entry(iter, ent_cpu, ent_ts);
 }
 
-static void trace_consume(struct trace_iterator *iter)
+/* Find the next real entry, and increment the iterator to the next entry */
+static void *find_next_entry_inc(struct trace_iterator *iter)
 {
-	struct trace_array_cpu *data = iter->tr->data[iter->cpu];
+	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 
-	data->trace_tail_idx++;
-	if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
-		data->trace_tail = trace_next_page(data, data->trace_tail);
-		data->trace_tail_idx = 0;
-	}
+	if (iter->ent)
+		trace_iterator_increment(iter, iter->cpu);
 
-	/* Check if we empty it, then reset the index */
-	if (data->trace_head == data->trace_tail &&
-	    data->trace_head_idx == data->trace_tail_idx)
-		data->trace_idx = 0;
+	return iter->ent ? iter : NULL;
 }
 
-static void *find_next_entry_inc(struct trace_iterator *iter)
+static void trace_consume(struct trace_iterator *iter)
 {
-	struct trace_entry *next;
-	int next_cpu = -1;
-
-	next = find_next_entry(iter, &next_cpu);
-
-	iter->prev_ent = iter->ent;
-	iter->prev_cpu = iter->cpu;
-
-	iter->ent = next;
-	iter->cpu = next_cpu;
-
-	if (next)
-		trace_iterator_increment(iter);
-
-	return next ? iter : NULL;
+	/* Don't allow ftrace to trace into the ring buffers */
+	ftrace_disable_cpu();
+	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
+	ftrace_enable_cpu();
 }
 
 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1210,7 +1036,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	struct trace_iterator *iter = m->private;
 	void *p = NULL;
 	loff_t l = 0;
-	int i;
+	int cpu;
 
 	mutex_lock(&trace_types_lock);
 
@@ -1229,14 +1055,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		iter->ent = NULL;
 		iter->cpu = 0;
 		iter->idx = -1;
-		iter->prev_ent = NULL;
-		iter->prev_cpu = -1;
 
-		for_each_tracing_cpu(i) {
-			iter->next_idx[i] = 0;
-			iter->next_page[i] = NULL;
+		ftrace_disable_cpu();
+
+		for_each_tracing_cpu(cpu) {
+			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
 		}
 
+		ftrace_enable_cpu();
+
 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 			;
 
@@ -1261,17 +1088,20 @@ static void s_stop(struct seq_file *m, void *p)
 	mutex_unlock(&trace_types_lock);
 }
 
-#define KRETPROBE_MSG "[unknown/kretprobe'd]"
-
 #ifdef CONFIG_KRETPROBES
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-	return addr == (unsigned long)kretprobe_trampoline;
+	static const char tramp_name[] = "kretprobe_trampoline";
+	int size = sizeof(tramp_name);
+
+	if (strncmp(tramp_name, name, size) == 0)
+		return "[unknown/kretprobe'd]";
+	return name;
 }
 #else
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-	return 0;
+	return name;
 }
 #endif /* CONFIG_KRETPROBES */
 
@@ -1280,10 +1110,13 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
 {
 #ifdef CONFIG_KALLSYMS
 	char str[KSYM_SYMBOL_LEN];
+	const char *name;
 
 	kallsyms_lookup(address, NULL, NULL, NULL, str);
 
-	return trace_seq_printf(s, fmt, str);
+	name = kretprobed(str);
+
+	return trace_seq_printf(s, fmt, name);
 #endif
 	return 1;
 }
@@ -1294,9 +1127,12 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 {
 #ifdef CONFIG_KALLSYMS
 	char str[KSYM_SYMBOL_LEN];
+	const char *name;
 
 	sprint_symbol(str, address);
-	return trace_seq_printf(s, fmt, str);
+	name = kretprobed(str);
+
+	return trace_seq_printf(s, fmt, name);
 #endif
 	return 1;
 }
@@ -1330,21 +1166,21 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
 
 static void print_lat_help_header(struct seq_file *m)
 {
-	seq_puts(m, "#                _------=> CPU#            \n");
-	seq_puts(m, "#               / _-----=> irqs-off        \n");
-	seq_puts(m, "#              | / _----=> need-resched    \n");
-	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
-	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
-	seq_puts(m, "#              |||| /                      \n");
-	seq_puts(m, "#              |||||     delay             \n");
-	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
-	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
+	seq_puts(m, "#                  _------=> CPU#            \n");
+	seq_puts(m, "#                 / _-----=> irqs-off        \n");
+	seq_puts(m, "#                | / _----=> need-resched    \n");
+	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
+	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
+	seq_puts(m, "#                |||| /                      \n");
+	seq_puts(m, "#                |||||     delay             \n");
+	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
+	seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
 }
 
 static void print_func_help_header(struct seq_file *m)
 {
-	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
-	seq_puts(m, "#              | |      |          |         |\n");
+	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |       |          |         |\n");
 }
 
 
@@ -1355,23 +1191,16 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	struct trace_array *tr = iter->tr;
 	struct trace_array_cpu *data = tr->data[tr->cpu];
 	struct tracer *type = current_trace;
-	unsigned long total = 0;
-	unsigned long entries = 0;
-	int cpu;
+	unsigned long total;
+	unsigned long entries;
 	const char *name = "preemption";
 
 	if (type)
 		name = type->name;
 
-	for_each_tracing_cpu(cpu) {
-		if (head_page(tr->data[cpu])) {
-			total += tr->data[cpu]->trace_idx;
-			if (tr->data[cpu]->trace_idx > tr->entries)
-				entries += tr->entries;
-			else
-				entries += tr->data[cpu]->trace_idx;
-		}
-	}
+	entries = ring_buffer_entries(iter->tr->buffer);
+	total = entries +
+		ring_buffer_overruns(iter->tr->buffer);
 
 	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
 		   name, UTS_RELEASE);
@@ -1428,9 +1257,10 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
 	comm = trace_find_cmdline(entry->pid);
 
 	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
-	trace_seq_printf(s, "%d", cpu);
+	trace_seq_printf(s, "%3d", cpu);
 	trace_seq_printf(s, "%c%c",
-			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
+			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+			 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
 			((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
 
 	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
@@ -1457,7 +1287,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
 unsigned long preempt_mark_thresh = 100;
 
 static void
-lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
+lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
 		    unsigned long rel_usecs)
 {
 	trace_seq_printf(s, " %4lldus", abs_usecs);
@@ -1471,34 +1301,76 @@ lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
 
 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
 
-static int
+/*
+ * The message is supposed to contain an ending newline.
+ * If the printing stops prematurely, try to add a newline of our own.
+ */
+void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
+{
+	struct trace_entry *ent;
+	struct trace_field_cont *cont;
+	bool ok = true;
+
+	ent = peek_next_entry(iter, iter->cpu, NULL);
+	if (!ent || ent->type != TRACE_CONT) {
+		trace_seq_putc(s, '\n');
+		return;
+	}
+
+	do {
+		cont = (struct trace_field_cont *)ent;
+		if (ok)
+			ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
+
+		ftrace_disable_cpu();
+
+		if (iter->buffer_iter[iter->cpu])
+			ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+		else
+			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+
+		ftrace_enable_cpu();
+
+		ent = peek_next_entry(iter, iter->cpu, NULL);
+	} while (ent && ent->type == TRACE_CONT);
+
+	if (!ok)
+		trace_seq_putc(s, '\n');
+}
+
+static enum print_line_t
 print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 {
 	struct trace_seq *s = &iter->seq;
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
-	struct trace_entry *next_entry = find_next_entry(iter, NULL);
+	struct trace_entry *next_entry;
 	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
 	struct trace_entry *entry = iter->ent;
 	unsigned long abs_usecs;
 	unsigned long rel_usecs;
+	u64 next_ts;
 	char *comm;
 	int S, T;
 	int i;
 	unsigned state;
 
+	if (entry->type == TRACE_CONT)
+		return TRACE_TYPE_HANDLED;
+
+	next_entry = find_next_entry(iter, NULL, &next_ts);
 	if (!next_entry)
-		next_entry = entry;
-	rel_usecs = ns2usecs(next_entry->t - entry->t);
-	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);
+		next_ts = iter->ts;
+	rel_usecs = ns2usecs(next_ts - iter->ts);
+	abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
 
 	if (verbose) {
 		comm = trace_find_cmdline(entry->pid);
-		trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
+		trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
 				 " %ld.%03ldms (+%ld.%03ldms): ",
 				 comm,
 				 entry->pid, cpu, entry->flags,
 				 entry->preempt_count, trace_idx,
-				 ns2usecs(entry->t),
+				 ns2usecs(iter->ts),
 				 abs_usecs/1000,
 				 abs_usecs % 1000, rel_usecs/1000,
 				 rel_usecs % 1000);
@@ -1507,52 +1379,82 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
 		lat_print_timestamp(s, abs_usecs, rel_usecs);
 	}
 	switch (entry->type) {
-	case TRACE_FN:
-		seq_print_ip_sym(s, entry->fn.ip, sym_flags);
+	case TRACE_FN: {
+		struct ftrace_entry *field;
+
+		trace_assign_type(field, entry);
+
+		seq_print_ip_sym(s, field->ip, sym_flags);
 		trace_seq_puts(s, " (");
-		if (kretprobed(entry->fn.parent_ip))
-			trace_seq_puts(s, KRETPROBE_MSG);
-		else
-			seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
+		seq_print_ip_sym(s, field->parent_ip, sym_flags);
 		trace_seq_puts(s, ")\n");
 		break;
+	}
 	case TRACE_CTX:
-	case TRACE_WAKE:
-		T = entry->ctx.next_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.next_state] : 'X';
+	case TRACE_WAKE: {
+		struct ctx_switch_entry *field;
 
-		state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
+		trace_assign_type(field, entry);
+
+		T = field->next_state < sizeof(state_to_char) ?
+			state_to_char[field->next_state] : 'X';
+
+		state = field->prev_state ?
+			__ffs(field->prev_state) + 1 : 0;
 		S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
-		comm = trace_find_cmdline(entry->ctx.next_pid);
-		trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
-				 entry->ctx.prev_pid,
-				 entry->ctx.prev_prio,
+		comm = trace_find_cmdline(field->next_pid);
+		trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
+				 field->prev_pid,
+				 field->prev_prio,
 				 S, entry->type == TRACE_CTX ? "==>" : "  +",
-				 entry->ctx.next_pid,
-				 entry->ctx.next_prio,
+				 field->next_cpu,
+				 field->next_pid,
+				 field->next_prio,
 				 T, comm);
 		break;
-	case TRACE_SPECIAL:
+	}
+	case TRACE_SPECIAL: {
+		struct special_entry *field;
+
+		trace_assign_type(field, entry);
+
 		trace_seq_printf(s, "# %ld %ld %ld\n",
-				 entry->special.arg1,
-				 entry->special.arg2,
-				 entry->special.arg3);
+				 field->arg1,
+				 field->arg2,
+				 field->arg3);
 		break;
-	case TRACE_STACK:
+	}
+	case TRACE_STACK: {
+		struct stack_entry *field;
+
+		trace_assign_type(field, entry);
+
 		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
 			if (i)
 				trace_seq_puts(s, " <= ");
-			seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
+			seq_print_ip_sym(s, field->caller[i], sym_flags);
 		}
 		trace_seq_puts(s, "\n");
 		break;
+	}
+	case TRACE_PRINT: {
+		struct print_entry *field;
+
+		trace_assign_type(field, entry);
+
+		seq_print_ip_sym(s, field->ip, sym_flags);
+		trace_seq_printf(s, ": %s", field->buf);
+		if (entry->flags & TRACE_FLAG_CONT)
+			trace_seq_print_cont(s, iter);
+		break;
+	}
 	default:
 		trace_seq_printf(s, "Unknown type %d\n", entry->type);
 	}
-	return 1;
+	return TRACE_TYPE_HANDLED;
 }
 
-static int print_trace_fmt(struct trace_iterator *iter)
+static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1567,90 +1469,123 @@ static int print_trace_fmt(struct trace_iterator *iter)
 
 	entry = iter->ent;
 
+	if (entry->type == TRACE_CONT)
+		return TRACE_TYPE_HANDLED;
+
 	comm = trace_find_cmdline(iter->ent->pid);
 
-	t = ns2usecs(entry->t);
+	t = ns2usecs(iter->ts);
 	usec_rem = do_div(t, 1000000ULL);
 	secs = (unsigned long)t;
 
 	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 	if (!ret)
-		return 0;
-	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+		return TRACE_TYPE_PARTIAL_LINE;
+	ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
 	if (!ret)
-		return 0;
+		return TRACE_TYPE_PARTIAL_LINE;
 	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
 	if (!ret)
-		return 0;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	switch (entry->type) {
-	case TRACE_FN:
-		ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
+	case TRACE_FN: {
+		struct ftrace_entry *field;
+
+		trace_assign_type(field, entry);
+
+		ret = seq_print_ip_sym(s, field->ip, sym_flags);
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
-						entry->fn.parent_ip) {
+						field->parent_ip) {
 			ret = trace_seq_printf(s, " <-");
 			if (!ret)
-				return 0;
-			if (kretprobed(entry->fn.parent_ip))
-				ret = trace_seq_puts(s, KRETPROBE_MSG);
-			else
-				ret = seq_print_ip_sym(s, entry->fn.parent_ip,
-						       sym_flags);
+				return TRACE_TYPE_PARTIAL_LINE;
+			ret = seq_print_ip_sym(s,
+					       field->parent_ip,
+					       sym_flags);
 			if (!ret)
-				return 0;
+				return TRACE_TYPE_PARTIAL_LINE;
 		}
 		ret = trace_seq_printf(s, "\n");
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
+	}
 	case TRACE_CTX:
-	case TRACE_WAKE:
-		S = entry->ctx.prev_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.prev_state] : 'X';
-		T = entry->ctx.next_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.next_state] : 'X';
-		ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
-				       entry->ctx.prev_pid,
-				       entry->ctx.prev_prio,
+	case TRACE_WAKE: {
+		struct ctx_switch_entry *field;
+
+		trace_assign_type(field, entry);
+
+		S = field->prev_state < sizeof(state_to_char) ?
+			state_to_char[field->prev_state] : 'X';
+		T = field->next_state < sizeof(state_to_char) ?
+			state_to_char[field->next_state] : 'X';
+		ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
+				       field->prev_pid,
+				       field->prev_prio,
 				       S,
 				       entry->type == TRACE_CTX ? "==>" : "  +",
-				       entry->ctx.next_pid,
-				       entry->ctx.next_prio,
+				       field->next_cpu,
+				       field->next_pid,
+				       field->next_prio,
 				       T);
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
-	case TRACE_SPECIAL:
+	}
+	case TRACE_SPECIAL: {
+		struct special_entry *field;
+
+		trace_assign_type(field, entry);
+
 		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
-				 entry->special.arg1,
-				 entry->special.arg2,
-				 entry->special.arg3);
+				 field->arg1,
+				 field->arg2,
+				 field->arg3);
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
-	case TRACE_STACK:
+	}
+	case TRACE_STACK: {
+		struct stack_entry *field;
+
+		trace_assign_type(field, entry);
+
 		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
 			if (i) {
 				ret = trace_seq_puts(s, " <= ");
 				if (!ret)
-					return 0;
+					return TRACE_TYPE_PARTIAL_LINE;
 			}
-			ret = seq_print_ip_sym(s, entry->stack.caller[i],
+			ret = seq_print_ip_sym(s, field->caller[i],
 					       sym_flags);
 			if (!ret)
-				return 0;
+				return TRACE_TYPE_PARTIAL_LINE;
 		}
 		ret = trace_seq_puts(s, "\n");
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
 	}
-	return 1;
+	case TRACE_PRINT: {
+		struct print_entry *field;
+
+		trace_assign_type(field, entry);
+
+		seq_print_ip_sym(s, field->ip, sym_flags);
+		trace_seq_printf(s, ": %s", field->buf);
+		if (entry->flags & TRACE_FLAG_CONT)
+			trace_seq_print_cont(s, iter);
+		break;
+	}
+	}
+	return TRACE_TYPE_HANDLED;
 }
 
-static int print_raw_fmt(struct trace_iterator *iter)
+static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry;
@@ -1659,47 +1594,77 @@ static int print_raw_fmt(struct trace_iterator *iter)
 
 	entry = iter->ent;
 
+	if (entry->type == TRACE_CONT)
+		return TRACE_TYPE_HANDLED;
+
 	ret = trace_seq_printf(s, "%d %d %llu ",
-			       entry->pid, iter->cpu, entry->t);
+			       entry->pid, iter->cpu, iter->ts);
 	if (!ret)
-		return 0;
+		return TRACE_TYPE_PARTIAL_LINE;
 
 	switch (entry->type) {
-	case TRACE_FN:
+	case TRACE_FN: {
+		struct ftrace_entry *field;
+
+		trace_assign_type(field, entry);
+
 		ret = trace_seq_printf(s, "%x %x\n",
-				       entry->fn.ip, entry->fn.parent_ip);
+				       field->ip,
+				       field->parent_ip);
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
+	}
 	case TRACE_CTX:
-	case TRACE_WAKE:
-		S = entry->ctx.prev_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.prev_state] : 'X';
-		T = entry->ctx.next_state < sizeof(state_to_char) ?
-			state_to_char[entry->ctx.next_state] : 'X';
+	case TRACE_WAKE: {
+		struct ctx_switch_entry *field;
+
+		trace_assign_type(field, entry);
+
+		S = field->prev_state < sizeof(state_to_char) ?
+			state_to_char[field->prev_state] : 'X';
+		T = field->next_state < sizeof(state_to_char) ?
+			state_to_char[field->next_state] : 'X';
 		if (entry->type == TRACE_WAKE)
 			S = '+';
-		ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
-				       entry->ctx.prev_pid,
-				       entry->ctx.prev_prio,
+		ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
+				       field->prev_pid,
+				       field->prev_prio,
 				       S,
-				       entry->ctx.next_pid,
-				       entry->ctx.next_prio,
+				       field->next_cpu,
+				       field->next_pid,
+				       field->next_prio,
 				       T);
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
+	}
 	case TRACE_SPECIAL:
-	case TRACE_STACK:
+	case TRACE_STACK: {
+		struct special_entry *field;
+
+		trace_assign_type(field, entry);
+
 		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
-				 entry->special.arg1,
-				 entry->special.arg2,
-				 entry->special.arg3);
+				 field->arg1,
+				 field->arg2,
+				 field->arg3);
 		if (!ret)
-			return 0;
+			return TRACE_TYPE_PARTIAL_LINE;
 		break;
 	}
-	return 1;
+	case TRACE_PRINT: {
+		struct print_entry *field;
+
+		trace_assign_type(field, entry);
+
+		trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
+		if (entry->flags & TRACE_FLAG_CONT)
+			trace_seq_print_cont(s, iter);
+		break;
+	}
+	}
+	return TRACE_TYPE_HANDLED;
 }
 
 #define SEQ_PUT_FIELD_RET(s, x) \
@@ -1710,11 +1675,12 @@ do { \
1710 1675
1711#define SEQ_PUT_HEX_FIELD_RET(s, x) \ 1676#define SEQ_PUT_HEX_FIELD_RET(s, x) \
1712do { \ 1677do { \
1678 BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
1713 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ 1679 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
1714 return 0; \ 1680 return 0; \
1715} while (0) 1681} while (0)
1716 1682
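Two idioms are at work in these macros: the do { } while (0) wrapper keeps the early return safe inside bare if/else bodies, and the added BUILD_BUG_ON rejects any field wider than MAX_MEMHEX_BYTES at compile time rather than at runtime. A self-contained sketch of both, with a toy seq buffer standing in for trace_seq:

    #include <stdio.h>
    #include <string.h>

    /* stand-in for the kernel's BUILD_BUG_ON: a negative array size
     * breaks the build when the condition is true */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
    #define MAX_HEX_BYTES 8

    struct seq { char buf[16]; size_t len; };

    static int seq_put(struct seq *s, const void *p, size_t n)
    {
            if (s->len + n > sizeof(s->buf))
                    return 0;               /* buffer full: report failure */
            memcpy(s->buf + s->len, p, n);
            s->len += n;
            return 1;
    }

    /* the early return aborts the *caller* when the buffer fills up */
    #define SEQ_PUT_FIELD_RET(s, x)                         \
    do {                                                    \
            BUILD_BUG_ON(sizeof(x) > MAX_HEX_BYTES);        \
            if (!seq_put((s), &(x), sizeof(x)))             \
                    return 0;                               \
    } while (0)

    static int emit(struct seq *s, int pid, long ts)
    {
            SEQ_PUT_FIELD_RET(s, pid);
            SEQ_PUT_FIELD_RET(s, ts);
            return 1;
    }

    int main(void)
    {
            struct seq s = { .len = 0 };

            printf("emit ok: %d, used %zu bytes\n", emit(&s, 42, 7L), s.len);
            return 0;
    }
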
1717static int print_hex_fmt(struct trace_iterator *iter) 1683static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1718{ 1684{
1719 struct trace_seq *s = &iter->seq; 1685 struct trace_seq *s = &iter->seq;
1720 unsigned char newline = '\n'; 1686 unsigned char newline = '\n';
@@ -1723,97 +1689,139 @@ static int print_hex_fmt(struct trace_iterator *iter)
1723 1689
1724 entry = iter->ent; 1690 entry = iter->ent;
1725 1691
1692 if (entry->type == TRACE_CONT)
1693 return TRACE_TYPE_HANDLED;
1694
1726 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 1695 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1727 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); 1696 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1728 SEQ_PUT_HEX_FIELD_RET(s, entry->t); 1697 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
1729 1698
1730 switch (entry->type) { 1699 switch (entry->type) {
1731 case TRACE_FN: 1700 case TRACE_FN: {
1732 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip); 1701 struct ftrace_entry *field;
1733 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); 1702
1703 trace_assign_type(field, entry);
1704
1705 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
1706 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
1734 break; 1707 break;
1708 }
1735 case TRACE_CTX: 1709 case TRACE_CTX:
1736 case TRACE_WAKE: 1710 case TRACE_WAKE: {
1737 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1711 struct ctx_switch_entry *field;
1738 state_to_char[entry->ctx.prev_state] : 'X'; 1712
1739 T = entry->ctx.next_state < sizeof(state_to_char) ? 1713 trace_assign_type(field, entry);
1740 state_to_char[entry->ctx.next_state] : 'X'; 1714
1715 S = field->prev_state < sizeof(state_to_char) ?
1716 state_to_char[field->prev_state] : 'X';
1717 T = field->next_state < sizeof(state_to_char) ?
1718 state_to_char[field->next_state] : 'X';
1741 if (entry->type == TRACE_WAKE) 1719 if (entry->type == TRACE_WAKE)
1742 S = '+'; 1720 S = '+';
1743 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid); 1721 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1744 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio); 1722 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1745 SEQ_PUT_HEX_FIELD_RET(s, S); 1723 SEQ_PUT_HEX_FIELD_RET(s, S);
1746 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid); 1724 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
1747 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio); 1725 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
1748 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); 1726 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
1749 SEQ_PUT_HEX_FIELD_RET(s, T); 1727 SEQ_PUT_HEX_FIELD_RET(s, T);
1750 break; 1728 break;
1729 }
1751 case TRACE_SPECIAL: 1730 case TRACE_SPECIAL:
1752 case TRACE_STACK: 1731 case TRACE_STACK: {
1753 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1); 1732 struct special_entry *field;
1754 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2); 1733
1755 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3); 1734 trace_assign_type(field, entry);
1735
1736 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
1737 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
1738 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
1756 break; 1739 break;
1757 } 1740 }
1741 }
1758 SEQ_PUT_FIELD_RET(s, newline); 1742 SEQ_PUT_FIELD_RET(s, newline);
1759 1743
1760 return 1; 1744 return TRACE_TYPE_HANDLED;
1761} 1745}
1762 1746
1763static int print_bin_fmt(struct trace_iterator *iter) 1747static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1764{ 1748{
1765 struct trace_seq *s = &iter->seq; 1749 struct trace_seq *s = &iter->seq;
1766 struct trace_entry *entry; 1750 struct trace_entry *entry;
1767 1751
1768 entry = iter->ent; 1752 entry = iter->ent;
1769 1753
1754 if (entry->type == TRACE_CONT)
1755 return TRACE_TYPE_HANDLED;
1756
1770 SEQ_PUT_FIELD_RET(s, entry->pid); 1757 SEQ_PUT_FIELD_RET(s, entry->pid);
1771 SEQ_PUT_FIELD_RET(s, entry->cpu); 1758 SEQ_PUT_FIELD_RET(s, iter->cpu);
1772 SEQ_PUT_FIELD_RET(s, entry->t); 1759 SEQ_PUT_FIELD_RET(s, iter->ts);
1773 1760
1774 switch (entry->type) { 1761 switch (entry->type) {
1775 case TRACE_FN: 1762 case TRACE_FN: {
1776 SEQ_PUT_FIELD_RET(s, entry->fn.ip); 1763 struct ftrace_entry *field;
1777 SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip); 1764
1765 trace_assign_type(field, entry);
1766
1767 SEQ_PUT_FIELD_RET(s, field->ip);
1768 SEQ_PUT_FIELD_RET(s, field->parent_ip);
1778 break; 1769 break;
1779 case TRACE_CTX: 1770 }
1780 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid); 1771 case TRACE_CTX: {
1781 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio); 1772 struct ctx_switch_entry *field;
1782 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state); 1773
1783 SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid); 1774 trace_assign_type(field, entry);
1784 SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio); 1775
1785 SEQ_PUT_FIELD_RET(s, entry->ctx.next_state); 1776 SEQ_PUT_FIELD_RET(s, field->prev_pid);
1777 SEQ_PUT_FIELD_RET(s, field->prev_prio);
1778 SEQ_PUT_FIELD_RET(s, field->prev_state);
1779 SEQ_PUT_FIELD_RET(s, field->next_pid);
1780 SEQ_PUT_FIELD_RET(s, field->next_prio);
1781 SEQ_PUT_FIELD_RET(s, field->next_state);
1786 break; 1782 break;
1783 }
1787 case TRACE_SPECIAL: 1784 case TRACE_SPECIAL:
1788 case TRACE_STACK: 1785 case TRACE_STACK: {
1789 SEQ_PUT_FIELD_RET(s, entry->special.arg1); 1786 struct special_entry *field;
1790 SEQ_PUT_FIELD_RET(s, entry->special.arg2); 1787
1791 SEQ_PUT_FIELD_RET(s, entry->special.arg3); 1788 trace_assign_type(field, entry);
1789
1790 SEQ_PUT_FIELD_RET(s, field->arg1);
1791 SEQ_PUT_FIELD_RET(s, field->arg2);
1792 SEQ_PUT_FIELD_RET(s, field->arg3);
1792 break; 1793 break;
1793 } 1794 }
1795 }
1794 return 1; 1796 return 1;
1795} 1797}
1796 1798
1797static int trace_empty(struct trace_iterator *iter) 1799static int trace_empty(struct trace_iterator *iter)
1798{ 1800{
1799 struct trace_array_cpu *data;
1800 int cpu; 1801 int cpu;
1801 1802
1802 for_each_tracing_cpu(cpu) { 1803 for_each_tracing_cpu(cpu) {
1803 data = iter->tr->data[cpu]; 1804 if (iter->buffer_iter[cpu]) {
1804 1805 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1805 if (head_page(data) && data->trace_idx && 1806 return 0;
1806 (data->trace_tail != data->trace_head || 1807 } else {
1807 data->trace_tail_idx != data->trace_head_idx)) 1808 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1808 return 0; 1809 return 0;
1810 }
1809 } 1811 }
1812
1810 return 1; 1813 return 1;
1811} 1814}
1812 1815
1813static int print_trace_line(struct trace_iterator *iter) 1816static enum print_line_t print_trace_line(struct trace_iterator *iter)
1814{ 1817{
1815 if (iter->trace && iter->trace->print_line) 1818 enum print_line_t ret;
1816 return iter->trace->print_line(iter); 1819
1820 if (iter->trace && iter->trace->print_line) {
1821 ret = iter->trace->print_line(iter);
1822 if (ret != TRACE_TYPE_UNHANDLED)
1823 return ret;
1824 }
1817 1825
1818 if (trace_flags & TRACE_ITER_BIN) 1826 if (trace_flags & TRACE_ITER_BIN)
1819 return print_bin_fmt(iter); 1827 return print_bin_fmt(iter);
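print_trace_line() now implements a fallback chain: a tracer's own print_line hook gets the first chance, and returning TRACE_TYPE_UNHANDLED (rather than the old 0/1) punts the record to the generic binary/hex/raw formatters below. A toy model of that dispatch, with stand-in formatters; the enum values mirror the patch's print_line_t:

    #include <stdio.h>

    enum print_line_t { PARTIAL_LINE, HANDLED, UNHANDLED };

    static enum print_line_t plugin_line(int type)
    {
            if (type != 0)
                    return UNHANDLED;       /* plugin only knows type 0 */
            puts("plugin formatter");
            return HANDLED;
    }

    static enum print_line_t generic_line(int type)
    {
            printf("generic formatter, type %d\n", type);
            return HANDLED;
    }

    static enum print_line_t print_line(int type)
    {
            enum print_line_t ret = plugin_line(type);

            if (ret != UNHANDLED)
                    return ret;             /* plugin claimed the record */
            return generic_line(type);      /* otherwise fall back */
    }

    int main(void)
    {
            print_line(0);
            print_line(3);
            return 0;
    }
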
@@ -1869,6 +1877,8 @@ static struct trace_iterator *
1869__tracing_open(struct inode *inode, struct file *file, int *ret) 1877__tracing_open(struct inode *inode, struct file *file, int *ret)
1870{ 1878{
1871 struct trace_iterator *iter; 1879 struct trace_iterator *iter;
1880 struct seq_file *m;
1881 int cpu;
1872 1882
1873 if (tracing_disabled) { 1883 if (tracing_disabled) {
1874 *ret = -ENODEV; 1884 *ret = -ENODEV;
@@ -1889,28 +1899,45 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1889 iter->trace = current_trace; 1899 iter->trace = current_trace;
1890 iter->pos = -1; 1900 iter->pos = -1;
1891 1901
1902 for_each_tracing_cpu(cpu) {
1903
1904 iter->buffer_iter[cpu] =
1905 ring_buffer_read_start(iter->tr->buffer, cpu);
1906
1907 if (!iter->buffer_iter[cpu])
1908 goto fail_buffer;
1909 }
1910
1892 /* TODO stop tracer */ 1911 /* TODO stop tracer */
1893 *ret = seq_open(file, &tracer_seq_ops); 1912 *ret = seq_open(file, &tracer_seq_ops);
1894 if (!*ret) { 1913 if (*ret)
1895 struct seq_file *m = file->private_data; 1914 goto fail_buffer;
1896 m->private = iter;
1897 1915
1898 /* stop the trace while dumping */ 1916 m = file->private_data;
1899 if (iter->tr->ctrl) { 1917 m->private = iter;
1900 tracer_enabled = 0;
1901 ftrace_function_enabled = 0;
1902 }
1903 1918
1904 if (iter->trace && iter->trace->open) 1919 /* stop the trace while dumping */
1905 iter->trace->open(iter); 1920 if (iter->tr->ctrl) {
1906 } else { 1921 tracer_enabled = 0;
1907 kfree(iter); 1922 ftrace_function_enabled = 0;
1908 iter = NULL;
1909 } 1923 }
1924
1925 if (iter->trace && iter->trace->open)
1926 iter->trace->open(iter);
1927
1910 mutex_unlock(&trace_types_lock); 1928 mutex_unlock(&trace_types_lock);
1911 1929
1912 out: 1930 out:
1913 return iter; 1931 return iter;
1932
1933 fail_buffer:
1934 for_each_tracing_cpu(cpu) {
1935 if (iter->buffer_iter[cpu])
1936 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1937 }
1938 mutex_unlock(&trace_types_lock);
1939
1940 return ERR_PTR(-ENOMEM);
1914} 1941}
1915 1942
1916int tracing_open_generic(struct inode *inode, struct file *filp) 1943int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -1926,8 +1953,14 @@ int tracing_release(struct inode *inode, struct file *file)
1926{ 1953{
1927 struct seq_file *m = (struct seq_file *)file->private_data; 1954 struct seq_file *m = (struct seq_file *)file->private_data;
1928 struct trace_iterator *iter = m->private; 1955 struct trace_iterator *iter = m->private;
1956 int cpu;
1929 1957
1930 mutex_lock(&trace_types_lock); 1958 mutex_lock(&trace_types_lock);
1959 for_each_tracing_cpu(cpu) {
1960 if (iter->buffer_iter[cpu])
1961 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1962 }
1963
1931 if (iter->trace && iter->trace->close) 1964 if (iter->trace && iter->trace->close)
1932 iter->trace->close(iter); 1965 iter->trace->close(iter);
1933 1966
@@ -2352,6 +2385,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2352 struct tracer *t; 2385 struct tracer *t;
2353 char buf[max_tracer_type_len+1]; 2386 char buf[max_tracer_type_len+1];
2354 int i; 2387 int i;
2388 size_t ret;
2389
2390 ret = cnt;
2355 2391
2356 if (cnt > max_tracer_type_len) 2392 if (cnt > max_tracer_type_len)
2357 cnt = max_tracer_type_len; 2393 cnt = max_tracer_type_len;
@@ -2370,7 +2406,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2370 if (strcmp(t->name, buf) == 0) 2406 if (strcmp(t->name, buf) == 0)
2371 break; 2407 break;
2372 } 2408 }
2373 if (!t || t == current_trace) 2409 if (!t) {
2410 ret = -EINVAL;
2411 goto out;
2412 }
2413 if (t == current_trace)
2374 goto out; 2414 goto out;
2375 2415
2376 if (current_trace && current_trace->reset) 2416 if (current_trace && current_trace->reset)
@@ -2383,9 +2423,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2383 out: 2423 out:
2384 mutex_unlock(&trace_types_lock); 2424 mutex_unlock(&trace_types_lock);
2385 2425
2386 filp->f_pos += cnt; 2426 if (ret > 0)
2427 filp->f_pos += ret;
2387 2428
2388 return cnt; 2429 return ret;
2389} 2430}
2390 2431
2391static ssize_t 2432static ssize_t
@@ -2500,20 +2541,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2500 size_t cnt, loff_t *ppos) 2541 size_t cnt, loff_t *ppos)
2501{ 2542{
2502 struct trace_iterator *iter = filp->private_data; 2543 struct trace_iterator *iter = filp->private_data;
2503 struct trace_array_cpu *data;
2504 static cpumask_t mask;
2505 unsigned long flags;
2506#ifdef CONFIG_FTRACE
2507 int ftrace_save;
2508#endif
2509 int cpu;
2510 ssize_t sret; 2544 ssize_t sret;
2511 2545
2512 /* return any leftover data */ 2546 /* return any leftover data */
2513 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 2547 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2514 if (sret != -EBUSY) 2548 if (sret != -EBUSY)
2515 return sret; 2549 return sret;
2516 sret = 0;
2517 2550
2518 trace_seq_reset(&iter->seq); 2551 trace_seq_reset(&iter->seq);
2519 2552
@@ -2524,6 +2557,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2524 goto out; 2557 goto out;
2525 } 2558 }
2526 2559
2560waitagain:
2561 sret = 0;
2527 while (trace_empty(iter)) { 2562 while (trace_empty(iter)) {
2528 2563
2529 if ((filp->f_flags & O_NONBLOCK)) { 2564 if ((filp->f_flags & O_NONBLOCK)) {
@@ -2588,46 +2623,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2588 offsetof(struct trace_iterator, seq)); 2623 offsetof(struct trace_iterator, seq));
2589 iter->pos = -1; 2624 iter->pos = -1;
2590 2625
2591 /*
2592 * We need to stop all tracing on all CPUs to read
2593 * the next buffer. This is a bit expensive, but is
2594 * not done often. We fill all that we can read,
2595 * and then release the locks again.
2596 */
2597
2598 cpus_clear(mask);
2599 local_irq_save(flags);
2600#ifdef CONFIG_FTRACE
2601 ftrace_save = ftrace_enabled;
2602 ftrace_enabled = 0;
2603#endif
2604 smp_wmb();
2605 for_each_tracing_cpu(cpu) {
2606 data = iter->tr->data[cpu];
2607
2608 if (!head_page(data) || !data->trace_idx)
2609 continue;
2610
2611 atomic_inc(&data->disabled);
2612 cpu_set(cpu, mask);
2613 }
2614
2615 for_each_cpu_mask(cpu, mask) {
2616 data = iter->tr->data[cpu];
2617 __raw_spin_lock(&data->lock);
2618
2619 if (data->overrun > iter->last_overrun[cpu])
2620 iter->overrun[cpu] +=
2621 data->overrun - iter->last_overrun[cpu];
2622 iter->last_overrun[cpu] = data->overrun;
2623 }
2624
2625 while (find_next_entry_inc(iter) != NULL) { 2626 while (find_next_entry_inc(iter) != NULL) {
2626 int ret; 2627 enum print_line_t ret;
2627 int len = iter->seq.len; 2628 int len = iter->seq.len;
2628 2629
2629 ret = print_trace_line(iter); 2630 ret = print_trace_line(iter);
2630 if (!ret) { 2631 if (ret == TRACE_TYPE_PARTIAL_LINE) {
2631 /* don't print partial lines */ 2632 /* don't print partial lines */
2632 iter->seq.len = len; 2633 iter->seq.len = len;
2633 break; 2634 break;
@@ -2639,26 +2640,17 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2639 break; 2640 break;
2640 } 2641 }
2641 2642
2642 for_each_cpu_mask(cpu, mask) {
2643 data = iter->tr->data[cpu];
2644 __raw_spin_unlock(&data->lock);
2645 }
2646
2647 for_each_cpu_mask(cpu, mask) {
2648 data = iter->tr->data[cpu];
2649 atomic_dec(&data->disabled);
2650 }
2651#ifdef CONFIG_FTRACE
2652 ftrace_enabled = ftrace_save;
2653#endif
2654 local_irq_restore(flags);
2655
2656 /* Now copy what we have to the user */ 2643 /* Now copy what we have to the user */
2657 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 2644 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2658 if (iter->seq.readpos >= iter->seq.len) 2645 if (iter->seq.readpos >= iter->seq.len)
2659 trace_seq_reset(&iter->seq); 2646 trace_seq_reset(&iter->seq);
2647
2648 /*
2649 * If there was nothing to send to user, in spite of consuming trace
2650 * entries, go back to wait for more entries.
2651 */
2660 if (sret == -EBUSY) 2652 if (sret == -EBUSY)
2661 sret = 0; 2653 goto waitagain;
2662 2654
2663out: 2655out:
2664 mutex_unlock(&trace_types_lock); 2656 mutex_unlock(&trace_types_lock);
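The waitagain label handles a subtlety of the new record format: a pass over the buffer can consume only continuation records and therefore produce no printable bytes. Rather than return 0, which a reader would treat as EOF, the read jumps back and blocks for more entries. A toy model of the retry, with stand-in wait/format helpers:

    #include <stdio.h>

    static int passes;

    static void wait_for_data(void) { /* would block here */ }

    static int format_pass(void)
    {
            /* first pass consumes only continuation records: no output */
            return passes++ ? 5 : 0;        /* bytes produced this pass */
    }

    static int pipe_read(void)
    {
            int n;
    waitagain:
            wait_for_data();
            n = format_pass();
            if (n == 0)
                    goto waitagain;         /* nothing printable: wait again */
            return n;                       /* never a spurious EOF */
    }

    int main(void)
    {
            printf("read returned %d bytes after %d passes\n",
                   pipe_read(), passes);
            return 0;
    }
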
@@ -2684,7 +2676,8 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2684{ 2676{
2685 unsigned long val; 2677 unsigned long val;
2686 char buf[64]; 2678 char buf[64];
2687 int i, ret; 2679 int ret;
2680 struct trace_array *tr = filp->private_data;
2688 2681
2689 if (cnt >= sizeof(buf)) 2682 if (cnt >= sizeof(buf))
2690 return -EINVAL; 2683 return -EINVAL;
@@ -2704,59 +2697,38 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2704 2697
2705 mutex_lock(&trace_types_lock); 2698 mutex_lock(&trace_types_lock);
2706 2699
2707 if (current_trace != &no_tracer) { 2700 if (tr->ctrl) {
2708 cnt = -EBUSY; 2701 cnt = -EBUSY;
2709 pr_info("ftrace: set current_tracer to none" 2702 pr_info("ftrace: please disable tracing"
2710 " before modifying buffer size\n"); 2703 " before modifying buffer size\n");
2711 goto out; 2704 goto out;
2712 } 2705 }
2713 2706
2714 if (val > global_trace.entries) { 2707 if (val != global_trace.entries) {
2715 long pages_requested; 2708 ret = ring_buffer_resize(global_trace.buffer, val);
2716 unsigned long freeable_pages; 2709 if (ret < 0) {
2717 2710 cnt = ret;
2718 /* make sure we have enough memory before mapping */
2719 pages_requested =
2720 (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
2721
2722 /* account for each buffer (and max_tr) */
2723 pages_requested *= tracing_nr_buffers * 2;
2724
2725 /* Check for overflow */
2726 if (pages_requested < 0) {
2727 cnt = -ENOMEM;
2728 goto out; 2711 goto out;
2729 } 2712 }
2730 2713
2731 freeable_pages = determine_dirtyable_memory(); 2714 ret = ring_buffer_resize(max_tr.buffer, val);
2732 2715 if (ret < 0) {
2733 /* we only allow to request 1/4 of useable memory */ 2716 int r;
2734 if (pages_requested > 2717 cnt = ret;
2735 ((freeable_pages + tracing_pages_allocated) / 4)) { 2718 r = ring_buffer_resize(global_trace.buffer,
2736 cnt = -ENOMEM; 2719 global_trace.entries);
2737 goto out; 2720 if (r < 0) {
2738 } 2721 /* AARGH! We are left with different
2739 2722 * size max buffer!!!! */
2740 while (global_trace.entries < val) { 2723 WARN_ON(1);
2741 if (trace_alloc_page()) { 2724 tracing_disabled = 1;
2742 cnt = -ENOMEM;
2743 goto out;
2744 } 2725 }
2745 /* double check that we don't go over the known pages */ 2726 goto out;
2746 if (tracing_pages_allocated > pages_requested)
2747 break;
2748 } 2727 }
2749 2728
2750 } else { 2729 global_trace.entries = val;
2751 /* include the number of entries in val (inc of page entries) */
2752 while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
2753 trace_free_page();
2754 } 2730 }
2755 2731
2756 /* check integrity */
2757 for_each_tracing_cpu(i)
2758 check_pages(global_trace.data[i]);
2759
2760 filp->f_pos += cnt; 2732 filp->f_pos += cnt;
2761 2733
2762 /* If check pages failed, return ENOMEM */ 2734 /* If check pages failed, return ENOMEM */
@@ -2769,6 +2741,52 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2769 return cnt; 2741 return cnt;
2770} 2742}
2771 2743
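Resizing must keep global_trace and max_tr the same size, so a failure on the second buffer rolls the first back to its old size, and a failure of the rollback itself disables tracing outright rather than run with mismatched buffers. The shape of that recovery, sketched with a toy resize() whose failures are scripted:

    #include <stdio.h>

    static int fail_at;     /* which call number should fail (0 = none) */
    static int calls;

    static int resize(long *size, long val)
    {
            if (++calls == fail_at)
                    return -1;              /* ring_buffer_resize() failed */
            *size = val;
            return 0;
    }

    static int set_entries(long *main_sz, long *max_sz, long val, int *disabled)
    {
            long old = *main_sz;

            if (resize(main_sz, val) < 0)
                    return -1;              /* nothing changed yet */
            if (resize(max_sz, val) < 0) {
                    if (resize(main_sz, old) < 0) { /* roll the first back */
                            *disabled = 1;  /* mismatched sizes: give up */
                            return -1;
                    }
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            long a = 100, b = 100;
            int disabled = 0;

            fail_at = 2;                    /* make the second resize fail */
            set_entries(&a, &b, 200, &disabled);
            printf("a=%ld b=%ld disabled=%d\n", a, b, disabled);
            return 0;
    }
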
2744static int mark_printk(const char *fmt, ...)
2745{
2746 int ret;
2747 va_list args;
2748 va_start(args, fmt);
2749 ret = trace_vprintk(0, fmt, args);
2750 va_end(args);
2751 return ret;
2752}
2753
2754static ssize_t
2755tracing_mark_write(struct file *filp, const char __user *ubuf,
2756 size_t cnt, loff_t *fpos)
2757{
2758 char *buf;
2759 char *end;
2760 struct trace_array *tr = &global_trace;
2761
2762 if (!tr->ctrl || tracing_disabled)
2763 return -EINVAL;
2764
2765 if (cnt > TRACE_BUF_SIZE)
2766 cnt = TRACE_BUF_SIZE;
2767
2768 buf = kmalloc(cnt + 1, GFP_KERNEL);
2769 if (buf == NULL)
2770 return -ENOMEM;
2771
2772 if (copy_from_user(buf, ubuf, cnt)) {
2773 kfree(buf);
2774 return -EFAULT;
2775 }
2776
2777 /* Cut from the first NUL or newline. */
2778 buf[cnt] = '\0';
2779 end = strchr(buf, '\n');
2780 if (end)
2781 *end = '\0';
2782
2783 cnt = mark_printk("%s\n", buf);
2784 kfree(buf);
2785 *fpos += cnt;
2786
2787 return cnt;
2788}
2789
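The new trace_marker file lets user space inject annotations into the trace stream; each write becomes a TRACE_PRINT record via mark_printk(). A minimal user-space sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug; echo hello > /sys/kernel/debug/tracing/trace_marker does the same from a shell:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *msg = "hello from user space";
            int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

            if (fd < 0) {
                    perror("open trace_marker");
                    return 1;
            }
            if (write(fd, msg, strlen(msg)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }
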
2772static struct file_operations tracing_max_lat_fops = { 2790static struct file_operations tracing_max_lat_fops = {
2773 .open = tracing_open_generic, 2791 .open = tracing_open_generic,
2774 .read = tracing_max_lat_read, 2792 .read = tracing_max_lat_read,
@@ -2800,6 +2818,11 @@ static struct file_operations tracing_entries_fops = {
2800 .write = tracing_entries_write, 2818 .write = tracing_entries_write,
2801}; 2819};
2802 2820
2821static struct file_operations tracing_mark_fops = {
2822 .open = tracing_open_generic,
2823 .write = tracing_mark_write,
2824};
2825
2803#ifdef CONFIG_DYNAMIC_FTRACE 2826#ifdef CONFIG_DYNAMIC_FTRACE
2804 2827
2805static ssize_t 2828static ssize_t
@@ -2846,7 +2869,7 @@ struct dentry *tracing_init_dentry(void)
2846#include "trace_selftest.c" 2869#include "trace_selftest.c"
2847#endif 2870#endif
2848 2871
2849static __init void tracer_init_debugfs(void) 2872static __init int tracer_init_debugfs(void)
2850{ 2873{
2851 struct dentry *d_tracer; 2874 struct dentry *d_tracer;
2852 struct dentry *entry; 2875 struct dentry *entry;
@@ -2881,12 +2904,12 @@ static __init void tracer_init_debugfs(void)
2881 entry = debugfs_create_file("available_tracers", 0444, d_tracer, 2904 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2882 &global_trace, &show_traces_fops); 2905 &global_trace, &show_traces_fops);
2883 if (!entry) 2906 if (!entry)
2884 pr_warning("Could not create debugfs 'trace' entry\n"); 2907 pr_warning("Could not create debugfs 'available_tracers' entry\n");
2885 2908
2886 entry = debugfs_create_file("current_tracer", 0444, d_tracer, 2909 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2887 &global_trace, &set_tracer_fops); 2910 &global_trace, &set_tracer_fops);
2888 if (!entry) 2911 if (!entry)
2889 pr_warning("Could not create debugfs 'trace' entry\n"); 2912 pr_warning("Could not create debugfs 'current_tracer' entry\n");
2890 2913
2891 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, 2914 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2892 &tracing_max_latency, 2915 &tracing_max_latency,
@@ -2899,7 +2922,7 @@ static __init void tracer_init_debugfs(void)
2899 &tracing_thresh, &tracing_max_lat_fops); 2922 &tracing_thresh, &tracing_max_lat_fops);
2900 if (!entry) 2923 if (!entry)
2901 pr_warning("Could not create debugfs " 2924 pr_warning("Could not create debugfs "
2902 "'tracing_threash' entry\n"); 2925 "'tracing_thresh' entry\n");
2903 entry = debugfs_create_file("README", 0644, d_tracer, 2926 entry = debugfs_create_file("README", 0644, d_tracer,
2904 NULL, &tracing_readme_fops); 2927 NULL, &tracing_readme_fops);
2905 if (!entry) 2928 if (!entry)
@@ -2909,13 +2932,19 @@ static __init void tracer_init_debugfs(void)
2909 NULL, &tracing_pipe_fops); 2932 NULL, &tracing_pipe_fops);
2910 if (!entry) 2933 if (!entry)
2911 pr_warning("Could not create debugfs " 2934 pr_warning("Could not create debugfs "
2912 "'tracing_threash' entry\n"); 2935 "'trace_pipe' entry\n");
2913 2936
2914 entry = debugfs_create_file("trace_entries", 0644, d_tracer, 2937 entry = debugfs_create_file("trace_entries", 0644, d_tracer,
2915 &global_trace, &tracing_entries_fops); 2938 &global_trace, &tracing_entries_fops);
2916 if (!entry) 2939 if (!entry)
2917 pr_warning("Could not create debugfs " 2940 pr_warning("Could not create debugfs "
2918 "'tracing_threash' entry\n"); 2941 "'trace_entries' entry\n");
2942
2943 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2944 NULL, &tracing_mark_fops);
2945 if (!entry)
2946 pr_warning("Could not create debugfs "
2947 "'trace_marker' entry\n");
2919 2948
2920#ifdef CONFIG_DYNAMIC_FTRACE 2949#ifdef CONFIG_DYNAMIC_FTRACE
2921 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, 2950 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
@@ -2928,230 +2957,263 @@ static __init void tracer_init_debugfs(void)
2928#ifdef CONFIG_SYSPROF_TRACER 2957#ifdef CONFIG_SYSPROF_TRACER
2929 init_tracer_sysprof_debugfs(d_tracer); 2958 init_tracer_sysprof_debugfs(d_tracer);
2930#endif 2959#endif
2960 return 0;
2931} 2961}
2932 2962
2933static int trace_alloc_page(void) 2963int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2934{ 2964{
2965 static DEFINE_SPINLOCK(trace_buf_lock);
2966 static char trace_buf[TRACE_BUF_SIZE];
2967
2968 struct ring_buffer_event *event;
2969 struct trace_array *tr = &global_trace;
2935 struct trace_array_cpu *data; 2970 struct trace_array_cpu *data;
2936 struct page *page, *tmp; 2971 struct print_entry *entry;
2937 LIST_HEAD(pages); 2972 unsigned long flags, irq_flags;
2938 void *array; 2973 int cpu, len = 0, size, pc;
2939 unsigned pages_allocated = 0;
2940 int i;
2941 2974
2942 /* first allocate a page for each CPU */ 2975 if (!tr->ctrl || tracing_disabled)
2943 for_each_tracing_cpu(i) { 2976 return 0;
2944 array = (void *)__get_free_page(GFP_KERNEL);
2945 if (array == NULL) {
2946 printk(KERN_ERR "tracer: failed to allocate page"
2947 "for trace buffer!\n");
2948 goto free_pages;
2949 }
2950 2977
2951 pages_allocated++; 2978 pc = preempt_count();
2952 page = virt_to_page(array); 2979 preempt_disable_notrace();
2953 list_add(&page->lru, &pages); 2980 cpu = raw_smp_processor_id();
2981 data = tr->data[cpu];
2954 2982
2955/* Only allocate if we are actually using the max trace */ 2983 if (unlikely(atomic_read(&data->disabled)))
2956#ifdef CONFIG_TRACER_MAX_TRACE 2984 goto out;
2957 array = (void *)__get_free_page(GFP_KERNEL);
2958 if (array == NULL) {
2959 printk(KERN_ERR "tracer: failed to allocate page"
2960 "for trace buffer!\n");
2961 goto free_pages;
2962 }
2963 pages_allocated++;
2964 page = virt_to_page(array);
2965 list_add(&page->lru, &pages);
2966#endif
2967 }
2968 2985
2969 /* Now that we successfully allocate a page per CPU, add them */ 2986 spin_lock_irqsave(&trace_buf_lock, flags);
2970 for_each_tracing_cpu(i) { 2987 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
2971 data = global_trace.data[i];
2972 page = list_entry(pages.next, struct page, lru);
2973 list_del_init(&page->lru);
2974 list_add_tail(&page->lru, &data->trace_pages);
2975 ClearPageLRU(page);
2976 2988
2977#ifdef CONFIG_TRACER_MAX_TRACE 2989 len = min(len, TRACE_BUF_SIZE-1);
2978 data = max_tr.data[i]; 2990 trace_buf[len] = 0;
2979 page = list_entry(pages.next, struct page, lru);
2980 list_del_init(&page->lru);
2981 list_add_tail(&page->lru, &data->trace_pages);
2982 SetPageLRU(page);
2983#endif
2984 }
2985 tracing_pages_allocated += pages_allocated;
2986 global_trace.entries += ENTRIES_PER_PAGE;
2987 2991
2988 return 0; 2992 size = sizeof(*entry) + len + 1;
2993 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
2994 if (!event)
2995 goto out_unlock;
2996 entry = ring_buffer_event_data(event);
2997 tracing_generic_entry_update(&entry->ent, flags, pc);
2998 entry->ent.type = TRACE_PRINT;
2999 entry->ip = ip;
2989 3000
2990 free_pages: 3001 memcpy(&entry->buf, trace_buf, len);
2991 list_for_each_entry_safe(page, tmp, &pages, lru) { 3002 entry->buf[len] = 0;
2992 list_del_init(&page->lru); 3003 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
2993 __free_page(page); 3004
2994 } 3005 out_unlock:
2995 return -ENOMEM; 3006 spin_unlock_irqrestore(&trace_buf_lock, flags);
3007
3008 out:
3009 preempt_enable_notrace();
3010
3011 return len;
2996} 3012}
3013EXPORT_SYMBOL_GPL(trace_vprintk);
2997 3014
2998static int trace_free_page(void) 3015int __ftrace_printk(unsigned long ip, const char *fmt, ...)
2999{ 3016{
3000 struct trace_array_cpu *data; 3017 int ret;
3001 struct page *page; 3018 va_list ap;
3002 struct list_head *p;
3003 int i;
3004 int ret = 0;
3005 3019
3006 /* free one page from each buffer */ 3020 if (!(trace_flags & TRACE_ITER_PRINTK))
3007 for_each_tracing_cpu(i) { 3021 return 0;
3008 data = global_trace.data[i];
3009 p = data->trace_pages.next;
3010 if (p == &data->trace_pages) {
3011 /* should never happen */
3012 WARN_ON(1);
3013 tracing_disabled = 1;
3014 ret = -1;
3015 break;
3016 }
3017 page = list_entry(p, struct page, lru);
3018 ClearPageLRU(page);
3019 list_del(&page->lru);
3020 tracing_pages_allocated--;
3021 tracing_pages_allocated--;
3022 __free_page(page);
3023 3022
3024 tracing_reset(data); 3023 va_start(ap, fmt);
3024 ret = trace_vprintk(ip, fmt, ap);
3025 va_end(ap);
3026 return ret;
3027}
3028EXPORT_SYMBOL_GPL(__ftrace_printk);
3025 3029
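trace_vprintk() formats into a single static scratch buffer under a spinlock, clamps the result to TRACE_BUF_SIZE-1, then copies it into a freshly reserved ring-buffer event. A userspace sketch of that buffering discipline, with a pthread mutex standing in for the spinlock and malloc for the reserve/commit pair:

    #include <pthread.h>
    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUF_SIZE 1024

    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static char scratch[BUF_SIZE];

    struct print_rec {
            unsigned long ip;
            char buf[];             /* trailing text, sized per record */
    };

    static struct print_rec *record_vprintf(unsigned long ip,
                                            const char *fmt, va_list ap)
    {
            struct print_rec *rec;
            int len;

            pthread_mutex_lock(&buf_lock);
            len = vsnprintf(scratch, BUF_SIZE, fmt, ap);
            if (len > BUF_SIZE - 1)
                    len = BUF_SIZE - 1;     /* clamp, as the patch does */
            scratch[len] = 0;

            rec = malloc(sizeof(*rec) + len + 1); /* "reserve" the event */
            if (rec) {
                    rec->ip = ip;
                    memcpy(rec->buf, scratch, len + 1);
            }
            pthread_mutex_unlock(&buf_lock);
            return rec;
    }

    static struct print_rec *record_printf(unsigned long ip, const char *fmt, ...)
    {
            struct print_rec *rec;
            va_list ap;

            va_start(ap, fmt);
            rec = record_vprintf(ip, fmt, ap);
            va_end(ap);
            return rec;
    }

    int main(void)
    {
            struct print_rec *r = record_printf(0xdead, "request %d\n", 7);

            if (r) {
                    printf("# %lx %s", r->ip, r->buf);
                    free(r);
            }
            return 0;
    }
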
3026#ifdef CONFIG_TRACER_MAX_TRACE 3030static int trace_panic_handler(struct notifier_block *this,
3027 data = max_tr.data[i]; 3031 unsigned long event, void *unused)
3028 p = data->trace_pages.next; 3032{
3029 if (p == &data->trace_pages) { 3033 ftrace_dump();
3030 /* should never happen */ 3034 return NOTIFY_OK;
3031 WARN_ON(1); 3035}
3032 tracing_disabled = 1;
3033 ret = -1;
3034 break;
3035 }
3036 page = list_entry(p, struct page, lru);
3037 ClearPageLRU(page);
3038 list_del(&page->lru);
3039 __free_page(page);
3040 3036
3041 tracing_reset(data); 3037static struct notifier_block trace_panic_notifier = {
3042#endif 3038 .notifier_call = trace_panic_handler,
3043 } 3039 .next = NULL,
3044 global_trace.entries -= ENTRIES_PER_PAGE; 3040 .priority = 150 /* priority: INT_MAX >= x >= 0 */
3041};
3045 3042
3046 return ret; 3043static int trace_die_handler(struct notifier_block *self,
3044 unsigned long val,
3045 void *data)
3046{
3047 switch (val) {
3048 case DIE_OOPS:
3049 ftrace_dump();
3050 break;
3051 default:
3052 break;
3053 }
3054 return NOTIFY_OK;
3047} 3055}
3048 3056
3049__init static int tracer_alloc_buffers(void) 3057static struct notifier_block trace_die_notifier = {
3058 .notifier_call = trace_die_handler,
3059 .priority = 200
3060};
3061
3062/*
3063 * printk is set to a max of 1024; we really don't need it that big.
3064 * Nothing should be printing 1000 characters anyway.
3065 */
3066#define TRACE_MAX_PRINT 1000
3067
3068/*
3069 * Define KERN_TRACE here so that we have one place to modify
3070 * it if we decide to change what log level the ftrace dump
3071 * should be at.
3072 */
3073#define KERN_TRACE KERN_INFO
3074
3075static void
3076trace_printk_seq(struct trace_seq *s)
3050{ 3077{
3051 struct trace_array_cpu *data; 3078 /* Probably should print a warning here. */
3052 void *array; 3079 if (s->len >= TRACE_MAX_PRINT)
3053 struct page *page; 3080 s->len = TRACE_MAX_PRINT;
3054 int pages = 0;
3055 int ret = -ENOMEM;
3056 int i;
3057 3081
3058 /* TODO: make the number of buffers hot pluggable with CPUS */ 3082 /* should be zero-terminated, but we are paranoid. */
3059 tracing_nr_buffers = num_possible_cpus(); 3083 s->buffer[s->len] = 0;
3060 tracing_buffer_mask = cpu_possible_map;
3061 3084
3062 /* Allocate the first page for all buffers */ 3085 printk(KERN_TRACE "%s", s->buffer);
3063 for_each_tracing_cpu(i) {
3064 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3065 max_tr.data[i] = &per_cpu(max_data, i);
3066 3086
3067 array = (void *)__get_free_page(GFP_KERNEL); 3087 trace_seq_reset(s);
3068 if (array == NULL) { 3088}
3069 printk(KERN_ERR "tracer: failed to allocate page" 3089
3070 "for trace buffer!\n"); 3090
3071 goto free_buffers; 3091void ftrace_dump(void)
3072 } 3092{
3093 static DEFINE_SPINLOCK(ftrace_dump_lock);
3094 /* use static because iter can be a bit big for the stack */
3095 static struct trace_iterator iter;
3096 static cpumask_t mask;
3097 static int dump_ran;
3098 unsigned long flags;
3099 int cnt = 0, cpu;
3073 3100
3074 /* set the array to the list */ 3101 /* only one dump */
3075 INIT_LIST_HEAD(&data->trace_pages); 3102 spin_lock_irqsave(&ftrace_dump_lock, flags);
3076 page = virt_to_page(array); 3103 if (dump_ran)
3077 list_add(&page->lru, &data->trace_pages); 3104 goto out;
3078 /* use the LRU flag to differentiate the two buffers */
3079 ClearPageLRU(page);
3080 3105
3081 data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 3106 dump_ran = 1;
3082 max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3083 3107
3084/* Only allocate if we are actually using the max trace */ 3108 /* No turning back! */
3085#ifdef CONFIG_TRACER_MAX_TRACE 3109 ftrace_kill();
3086 array = (void *)__get_free_page(GFP_KERNEL);
3087 if (array == NULL) {
3088 printk(KERN_ERR "tracer: failed to allocate page"
3089 "for trace buffer!\n");
3090 goto free_buffers;
3091 }
3092 3110
3093 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages); 3111 for_each_tracing_cpu(cpu) {
3094 page = virt_to_page(array); 3112 atomic_inc(&global_trace.data[cpu]->disabled);
3095 list_add(&page->lru, &max_tr.data[i]->trace_pages);
3096 SetPageLRU(page);
3097#endif
3098 } 3113 }
3099 3114
3115 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3116
3117 iter.tr = &global_trace;
3118 iter.trace = current_trace;
3119
3100 /* 3120 * We need to stop all tracing on all CPUs to read
3101 * Since we allocate by orders of pages, we may be able to 3121 * the next buffer. This is a bit expensive, but is
3102 * round up a bit. 3122 * not done often. We fill all that we can read,
3123 * not done often. We fill all what we can read,
3124 * and then release the locks again.
3103 */ 3125 */
3104 global_trace.entries = ENTRIES_PER_PAGE;
3105 pages++;
3106 3126
3107 while (global_trace.entries < trace_nr_entries) { 3127 cpus_clear(mask);
3108 if (trace_alloc_page()) 3128
3109 break; 3129 while (!trace_empty(&iter)) {
3110 pages++; 3130
3131 if (!cnt)
3132 printk(KERN_TRACE "---------------------------------\n");
3133
3134 cnt++;
3135
3136 /* reset all but tr, trace, and overruns */
3137 memset(&iter.seq, 0,
3138 sizeof(struct trace_iterator) -
3139 offsetof(struct trace_iterator, seq));
3140 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3141 iter.pos = -1;
3142
3143 if (find_next_entry_inc(&iter) != NULL) {
3144 print_trace_line(&iter);
3145 trace_consume(&iter);
3146 }
3147
3148 trace_printk_seq(&iter.seq);
3111 } 3149 }
3112 max_tr.entries = global_trace.entries;
3113 3150
3114 pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n", 3151 if (!cnt)
3115 pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE); 3152 printk(KERN_TRACE " (ftrace buffer empty)\n");
3116 pr_info(" actual entries %ld\n", global_trace.entries); 3153 else
3154 printk(KERN_TRACE "---------------------------------\n");
3117 3155
3118 tracer_init_debugfs(); 3156 out:
3157 spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3158}
3159
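ftrace_dump() can be reached from both the panic and die notifiers, possibly concurrently, so it latches dump_ran under a spinlock to guarantee the buffer is dumped at most once. The guard in isolation, sketched with a pthread mutex in place of the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t dump_lock = PTHREAD_MUTEX_INITIALIZER;
    static int dump_ran;

    static void dump_once(void)
    {
            pthread_mutex_lock(&dump_lock);
            if (dump_ran)
                    goto out;               /* someone already dumped */
            dump_ran = 1;
            puts("dumping buffer exactly once");
    out:
            pthread_mutex_unlock(&dump_lock);
    }

    int main(void)
    {
            dump_once();
            dump_once();                    /* second call is a no-op */
            return 0;
    }
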
3160__init static int tracer_alloc_buffers(void)
3161{
3162 struct trace_array_cpu *data;
3163 int i;
3164
3165 /* TODO: make the number of buffers hot pluggable with CPUS */
3166 tracing_buffer_mask = cpu_possible_map;
3167
3168 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3169 TRACE_BUFFER_FLAGS);
3170 if (!global_trace.buffer) {
3171 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3172 WARN_ON(1);
3173 return 0;
3174 }
3175 global_trace.entries = ring_buffer_size(global_trace.buffer);
3176
3177#ifdef CONFIG_TRACER_MAX_TRACE
3178 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3179 TRACE_BUFFER_FLAGS);
3180 if (!max_tr.buffer) {
3181 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3182 WARN_ON(1);
3183 ring_buffer_free(global_trace.buffer);
3184 return 0;
3185 }
3186 max_tr.entries = ring_buffer_size(max_tr.buffer);
3187 WARN_ON(max_tr.entries != global_trace.entries);
3188#endif
3189
3190 /* Allocate the first page for all buffers */
3191 for_each_tracing_cpu(i) {
3192 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3193 max_tr.data[i] = &per_cpu(max_data, i);
3194 }
3119 3195
3120 trace_init_cmdlines(); 3196 trace_init_cmdlines();
3121 3197
3122 register_tracer(&no_tracer); 3198 register_tracer(&nop_trace);
3123 current_trace = &no_tracer; 3199#ifdef CONFIG_BOOT_TRACER
3200 register_tracer(&boot_tracer);
3201 current_trace = &boot_tracer;
3202 current_trace->init(&global_trace);
3203#else
3204 current_trace = &nop_trace;
3205#endif
3124 3206
3125 /* All seems OK, enable tracing */ 3207 /* All seems OK, enable tracing */
3126 global_trace.ctrl = tracer_enabled; 3208 global_trace.ctrl = tracer_enabled;
3127 tracing_disabled = 0; 3209 tracing_disabled = 0;
3128 3210
3129 return 0; 3211 atomic_notifier_chain_register(&panic_notifier_list,
3212 &trace_panic_notifier);
3130 3213
3131 free_buffers: 3214 register_die_notifier(&trace_die_notifier);
3132 for (i-- ; i >= 0; i--) {
3133 struct page *page, *tmp;
3134 struct trace_array_cpu *data = global_trace.data[i];
3135 3215
3136 if (data) { 3216 return 0;
3137 list_for_each_entry_safe(page, tmp,
3138 &data->trace_pages, lru) {
3139 list_del_init(&page->lru);
3140 __free_page(page);
3141 }
3142 }
3143
3144#ifdef CONFIG_TRACER_MAX_TRACE
3145 data = max_tr.data[i];
3146 if (data) {
3147 list_for_each_entry_safe(page, tmp,
3148 &data->trace_pages, lru) {
3149 list_del_init(&page->lru);
3150 __free_page(page);
3151 }
3152 }
3153#endif
3154 }
3155 return ret;
3156} 3217}
3157fs_initcall(tracer_alloc_buffers); 3218early_initcall(tracer_alloc_buffers);
3219fs_initcall(tracer_init_debugfs);