Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  1845
1 file changed, 949 insertions, 896 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8f3fb3db61c3..d345d649d073 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -14,6 +14,7 @@
14#include <linux/utsrelease.h> 14#include <linux/utsrelease.h>
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <linux/notifier.h>
17#include <linux/debugfs.h> 18#include <linux/debugfs.h>
18#include <linux/pagemap.h> 19#include <linux/pagemap.h>
19#include <linux/hardirq.h> 20#include <linux/hardirq.h>
@@ -22,6 +23,7 @@
22#include <linux/ftrace.h> 23#include <linux/ftrace.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/kdebug.h>
25#include <linux/ctype.h> 27#include <linux/ctype.h>
26#include <linux/init.h> 28#include <linux/init.h>
27#include <linux/poll.h> 29#include <linux/poll.h>
@@ -31,25 +33,36 @@
31#include <linux/writeback.h> 33#include <linux/writeback.h>
32 34
33#include <linux/stacktrace.h> 35#include <linux/stacktrace.h>
36#include <linux/ring_buffer.h>
34 37
35#include "trace.h" 38#include "trace.h"
36 39
40#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
41
37unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; 42unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
38unsigned long __read_mostly tracing_thresh; 43unsigned long __read_mostly tracing_thresh;
39 44
40static unsigned long __read_mostly tracing_nr_buffers; 45static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
46
47static inline void ftrace_disable_cpu(void)
48{
49 preempt_disable();
50 local_inc(&__get_cpu_var(ftrace_cpu_disabled));
51}
52
53static inline void ftrace_enable_cpu(void)
54{
55 local_dec(&__get_cpu_var(ftrace_cpu_disabled));
56 preempt_enable();
57}
58
41static cpumask_t __read_mostly tracing_buffer_mask; 59static cpumask_t __read_mostly tracing_buffer_mask;
42 60
43#define for_each_tracing_cpu(cpu) \ 61#define for_each_tracing_cpu(cpu) \
44 for_each_cpu_mask(cpu, tracing_buffer_mask) 62 for_each_cpu_mask(cpu, tracing_buffer_mask)
45 63
46static int trace_alloc_page(void);
47static int trace_free_page(void);
48
49static int tracing_disabled = 1; 64static int tracing_disabled = 1;
50 65
51static unsigned long tracing_pages_allocated;
52
53long 66long
54ns2usecs(cycle_t nsec) 67ns2usecs(cycle_t nsec)
55{ 68{
@@ -60,7 +73,9 @@ ns2usecs(cycle_t nsec)
60 73
61cycle_t ftrace_now(int cpu) 74cycle_t ftrace_now(int cpu)
62{ 75{
63 return cpu_clock(cpu); 76 u64 ts = ring_buffer_time_stamp(cpu);
77 ring_buffer_normalize_time_stamp(cpu, &ts);
78 return ts;
64} 79}
65 80
66/* 81/*
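
The two hunks above introduce the per-CPU ftrace_cpu_disabled counter and switch ftrace_now() to the ring buffer's own clock. The counter is the tracer's recursion guard: it is raised around any operation that walks or resets the ring buffer so that the function tracer does not record events about its own buffer manipulation (trace_function() checks it later in this patch). A minimal sketch of the intended usage, with do_buffer_work() as a hypothetical stand-in for calls such as ring_buffer_reset_cpu():

	static void guarded_buffer_op(struct trace_array *tr, int cpu)
	{
		ftrace_disable_cpu();              /* preemption off, per-CPU counter raised */
		do_buffer_work(tr->buffer, cpu);   /* e.g. ring_buffer_reset_cpu(tr->buffer, cpu) */
		ftrace_enable_cpu();               /* counter dropped, preemption restored */
	}
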
@@ -100,11 +115,18 @@ static int tracer_enabled = 1;
100int ftrace_function_enabled; 115int ftrace_function_enabled;
101 116
102/* 117/*
103 * trace_nr_entries is the number of entries that is allocated 118 * trace_buf_size is the size in bytes that is allocated
104 * for a buffer. Note, the number of entries is always rounded 119 * for a buffer. Note, the number of bytes is always rounded
105 * to ENTRIES_PER_PAGE. 120 * to page size.
121 *
122 * This number is purposely set to a low number of 16384.
123 * If the dump on oops happens, it will be much appreciated
124 * to not have to wait for all that output. Anyway this can be
125 * boot time and run time configurable.
106 */ 126 */
107static unsigned long trace_nr_entries = 65536UL; 127#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
128
129static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
108 130
109/* trace_types holds a link list of available tracers. */ 131/* trace_types holds a link list of available tracers. */
110static struct tracer *trace_types __read_mostly; 132static struct tracer *trace_types __read_mostly;
@@ -133,24 +155,6 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
133/* trace_flags holds iter_ctrl options */ 155/* trace_flags holds iter_ctrl options */
134unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; 156unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
135 157
136static notrace void no_trace_init(struct trace_array *tr)
137{
138 int cpu;
139
140 ftrace_function_enabled = 0;
141 if(tr->ctrl)
142 for_each_online_cpu(cpu)
143 tracing_reset(tr->data[cpu]);
144 tracer_enabled = 0;
145}
146
147/* dummy trace to disable tracing */
148static struct tracer no_tracer __read_mostly = {
149 .name = "none",
150 .init = no_trace_init
151};
152
153
154/** 158/**
155 * trace_wake_up - wake up tasks waiting for trace input 159 * trace_wake_up - wake up tasks waiting for trace input
156 * 160 *
@@ -167,23 +171,21 @@ void trace_wake_up(void)
167 wake_up(&trace_wait); 171 wake_up(&trace_wait);
168} 172}
169 173
170#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry)) 174static int __init set_buf_size(char *str)
171
172static int __init set_nr_entries(char *str)
173{ 175{
174 unsigned long nr_entries; 176 unsigned long buf_size;
175 int ret; 177 int ret;
176 178
177 if (!str) 179 if (!str)
178 return 0; 180 return 0;
179 ret = strict_strtoul(str, 0, &nr_entries); 181 ret = strict_strtoul(str, 0, &buf_size);
180 /* nr_entries can not be zero */ 182 /* nr_entries can not be zero */
181 if (ret < 0 || nr_entries == 0) 183 if (ret < 0 || buf_size == 0)
182 return 0; 184 return 0;
183 trace_nr_entries = nr_entries; 185 trace_buf_size = buf_size;
184 return 1; 186 return 1;
185} 187}
186__setup("trace_entries=", set_nr_entries); 188__setup("trace_buf_size=", set_buf_size);
187 189
188unsigned long nsecs_to_usecs(unsigned long nsecs) 190unsigned long nsecs_to_usecs(unsigned long nsecs)
189{ 191{
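
Note that the boot parameter changes along with the sizing model: the buffer is now requested in bytes rather than entries, so the old trace_entries= option is gone and, for example, booting with trace_buf_size=1441792 asks for the same 16384-entry default described in the comment above (assuming the 88-byte entry size quoted there).
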
@@ -191,21 +193,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
191} 193}
192 194
193/* 195/*
194 * trace_flag_type is an enumeration that holds different
195 * states when a trace occurs. These are:
196 * IRQS_OFF - interrupts were disabled
197 * NEED_RESCED - reschedule is requested
198 * HARDIRQ - inside an interrupt handler
199 * SOFTIRQ - inside a softirq handler
200 */
201enum trace_flag_type {
202 TRACE_FLAG_IRQS_OFF = 0x01,
203 TRACE_FLAG_NEED_RESCHED = 0x02,
204 TRACE_FLAG_HARDIRQ = 0x04,
205 TRACE_FLAG_SOFTIRQ = 0x08,
206};
207
208/*
209 * TRACE_ITER_SYM_MASK masks the options in trace_flags that 196 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
210 * control the output of kernel symbols. 197 * control the output of kernel symbols.
211 */ 198 */
@@ -224,6 +211,7 @@ static const char *trace_options[] = {
224 "block", 211 "block",
225 "stacktrace", 212 "stacktrace",
226 "sched-tree", 213 "sched-tree",
214 "ftrace_printk",
227 NULL 215 NULL
228}; 216};
229 217
@@ -266,54 +254,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
266 tracing_record_cmdline(current); 254 tracing_record_cmdline(current);
267} 255}
268 256
269#define CHECK_COND(cond) \
270 if (unlikely(cond)) { \
271 tracing_disabled = 1; \
272 WARN_ON(1); \
273 return -1; \
274 }
275
276/**
277 * check_pages - integrity check of trace buffers
278 *
279 * As a safty measure we check to make sure the data pages have not
280 * been corrupted.
281 */
282int check_pages(struct trace_array_cpu *data)
283{
284 struct page *page, *tmp;
285
286 CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
287 CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
288
289 list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
290 CHECK_COND(page->lru.next->prev != &page->lru);
291 CHECK_COND(page->lru.prev->next != &page->lru);
292 }
293
294 return 0;
295}
296
297/**
298 * head_page - page address of the first page in per_cpu buffer.
299 *
300 * head_page returns the page address of the first page in
301 * a per_cpu buffer. This also preforms various consistency
302 * checks to make sure the buffer has not been corrupted.
303 */
304void *head_page(struct trace_array_cpu *data)
305{
306 struct page *page;
307
308 if (list_empty(&data->trace_pages))
309 return NULL;
310
311 page = list_entry(data->trace_pages.next, struct page, lru);
312 BUG_ON(&page->lru == &data->trace_pages);
313
314 return page_address(page);
315}
316
317/** 257/**
318 * trace_seq_printf - sequence printing of trace information 258 * trace_seq_printf - sequence printing of trace information
319 * @s: trace sequence descriptor 259 * @s: trace sequence descriptor
@@ -395,28 +335,23 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
395 return len; 335 return len;
396} 336}
397 337
398#define HEX_CHARS 17 338#define MAX_MEMHEX_BYTES 8
399static const char hex2asc[] = "0123456789abcdef"; 339#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
400 340
401static int 341static int
402trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) 342trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
403{ 343{
404 unsigned char hex[HEX_CHARS]; 344 unsigned char hex[HEX_CHARS];
405 unsigned char *data = mem; 345 unsigned char *data = mem;
406 unsigned char byte;
407 int i, j; 346 int i, j;
408 347
409 BUG_ON(len >= HEX_CHARS);
410
411#ifdef __BIG_ENDIAN 348#ifdef __BIG_ENDIAN
412 for (i = 0, j = 0; i < len; i++) { 349 for (i = 0, j = 0; i < len; i++) {
413#else 350#else
414 for (i = len-1, j = 0; i >= 0; i--) { 351 for (i = len-1, j = 0; i >= 0; i--) {
415#endif 352#endif
416 byte = data[i]; 353 hex[j++] = hex_asc_hi(data[i]);
417 354 hex[j++] = hex_asc_lo(data[i]);
418 hex[j++] = hex2asc[byte & 0x0f];
419 hex[j++] = hex2asc[byte >> 4];
420 } 355 }
421 hex[j++] = ' '; 356 hex[j++] = ' ';
422 357
@@ -460,34 +395,6 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
460 trace_seq_reset(s); 395 trace_seq_reset(s);
461} 396}
462 397
463/*
464 * flip the trace buffers between two trace descriptors.
465 * This usually is the buffers between the global_trace and
466 * the max_tr to record a snapshot of a current trace.
467 *
468 * The ftrace_max_lock must be held.
469 */
470static void
471flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
472{
473 struct list_head flip_pages;
474
475 INIT_LIST_HEAD(&flip_pages);
476
477 memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
478 sizeof(struct trace_array_cpu) -
479 offsetof(struct trace_array_cpu, trace_head_idx));
480
481 check_pages(tr1);
482 check_pages(tr2);
483 list_splice_init(&tr1->trace_pages, &flip_pages);
484 list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
485 list_splice_init(&flip_pages, &tr2->trace_pages);
486 BUG_ON(!list_empty(&flip_pages));
487 check_pages(tr1);
488 check_pages(tr2);
489}
490
491/** 398/**
492 * update_max_tr - snapshot all trace buffers from global_trace to max_tr 399 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
493 * @tr: tracer 400 * @tr: tracer
@@ -500,17 +407,17 @@ flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
500void 407void
501update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 408update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
502{ 409{
503 struct trace_array_cpu *data; 410 struct ring_buffer *buf = tr->buffer;
504 int i;
505 411
506 WARN_ON_ONCE(!irqs_disabled()); 412 WARN_ON_ONCE(!irqs_disabled());
507 __raw_spin_lock(&ftrace_max_lock); 413 __raw_spin_lock(&ftrace_max_lock);
508 /* clear out all the previous traces */ 414
509 for_each_tracing_cpu(i) { 415 tr->buffer = max_tr.buffer;
510 data = tr->data[i]; 416 max_tr.buffer = buf;
511 flip_trace(max_tr.data[i], data); 417
512 tracing_reset(data); 418 ftrace_disable_cpu();
513 } 419 ring_buffer_reset(tr->buffer);
420 ftrace_enable_cpu();
514 421
515 __update_max_tr(tr, tsk, cpu); 422 __update_max_tr(tr, tsk, cpu);
516 __raw_spin_unlock(&ftrace_max_lock); 423 __raw_spin_unlock(&ftrace_max_lock);
@@ -527,16 +434,19 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
527void 434void
528update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) 435update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
529{ 436{
530 struct trace_array_cpu *data = tr->data[cpu]; 437 int ret;
531 int i;
532 438
533 WARN_ON_ONCE(!irqs_disabled()); 439 WARN_ON_ONCE(!irqs_disabled());
534 __raw_spin_lock(&ftrace_max_lock); 440 __raw_spin_lock(&ftrace_max_lock);
535 for_each_tracing_cpu(i)
536 tracing_reset(max_tr.data[i]);
537 441
538 flip_trace(max_tr.data[cpu], data); 442 ftrace_disable_cpu();
539 tracing_reset(data); 443
444 ring_buffer_reset(max_tr.buffer);
445 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
446
447 ftrace_enable_cpu();
448
449 WARN_ON_ONCE(ret);
540 450
541 __update_max_tr(tr, tsk, cpu); 451 __update_max_tr(tr, tsk, cpu);
542 __raw_spin_unlock(&ftrace_max_lock); 452 __raw_spin_unlock(&ftrace_max_lock);
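
With per-CPU ring buffers, taking a latency snapshot no longer copies or flips page lists. update_max_tr() above simply exchanges the two trace_array buffer pointers and resets the one that stays live, while update_max_tr_single() swaps a single CPU's buffer via ring_buffer_swap_cpu(). Condensed from the first of those hunks:

	struct ring_buffer *buf = tr->buffer;   /* the buffer holding the just-recorded trace */

	tr->buffer = max_tr.buffer;             /* live tracing moves to the old max buffer */
	max_tr.buffer = buf;                    /* max_tr keeps the snapshot */

	ftrace_disable_cpu();                   /* don't trace the reset itself */
	ring_buffer_reset(tr->buffer);          /* start the new live buffer clean */
	ftrace_enable_cpu();
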
@@ -573,7 +483,6 @@ int register_tracer(struct tracer *type)
573#ifdef CONFIG_FTRACE_STARTUP_TEST 483#ifdef CONFIG_FTRACE_STARTUP_TEST
574 if (type->selftest) { 484 if (type->selftest) {
575 struct tracer *saved_tracer = current_trace; 485 struct tracer *saved_tracer = current_trace;
576 struct trace_array_cpu *data;
577 struct trace_array *tr = &global_trace; 486 struct trace_array *tr = &global_trace;
578 int saved_ctrl = tr->ctrl; 487 int saved_ctrl = tr->ctrl;
579 int i; 488 int i;
@@ -585,10 +494,7 @@ int register_tracer(struct tracer *type)
585 * If we fail, we do not register this tracer. 494 * If we fail, we do not register this tracer.
586 */ 495 */
587 for_each_tracing_cpu(i) { 496 for_each_tracing_cpu(i) {
588 data = tr->data[i]; 497 tracing_reset(tr, i);
589 if (!head_page(data))
590 continue;
591 tracing_reset(data);
592 } 498 }
593 current_trace = type; 499 current_trace = type;
594 tr->ctrl = 0; 500 tr->ctrl = 0;
@@ -604,10 +510,7 @@ int register_tracer(struct tracer *type)
604 } 510 }
605 /* Only reset on passing, to avoid touching corrupted buffers */ 511 /* Only reset on passing, to avoid touching corrupted buffers */
606 for_each_tracing_cpu(i) { 512 for_each_tracing_cpu(i) {
607 data = tr->data[i]; 513 tracing_reset(tr, i);
608 if (!head_page(data))
609 continue;
610 tracing_reset(data);
611 } 514 }
612 printk(KERN_CONT "PASSED\n"); 515 printk(KERN_CONT "PASSED\n");
613 } 516 }
@@ -653,13 +556,11 @@ void unregister_tracer(struct tracer *type)
653 mutex_unlock(&trace_types_lock); 556 mutex_unlock(&trace_types_lock);
654} 557}
655 558
656void tracing_reset(struct trace_array_cpu *data) 559void tracing_reset(struct trace_array *tr, int cpu)
657{ 560{
658 data->trace_idx = 0; 561 ftrace_disable_cpu();
659 data->overrun = 0; 562 ring_buffer_reset_cpu(tr->buffer, cpu);
660 data->trace_head = data->trace_tail = head_page(data); 563 ftrace_enable_cpu();
661 data->trace_head_idx = 0;
662 data->trace_tail_idx = 0;
663} 564}
664 565
665#define SAVED_CMDLINES 128 566#define SAVED_CMDLINES 128
@@ -745,82 +646,16 @@ void tracing_record_cmdline(struct task_struct *tsk)
745 trace_save_cmdline(tsk); 646 trace_save_cmdline(tsk);
746} 647}
747 648
748static inline struct list_head * 649void
749trace_next_list(struct trace_array_cpu *data, struct list_head *next) 650tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
750{ 651 int pc)
751 /*
752 * Roundrobin - but skip the head (which is not a real page):
753 */
754 next = next->next;
755 if (unlikely(next == &data->trace_pages))
756 next = next->next;
757 BUG_ON(next == &data->trace_pages);
758
759 return next;
760}
761
762static inline void *
763trace_next_page(struct trace_array_cpu *data, void *addr)
764{
765 struct list_head *next;
766 struct page *page;
767
768 page = virt_to_page(addr);
769
770 next = trace_next_list(data, &page->lru);
771 page = list_entry(next, struct page, lru);
772
773 return page_address(page);
774}
775
776static inline struct trace_entry *
777tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
778{
779 unsigned long idx, idx_next;
780 struct trace_entry *entry;
781
782 data->trace_idx++;
783 idx = data->trace_head_idx;
784 idx_next = idx + 1;
785
786 BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
787
788 entry = data->trace_head + idx * TRACE_ENTRY_SIZE;
789
790 if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
791 data->trace_head = trace_next_page(data, data->trace_head);
792 idx_next = 0;
793 }
794
795 if (data->trace_head == data->trace_tail &&
796 idx_next == data->trace_tail_idx) {
797 /* overrun */
798 data->overrun++;
799 data->trace_tail_idx++;
800 if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
801 data->trace_tail =
802 trace_next_page(data, data->trace_tail);
803 data->trace_tail_idx = 0;
804 }
805 }
806
807 data->trace_head_idx = idx_next;
808
809 return entry;
810}
811
812static inline void
813tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
814{ 652{
815 struct task_struct *tsk = current; 653 struct task_struct *tsk = current;
816 unsigned long pc;
817
818 pc = preempt_count();
819 654
820 entry->preempt_count = pc & 0xff; 655 entry->preempt_count = pc & 0xff;
821 entry->pid = (tsk) ? tsk->pid : 0; 656 entry->pid = (tsk) ? tsk->pid : 0;
822 entry->t = ftrace_now(raw_smp_processor_id()); 657 entry->flags =
823 entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 658 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
824 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | 659 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
825 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | 660 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
826 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); 661 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -828,145 +663,139 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
828 663
829void 664void
830trace_function(struct trace_array *tr, struct trace_array_cpu *data, 665trace_function(struct trace_array *tr, struct trace_array_cpu *data,
831 unsigned long ip, unsigned long parent_ip, unsigned long flags) 666 unsigned long ip, unsigned long parent_ip, unsigned long flags,
667 int pc)
832{ 668{
833 struct trace_entry *entry; 669 struct ring_buffer_event *event;
670 struct ftrace_entry *entry;
834 unsigned long irq_flags; 671 unsigned long irq_flags;
835 672
836 raw_local_irq_save(irq_flags); 673 /* If we are reading the ring buffer, don't trace */
837 __raw_spin_lock(&data->lock); 674 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
838 entry = tracing_get_trace_entry(tr, data); 675 return;
839 tracing_generic_entry_update(entry, flags); 676
840 entry->type = TRACE_FN; 677 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
841 entry->fn.ip = ip; 678 &irq_flags);
842 entry->fn.parent_ip = parent_ip; 679 if (!event)
843 __raw_spin_unlock(&data->lock); 680 return;
844 raw_local_irq_restore(irq_flags); 681 entry = ring_buffer_event_data(event);
682 tracing_generic_entry_update(&entry->ent, flags, pc);
683 entry->ent.type = TRACE_FN;
684 entry->ip = ip;
685 entry->parent_ip = parent_ip;
686 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
845} 687}
846 688
847void 689void
848ftrace(struct trace_array *tr, struct trace_array_cpu *data, 690ftrace(struct trace_array *tr, struct trace_array_cpu *data,
849 unsigned long ip, unsigned long parent_ip, unsigned long flags) 691 unsigned long ip, unsigned long parent_ip, unsigned long flags,
692 int pc)
850{ 693{
851 if (likely(!atomic_read(&data->disabled))) 694 if (likely(!atomic_read(&data->disabled)))
852 trace_function(tr, data, ip, parent_ip, flags); 695 trace_function(tr, data, ip, parent_ip, flags, pc);
853} 696}
854 697
855#ifdef CONFIG_MMIOTRACE 698static void ftrace_trace_stack(struct trace_array *tr,
856void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data, 699 struct trace_array_cpu *data,
857 struct mmiotrace_rw *rw) 700 unsigned long flags,
701 int skip, int pc)
858{ 702{
859 struct trace_entry *entry; 703 struct ring_buffer_event *event;
704 struct stack_entry *entry;
705 struct stack_trace trace;
860 unsigned long irq_flags; 706 unsigned long irq_flags;
861 707
862 raw_local_irq_save(irq_flags); 708 if (!(trace_flags & TRACE_ITER_STACKTRACE))
863 __raw_spin_lock(&data->lock); 709 return;
864
865 entry = tracing_get_trace_entry(tr, data);
866 tracing_generic_entry_update(entry, 0);
867 entry->type = TRACE_MMIO_RW;
868 entry->mmiorw = *rw;
869
870 __raw_spin_unlock(&data->lock);
871 raw_local_irq_restore(irq_flags);
872
873 trace_wake_up();
874}
875
876void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
877 struct mmiotrace_map *map)
878{
879 struct trace_entry *entry;
880 unsigned long irq_flags;
881 710
882 raw_local_irq_save(irq_flags); 711 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
883 __raw_spin_lock(&data->lock); 712 &irq_flags);
713 if (!event)
714 return;
715 entry = ring_buffer_event_data(event);
716 tracing_generic_entry_update(&entry->ent, flags, pc);
717 entry->ent.type = TRACE_STACK;
884 718
885 entry = tracing_get_trace_entry(tr, data); 719 memset(&entry->caller, 0, sizeof(entry->caller));
886 tracing_generic_entry_update(entry, 0);
887 entry->type = TRACE_MMIO_MAP;
888 entry->mmiomap = *map;
889 720
890 __raw_spin_unlock(&data->lock); 721 trace.nr_entries = 0;
891 raw_local_irq_restore(irq_flags); 722 trace.max_entries = FTRACE_STACK_ENTRIES;
723 trace.skip = skip;
724 trace.entries = entry->caller;
892 725
893 trace_wake_up(); 726 save_stack_trace(&trace);
727 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
894} 728}
895#endif
896 729
897void __trace_stack(struct trace_array *tr, 730void __trace_stack(struct trace_array *tr,
898 struct trace_array_cpu *data, 731 struct trace_array_cpu *data,
899 unsigned long flags, 732 unsigned long flags,
900 int skip) 733 int skip)
901{ 734{
902 struct trace_entry *entry; 735 ftrace_trace_stack(tr, data, flags, skip, preempt_count());
903 struct stack_trace trace;
904
905 if (!(trace_flags & TRACE_ITER_STACKTRACE))
906 return;
907
908 entry = tracing_get_trace_entry(tr, data);
909 tracing_generic_entry_update(entry, flags);
910 entry->type = TRACE_STACK;
911
912 memset(&entry->stack, 0, sizeof(entry->stack));
913
914 trace.nr_entries = 0;
915 trace.max_entries = FTRACE_STACK_ENTRIES;
916 trace.skip = skip;
917 trace.entries = entry->stack.caller;
918
919 save_stack_trace(&trace);
920} 736}
921 737
922void 738static void
923__trace_special(void *__tr, void *__data, 739ftrace_trace_special(void *__tr, void *__data,
924 unsigned long arg1, unsigned long arg2, unsigned long arg3) 740 unsigned long arg1, unsigned long arg2, unsigned long arg3,
741 int pc)
925{ 742{
743 struct ring_buffer_event *event;
926 struct trace_array_cpu *data = __data; 744 struct trace_array_cpu *data = __data;
927 struct trace_array *tr = __tr; 745 struct trace_array *tr = __tr;
928 struct trace_entry *entry; 746 struct special_entry *entry;
929 unsigned long irq_flags; 747 unsigned long irq_flags;
930 748
931 raw_local_irq_save(irq_flags); 749 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
932 __raw_spin_lock(&data->lock); 750 &irq_flags);
933 entry = tracing_get_trace_entry(tr, data); 751 if (!event)
934 tracing_generic_entry_update(entry, 0); 752 return;
935 entry->type = TRACE_SPECIAL; 753 entry = ring_buffer_event_data(event);
936 entry->special.arg1 = arg1; 754 tracing_generic_entry_update(&entry->ent, 0, pc);
937 entry->special.arg2 = arg2; 755 entry->ent.type = TRACE_SPECIAL;
938 entry->special.arg3 = arg3; 756 entry->arg1 = arg1;
939 __trace_stack(tr, data, irq_flags, 4); 757 entry->arg2 = arg2;
940 __raw_spin_unlock(&data->lock); 758 entry->arg3 = arg3;
941 raw_local_irq_restore(irq_flags); 759 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
760 ftrace_trace_stack(tr, data, irq_flags, 4, pc);
942 761
943 trace_wake_up(); 762 trace_wake_up();
944} 763}
945 764
946void 765void
766__trace_special(void *__tr, void *__data,
767 unsigned long arg1, unsigned long arg2, unsigned long arg3)
768{
769 ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
770}
771
772void
947tracing_sched_switch_trace(struct trace_array *tr, 773tracing_sched_switch_trace(struct trace_array *tr,
948 struct trace_array_cpu *data, 774 struct trace_array_cpu *data,
949 struct task_struct *prev, 775 struct task_struct *prev,
950 struct task_struct *next, 776 struct task_struct *next,
951 unsigned long flags) 777 unsigned long flags, int pc)
952{ 778{
953 struct trace_entry *entry; 779 struct ring_buffer_event *event;
780 struct ctx_switch_entry *entry;
954 unsigned long irq_flags; 781 unsigned long irq_flags;
955 782
956 raw_local_irq_save(irq_flags); 783 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
957 __raw_spin_lock(&data->lock); 784 &irq_flags);
958 entry = tracing_get_trace_entry(tr, data); 785 if (!event)
959 tracing_generic_entry_update(entry, flags); 786 return;
960 entry->type = TRACE_CTX; 787 entry = ring_buffer_event_data(event);
961 entry->ctx.prev_pid = prev->pid; 788 tracing_generic_entry_update(&entry->ent, flags, pc);
962 entry->ctx.prev_prio = prev->prio; 789 entry->ent.type = TRACE_CTX;
963 entry->ctx.prev_state = prev->state; 790 entry->prev_pid = prev->pid;
964 entry->ctx.next_pid = next->pid; 791 entry->prev_prio = prev->prio;
965 entry->ctx.next_prio = next->prio; 792 entry->prev_state = prev->state;
966 entry->ctx.next_state = next->state; 793 entry->next_pid = next->pid;
967 __trace_stack(tr, data, flags, 5); 794 entry->next_prio = next->prio;
968 __raw_spin_unlock(&data->lock); 795 entry->next_state = next->state;
969 raw_local_irq_restore(irq_flags); 796 entry->next_cpu = task_cpu(next);
797 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
798 ftrace_trace_stack(tr, data, flags, 5, pc);
970} 799}
971 800
972void 801void
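
Every event writer in the hunk above now follows the same ring-buffer discipline: reserve space for an event, fill in the common header plus the type-specific fields, then commit. The shape of that pattern, condensed from trace_function() as converted above (error handling beyond the NULL check is elided):

	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq_flags);
	if (!event)
		return;                                   /* buffer disabled or out of space: drop the event */
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);    /* pid, irq/preempt flags, preempt count */
	entry->ent.type = TRACE_FN;
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags); /* make the event visible to readers */

The old per-CPU data->lock spinlock and raw_local_irq_save() pairs disappear because the reserve/commit calls take over the locking.
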
@@ -974,25 +803,28 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
974 struct trace_array_cpu *data, 803 struct trace_array_cpu *data,
975 struct task_struct *wakee, 804 struct task_struct *wakee,
976 struct task_struct *curr, 805 struct task_struct *curr,
977 unsigned long flags) 806 unsigned long flags, int pc)
978{ 807{
979 struct trace_entry *entry; 808 struct ring_buffer_event *event;
809 struct ctx_switch_entry *entry;
980 unsigned long irq_flags; 810 unsigned long irq_flags;
981 811
982 raw_local_irq_save(irq_flags); 812 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
983 __raw_spin_lock(&data->lock); 813 &irq_flags);
984 entry = tracing_get_trace_entry(tr, data); 814 if (!event)
985 tracing_generic_entry_update(entry, flags); 815 return;
986 entry->type = TRACE_WAKE; 816 entry = ring_buffer_event_data(event);
987 entry->ctx.prev_pid = curr->pid; 817 tracing_generic_entry_update(&entry->ent, flags, pc);
988 entry->ctx.prev_prio = curr->prio; 818 entry->ent.type = TRACE_WAKE;
989 entry->ctx.prev_state = curr->state; 819 entry->prev_pid = curr->pid;
990 entry->ctx.next_pid = wakee->pid; 820 entry->prev_prio = curr->prio;
991 entry->ctx.next_prio = wakee->prio; 821 entry->prev_state = curr->state;
992 entry->ctx.next_state = wakee->state; 822 entry->next_pid = wakee->pid;
993 __trace_stack(tr, data, flags, 6); 823 entry->next_prio = wakee->prio;
994 __raw_spin_unlock(&data->lock); 824 entry->next_state = wakee->state;
995 raw_local_irq_restore(irq_flags); 825 entry->next_cpu = task_cpu(wakee);
826 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
827 ftrace_trace_stack(tr, data, flags, 6, pc);
996 828
997 trace_wake_up(); 829 trace_wake_up();
998} 830}
@@ -1002,23 +834,21 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1002{ 834{
1003 struct trace_array *tr = &global_trace; 835 struct trace_array *tr = &global_trace;
1004 struct trace_array_cpu *data; 836 struct trace_array_cpu *data;
1005 unsigned long flags;
1006 long disabled;
1007 int cpu; 837 int cpu;
838 int pc;
1008 839
1009 if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl) 840 if (tracing_disabled || !tr->ctrl)
1010 return; 841 return;
1011 842
1012 local_irq_save(flags); 843 pc = preempt_count();
844 preempt_disable_notrace();
1013 cpu = raw_smp_processor_id(); 845 cpu = raw_smp_processor_id();
1014 data = tr->data[cpu]; 846 data = tr->data[cpu];
1015 disabled = atomic_inc_return(&data->disabled);
1016 847
1017 if (likely(disabled == 1)) 848 if (likely(!atomic_read(&data->disabled)))
1018 __trace_special(tr, data, arg1, arg2, arg3); 849 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
1019 850
1020 atomic_dec(&data->disabled); 851 preempt_enable_notrace();
1021 local_irq_restore(flags);
1022} 852}
1023 853
1024#ifdef CONFIG_FTRACE 854#ifdef CONFIG_FTRACE
@@ -1029,7 +859,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
1029 struct trace_array_cpu *data; 859 struct trace_array_cpu *data;
1030 unsigned long flags; 860 unsigned long flags;
1031 long disabled; 861 long disabled;
1032 int cpu; 862 int cpu, resched;
863 int pc;
1033 864
1034 if (unlikely(!ftrace_function_enabled)) 865 if (unlikely(!ftrace_function_enabled))
1035 return; 866 return;
@@ -1037,16 +868,22 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
1037 if (skip_trace(ip)) 868 if (skip_trace(ip))
1038 return; 869 return;
1039 870
1040 local_irq_save(flags); 871 pc = preempt_count();
872 resched = need_resched();
873 preempt_disable_notrace();
874 local_save_flags(flags);
1041 cpu = raw_smp_processor_id(); 875 cpu = raw_smp_processor_id();
1042 data = tr->data[cpu]; 876 data = tr->data[cpu];
1043 disabled = atomic_inc_return(&data->disabled); 877 disabled = atomic_inc_return(&data->disabled);
1044 878
1045 if (likely(disabled == 1)) 879 if (likely(disabled == 1))
1046 trace_function(tr, data, ip, parent_ip, flags); 880 trace_function(tr, data, ip, parent_ip, flags, pc);
1047 881
1048 atomic_dec(&data->disabled); 882 atomic_dec(&data->disabled);
1049 local_irq_restore(flags); 883 if (resched)
884 preempt_enable_no_resched_notrace();
885 else
886 preempt_enable_notrace();
1050} 887}
1051 888
1052static struct ftrace_ops trace_ops __read_mostly = 889static struct ftrace_ops trace_ops __read_mostly =
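
function_trace_call() above no longer disables interrupts around the recording; it only disables preemption, and it remembers whether a reschedule was already pending so that re-enabling preemption from inside the tracer does not itself call into the scheduler (which would recurse back into the function tracer). The idiom, reduced to its core:

	int resched;

	resched = need_resched();                 /* was a reschedule already requested? */
	preempt_disable_notrace();                /* stay on this CPU without tracing the call */

	/* ... record the function entry ... */

	if (resched)
		preempt_enable_no_resched_notrace();  /* leave the pending reschedule to the outer context */
	else
		preempt_enable_notrace();
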
@@ -1073,111 +910,96 @@ enum trace_file_type {
1073 TRACE_FILE_LAT_FMT = 1, 910 TRACE_FILE_LAT_FMT = 1,
1074}; 911};
1075 912
1076static struct trace_entry * 913static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
1077trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
1078 struct trace_iterator *iter, int cpu)
1079{ 914{
1080 struct page *page; 915 /* Don't allow ftrace to trace into the ring buffers */
1081 struct trace_entry *array; 916 ftrace_disable_cpu();
1082 917
1083 if (iter->next_idx[cpu] >= tr->entries || 918 iter->idx++;
1084 iter->next_idx[cpu] >= data->trace_idx || 919 if (iter->buffer_iter[iter->cpu])
1085 (data->trace_head == data->trace_tail && 920 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1086 data->trace_head_idx == data->trace_tail_idx))
1087 return NULL;
1088 921
1089 if (!iter->next_page[cpu]) { 922 ftrace_enable_cpu();
1090 /* Initialize the iterator for this cpu trace buffer */ 923}
1091 WARN_ON(!data->trace_tail); 924
1092 page = virt_to_page(data->trace_tail); 925static struct trace_entry *
1093 iter->next_page[cpu] = &page->lru; 926peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
1094 iter->next_page_idx[cpu] = data->trace_tail_idx; 927{
1095 } 928 struct ring_buffer_event *event;
929 struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1096 930
1097 page = list_entry(iter->next_page[cpu], struct page, lru); 931 /* Don't allow ftrace to trace into the ring buffers */
1098 BUG_ON(&data->trace_pages == &page->lru); 932 ftrace_disable_cpu();
933
934 if (buf_iter)
935 event = ring_buffer_iter_peek(buf_iter, ts);
936 else
937 event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
1099 938
1100 array = page_address(page); 939 ftrace_enable_cpu();
1101 940
1102 WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE); 941 return event ? ring_buffer_event_data(event) : NULL;
1103 return &array[iter->next_page_idx[cpu]];
1104} 942}
1105 943
1106static struct trace_entry * 944static struct trace_entry *
1107find_next_entry(struct trace_iterator *iter, int *ent_cpu) 945__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1108{ 946{
1109 struct trace_array *tr = iter->tr; 947 struct ring_buffer *buffer = iter->tr->buffer;
1110 struct trace_entry *ent, *next = NULL; 948 struct trace_entry *ent, *next = NULL;
949 u64 next_ts = 0, ts;
1111 int next_cpu = -1; 950 int next_cpu = -1;
1112 int cpu; 951 int cpu;
1113 952
1114 for_each_tracing_cpu(cpu) { 953 for_each_tracing_cpu(cpu) {
1115 if (!head_page(tr->data[cpu])) 954
955 if (ring_buffer_empty_cpu(buffer, cpu))
1116 continue; 956 continue;
1117 ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); 957
958 ent = peek_next_entry(iter, cpu, &ts);
959
1118 /* 960 /*
1119 * Pick the entry with the smallest timestamp: 961 * Pick the entry with the smallest timestamp:
1120 */ 962 */
1121 if (ent && (!next || ent->t < next->t)) { 963 if (ent && (!next || ts < next_ts)) {
1122 next = ent; 964 next = ent;
1123 next_cpu = cpu; 965 next_cpu = cpu;
966 next_ts = ts;
1124 } 967 }
1125 } 968 }
1126 969
1127 if (ent_cpu) 970 if (ent_cpu)
1128 *ent_cpu = next_cpu; 971 *ent_cpu = next_cpu;
1129 972
973 if (ent_ts)
974 *ent_ts = next_ts;
975
1130 return next; 976 return next;
1131} 977}
1132 978
1133static void trace_iterator_increment(struct trace_iterator *iter) 979/* Find the next real entry, without updating the iterator itself */
980static struct trace_entry *
981find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1134{ 982{
1135 iter->idx++; 983 return __find_next_entry(iter, ent_cpu, ent_ts);
1136 iter->next_idx[iter->cpu]++;
1137 iter->next_page_idx[iter->cpu]++;
1138
1139 if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
1140 struct trace_array_cpu *data = iter->tr->data[iter->cpu];
1141
1142 iter->next_page_idx[iter->cpu] = 0;
1143 iter->next_page[iter->cpu] =
1144 trace_next_list(data, iter->next_page[iter->cpu]);
1145 }
1146} 984}
1147 985
1148static void trace_consume(struct trace_iterator *iter) 986/* Find the next real entry, and increment the iterator to the next entry */
987static void *find_next_entry_inc(struct trace_iterator *iter)
1149{ 988{
1150 struct trace_array_cpu *data = iter->tr->data[iter->cpu]; 989 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
1151 990
1152 data->trace_tail_idx++; 991 if (iter->ent)
1153 if (data->trace_tail_idx >= ENTRIES_PER_PAGE) { 992 trace_iterator_increment(iter, iter->cpu);
1154 data->trace_tail = trace_next_page(data, data->trace_tail);
1155 data->trace_tail_idx = 0;
1156 }
1157 993
1158 /* Check if we empty it, then reset the index */ 994 return iter->ent ? iter : NULL;
1159 if (data->trace_head == data->trace_tail &&
1160 data->trace_head_idx == data->trace_tail_idx)
1161 data->trace_idx = 0;
1162} 995}
1163 996
1164static void *find_next_entry_inc(struct trace_iterator *iter) 997static void trace_consume(struct trace_iterator *iter)
1165{ 998{
1166 struct trace_entry *next; 999 /* Don't allow ftrace to trace into the ring buffers */
1167 int next_cpu = -1; 1000 ftrace_disable_cpu();
1168 1001 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
1169 next = find_next_entry(iter, &next_cpu); 1002 ftrace_enable_cpu();
1170
1171 iter->prev_ent = iter->ent;
1172 iter->prev_cpu = iter->cpu;
1173
1174 iter->ent = next;
1175 iter->cpu = next_cpu;
1176
1177 if (next)
1178 trace_iterator_increment(iter);
1179
1180 return next ? iter : NULL;
1181} 1003}
1182 1004
1183static void *s_next(struct seq_file *m, void *v, loff_t *pos) 1005static void *s_next(struct seq_file *m, void *v, loff_t *pos)
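
With one ring buffer per CPU, the iterator has to merge several ordered streams. __find_next_entry() above does this by peeking at the head of every CPU's buffer and keeping the entry with the smallest timestamp; peek_next_entry() is the helper defined in the same hunk. The selection loop, condensed:

	struct trace_entry *ent, *next = NULL;
	u64 next_ts = 0, ts;
	int next_cpu = -1, cpu;

	for_each_tracing_cpu(cpu) {
		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;
		ent = peek_next_entry(iter, cpu, &ts);
		if (ent && (!next || ts < next_ts)) {   /* keep the oldest entry seen so far */
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}
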
@@ -1210,7 +1032,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1210 struct trace_iterator *iter = m->private; 1032 struct trace_iterator *iter = m->private;
1211 void *p = NULL; 1033 void *p = NULL;
1212 loff_t l = 0; 1034 loff_t l = 0;
1213 int i; 1035 int cpu;
1214 1036
1215 mutex_lock(&trace_types_lock); 1037 mutex_lock(&trace_types_lock);
1216 1038
@@ -1229,14 +1051,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1229 iter->ent = NULL; 1051 iter->ent = NULL;
1230 iter->cpu = 0; 1052 iter->cpu = 0;
1231 iter->idx = -1; 1053 iter->idx = -1;
1232 iter->prev_ent = NULL;
1233 iter->prev_cpu = -1;
1234 1054
1235 for_each_tracing_cpu(i) { 1055 ftrace_disable_cpu();
1236 iter->next_idx[i] = 0; 1056
1237 iter->next_page[i] = NULL; 1057 for_each_tracing_cpu(cpu) {
1058 ring_buffer_iter_reset(iter->buffer_iter[cpu]);
1238 } 1059 }
1239 1060
1061 ftrace_enable_cpu();
1062
1240 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 1063 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1241 ; 1064 ;
1242 1065
@@ -1330,21 +1153,21 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1330 1153
1331static void print_lat_help_header(struct seq_file *m) 1154static void print_lat_help_header(struct seq_file *m)
1332{ 1155{
1333 seq_puts(m, "# _------=> CPU# \n"); 1156 seq_puts(m, "# _------=> CPU# \n");
1334 seq_puts(m, "# / _-----=> irqs-off \n"); 1157 seq_puts(m, "# / _-----=> irqs-off \n");
1335 seq_puts(m, "# | / _----=> need-resched \n"); 1158 seq_puts(m, "# | / _----=> need-resched \n");
1336 seq_puts(m, "# || / _---=> hardirq/softirq \n"); 1159 seq_puts(m, "# || / _---=> hardirq/softirq \n");
1337 seq_puts(m, "# ||| / _--=> preempt-depth \n"); 1160 seq_puts(m, "# ||| / _--=> preempt-depth \n");
1338 seq_puts(m, "# |||| / \n"); 1161 seq_puts(m, "# |||| / \n");
1339 seq_puts(m, "# ||||| delay \n"); 1162 seq_puts(m, "# ||||| delay \n");
1340 seq_puts(m, "# cmd pid ||||| time | caller \n"); 1163 seq_puts(m, "# cmd pid ||||| time | caller \n");
1341 seq_puts(m, "# \\ / ||||| \\ | / \n"); 1164 seq_puts(m, "# \\ / ||||| \\ | / \n");
1342} 1165}
1343 1166
1344static void print_func_help_header(struct seq_file *m) 1167static void print_func_help_header(struct seq_file *m)
1345{ 1168{
1346 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); 1169 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
1347 seq_puts(m, "# | | | | |\n"); 1170 seq_puts(m, "# | | | | |\n");
1348} 1171}
1349 1172
1350 1173
@@ -1355,23 +1178,16 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1355 struct trace_array *tr = iter->tr; 1178 struct trace_array *tr = iter->tr;
1356 struct trace_array_cpu *data = tr->data[tr->cpu]; 1179 struct trace_array_cpu *data = tr->data[tr->cpu];
1357 struct tracer *type = current_trace; 1180 struct tracer *type = current_trace;
1358 unsigned long total = 0; 1181 unsigned long total;
1359 unsigned long entries = 0; 1182 unsigned long entries;
1360 int cpu;
1361 const char *name = "preemption"; 1183 const char *name = "preemption";
1362 1184
1363 if (type) 1185 if (type)
1364 name = type->name; 1186 name = type->name;
1365 1187
1366 for_each_tracing_cpu(cpu) { 1188 entries = ring_buffer_entries(iter->tr->buffer);
1367 if (head_page(tr->data[cpu])) { 1189 total = entries +
1368 total += tr->data[cpu]->trace_idx; 1190 ring_buffer_overruns(iter->tr->buffer);
1369 if (tr->data[cpu]->trace_idx > tr->entries)
1370 entries += tr->entries;
1371 else
1372 entries += tr->data[cpu]->trace_idx;
1373 }
1374 }
1375 1191
1376 seq_printf(m, "%s latency trace v1.1.5 on %s\n", 1192 seq_printf(m, "%s latency trace v1.1.5 on %s\n",
1377 name, UTS_RELEASE); 1193 name, UTS_RELEASE);
@@ -1428,7 +1244,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1428 comm = trace_find_cmdline(entry->pid); 1244 comm = trace_find_cmdline(entry->pid);
1429 1245
1430 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); 1246 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
1431 trace_seq_printf(s, "%d", cpu); 1247 trace_seq_printf(s, "%3d", cpu);
1432 trace_seq_printf(s, "%c%c", 1248 trace_seq_printf(s, "%c%c",
1433 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', 1249 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
1434 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); 1250 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
@@ -1457,7 +1273,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1457unsigned long preempt_mark_thresh = 100; 1273unsigned long preempt_mark_thresh = 100;
1458 1274
1459static void 1275static void
1460lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs, 1276lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
1461 unsigned long rel_usecs) 1277 unsigned long rel_usecs)
1462{ 1278{
1463 trace_seq_printf(s, " %4lldus", abs_usecs); 1279 trace_seq_printf(s, " %4lldus", abs_usecs);
@@ -1471,34 +1287,76 @@ lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
1471 1287
1472static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1288static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1473 1289
1474static int 1290/*
1291 * The message is supposed to contain an ending newline.
1292 * If the printing stops prematurely, try to add a newline of our own.
1293 */
1294void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
1295{
1296 struct trace_entry *ent;
1297 struct trace_field_cont *cont;
1298 bool ok = true;
1299
1300 ent = peek_next_entry(iter, iter->cpu, NULL);
1301 if (!ent || ent->type != TRACE_CONT) {
1302 trace_seq_putc(s, '\n');
1303 return;
1304 }
1305
1306 do {
1307 cont = (struct trace_field_cont *)ent;
1308 if (ok)
1309 ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
1310
1311 ftrace_disable_cpu();
1312
1313 if (iter->buffer_iter[iter->cpu])
1314 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1315 else
1316 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
1317
1318 ftrace_enable_cpu();
1319
1320 ent = peek_next_entry(iter, iter->cpu, NULL);
1321 } while (ent && ent->type == TRACE_CONT);
1322
1323 if (!ok)
1324 trace_seq_putc(s, '\n');
1325}
1326
1327static enum print_line_t
1475print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) 1328print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1476{ 1329{
1477 struct trace_seq *s = &iter->seq; 1330 struct trace_seq *s = &iter->seq;
1478 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1331 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1479 struct trace_entry *next_entry = find_next_entry(iter, NULL); 1332 struct trace_entry *next_entry;
1480 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); 1333 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
1481 struct trace_entry *entry = iter->ent; 1334 struct trace_entry *entry = iter->ent;
1482 unsigned long abs_usecs; 1335 unsigned long abs_usecs;
1483 unsigned long rel_usecs; 1336 unsigned long rel_usecs;
1337 u64 next_ts;
1484 char *comm; 1338 char *comm;
1485 int S, T; 1339 int S, T;
1486 int i; 1340 int i;
1487 unsigned state; 1341 unsigned state;
1488 1342
1343 if (entry->type == TRACE_CONT)
1344 return TRACE_TYPE_HANDLED;
1345
1346 next_entry = find_next_entry(iter, NULL, &next_ts);
1489 if (!next_entry) 1347 if (!next_entry)
1490 next_entry = entry; 1348 next_ts = iter->ts;
1491 rel_usecs = ns2usecs(next_entry->t - entry->t); 1349 rel_usecs = ns2usecs(next_ts - iter->ts);
1492 abs_usecs = ns2usecs(entry->t - iter->tr->time_start); 1350 abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
1493 1351
1494 if (verbose) { 1352 if (verbose) {
1495 comm = trace_find_cmdline(entry->pid); 1353 comm = trace_find_cmdline(entry->pid);
1496 trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]" 1354 trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
1497 " %ld.%03ldms (+%ld.%03ldms): ", 1355 " %ld.%03ldms (+%ld.%03ldms): ",
1498 comm, 1356 comm,
1499 entry->pid, cpu, entry->flags, 1357 entry->pid, cpu, entry->flags,
1500 entry->preempt_count, trace_idx, 1358 entry->preempt_count, trace_idx,
1501 ns2usecs(entry->t), 1359 ns2usecs(iter->ts),
1502 abs_usecs/1000, 1360 abs_usecs/1000,
1503 abs_usecs % 1000, rel_usecs/1000, 1361 abs_usecs % 1000, rel_usecs/1000,
1504 rel_usecs % 1000); 1362 rel_usecs % 1000);
@@ -1507,52 +1365,85 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1507 lat_print_timestamp(s, abs_usecs, rel_usecs); 1365 lat_print_timestamp(s, abs_usecs, rel_usecs);
1508 } 1366 }
1509 switch (entry->type) { 1367 switch (entry->type) {
1510 case TRACE_FN: 1368 case TRACE_FN: {
1511 seq_print_ip_sym(s, entry->fn.ip, sym_flags); 1369 struct ftrace_entry *field;
1370
1371 trace_assign_type(field, entry);
1372
1373 seq_print_ip_sym(s, field->ip, sym_flags);
1512 trace_seq_puts(s, " ("); 1374 trace_seq_puts(s, " (");
1513 if (kretprobed(entry->fn.parent_ip)) 1375 if (kretprobed(field->parent_ip))
1514 trace_seq_puts(s, KRETPROBE_MSG); 1376 trace_seq_puts(s, KRETPROBE_MSG);
1515 else 1377 else
1516 seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags); 1378 seq_print_ip_sym(s, field->parent_ip, sym_flags);
1517 trace_seq_puts(s, ")\n"); 1379 trace_seq_puts(s, ")\n");
1518 break; 1380 break;
1381 }
1519 case TRACE_CTX: 1382 case TRACE_CTX:
1520 case TRACE_WAKE: 1383 case TRACE_WAKE: {
1521 T = entry->ctx.next_state < sizeof(state_to_char) ? 1384 struct ctx_switch_entry *field;
1522 state_to_char[entry->ctx.next_state] : 'X'; 1385
1386 trace_assign_type(field, entry);
1387
1388 T = field->next_state < sizeof(state_to_char) ?
1389 state_to_char[field->next_state] : 'X';
1523 1390
1524 state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0; 1391 state = field->prev_state ?
1392 __ffs(field->prev_state) + 1 : 0;
1525 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; 1393 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
1526 comm = trace_find_cmdline(entry->ctx.next_pid); 1394 comm = trace_find_cmdline(field->next_pid);
1527 trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n", 1395 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1528 entry->ctx.prev_pid, 1396 field->prev_pid,
1529 entry->ctx.prev_prio, 1397 field->prev_prio,
1530 S, entry->type == TRACE_CTX ? "==>" : " +", 1398 S, entry->type == TRACE_CTX ? "==>" : " +",
1531 entry->ctx.next_pid, 1399 field->next_cpu,
1532 entry->ctx.next_prio, 1400 field->next_pid,
1401 field->next_prio,
1533 T, comm); 1402 T, comm);
1534 break; 1403 break;
1535 case TRACE_SPECIAL: 1404 }
1405 case TRACE_SPECIAL: {
1406 struct special_entry *field;
1407
1408 trace_assign_type(field, entry);
1409
1536 trace_seq_printf(s, "# %ld %ld %ld\n", 1410 trace_seq_printf(s, "# %ld %ld %ld\n",
1537 entry->special.arg1, 1411 field->arg1,
1538 entry->special.arg2, 1412 field->arg2,
1539 entry->special.arg3); 1413 field->arg3);
1540 break; 1414 break;
1541 case TRACE_STACK: 1415 }
1416 case TRACE_STACK: {
1417 struct stack_entry *field;
1418
1419 trace_assign_type(field, entry);
1420
1542 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { 1421 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1543 if (i) 1422 if (i)
1544 trace_seq_puts(s, " <= "); 1423 trace_seq_puts(s, " <= ");
1545 seq_print_ip_sym(s, entry->stack.caller[i], sym_flags); 1424 seq_print_ip_sym(s, field->caller[i], sym_flags);
1546 } 1425 }
1547 trace_seq_puts(s, "\n"); 1426 trace_seq_puts(s, "\n");
1548 break; 1427 break;
1428 }
1429 case TRACE_PRINT: {
1430 struct print_entry *field;
1431
1432 trace_assign_type(field, entry);
1433
1434 seq_print_ip_sym(s, field->ip, sym_flags);
1435 trace_seq_printf(s, ": %s", field->buf);
1436 if (entry->flags & TRACE_FLAG_CONT)
1437 trace_seq_print_cont(s, iter);
1438 break;
1439 }
1549 default: 1440 default:
1550 trace_seq_printf(s, "Unknown type %d\n", entry->type); 1441 trace_seq_printf(s, "Unknown type %d\n", entry->type);
1551 } 1442 }
1552 return 1; 1443 return TRACE_TYPE_HANDLED;
1553} 1444}
1554 1445
1555static int print_trace_fmt(struct trace_iterator *iter) 1446static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1556{ 1447{
1557 struct trace_seq *s = &iter->seq; 1448 struct trace_seq *s = &iter->seq;
1558 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1449 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1567,90 +1458,126 @@ static int print_trace_fmt(struct trace_iterator *iter)
1567 1458
1568 entry = iter->ent; 1459 entry = iter->ent;
1569 1460
1461 if (entry->type == TRACE_CONT)
1462 return TRACE_TYPE_HANDLED;
1463
1570 comm = trace_find_cmdline(iter->ent->pid); 1464 comm = trace_find_cmdline(iter->ent->pid);
1571 1465
1572 t = ns2usecs(entry->t); 1466 t = ns2usecs(iter->ts);
1573 usec_rem = do_div(t, 1000000ULL); 1467 usec_rem = do_div(t, 1000000ULL);
1574 secs = (unsigned long)t; 1468 secs = (unsigned long)t;
1575 1469
1576 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); 1470 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
1577 if (!ret) 1471 if (!ret)
1578 return 0; 1472 return TRACE_TYPE_PARTIAL_LINE;
1579 ret = trace_seq_printf(s, "[%02d] ", iter->cpu); 1473 ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
1580 if (!ret) 1474 if (!ret)
1581 return 0; 1475 return TRACE_TYPE_PARTIAL_LINE;
1582 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); 1476 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
1583 if (!ret) 1477 if (!ret)
1584 return 0; 1478 return TRACE_TYPE_PARTIAL_LINE;
1585 1479
1586 switch (entry->type) { 1480 switch (entry->type) {
1587 case TRACE_FN: 1481 case TRACE_FN: {
1588 ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags); 1482 struct ftrace_entry *field;
1483
1484 trace_assign_type(field, entry);
1485
1486 ret = seq_print_ip_sym(s, field->ip, sym_flags);
1589 if (!ret) 1487 if (!ret)
1590 return 0; 1488 return TRACE_TYPE_PARTIAL_LINE;
1591 if ((sym_flags & TRACE_ITER_PRINT_PARENT) && 1489 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
1592 entry->fn.parent_ip) { 1490 field->parent_ip) {
1593 ret = trace_seq_printf(s, " <-"); 1491 ret = trace_seq_printf(s, " <-");
1594 if (!ret) 1492 if (!ret)
1595 return 0; 1493 return TRACE_TYPE_PARTIAL_LINE;
1596 if (kretprobed(entry->fn.parent_ip)) 1494 if (kretprobed(field->parent_ip))
1597 ret = trace_seq_puts(s, KRETPROBE_MSG); 1495 ret = trace_seq_puts(s, KRETPROBE_MSG);
1598 else 1496 else
1599 ret = seq_print_ip_sym(s, entry->fn.parent_ip, 1497 ret = seq_print_ip_sym(s,
1498 field->parent_ip,
1600 sym_flags); 1499 sym_flags);
1601 if (!ret) 1500 if (!ret)
1602 return 0; 1501 return TRACE_TYPE_PARTIAL_LINE;
1603 } 1502 }
1604 ret = trace_seq_printf(s, "\n"); 1503 ret = trace_seq_printf(s, "\n");
1605 if (!ret) 1504 if (!ret)
1606 return 0; 1505 return TRACE_TYPE_PARTIAL_LINE;
1607 break; 1506 break;
1507 }
1608 case TRACE_CTX: 1508 case TRACE_CTX:
1609 case TRACE_WAKE: 1509 case TRACE_WAKE: {
1610 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1510 struct ctx_switch_entry *field;
1611 state_to_char[entry->ctx.prev_state] : 'X'; 1511
1612 T = entry->ctx.next_state < sizeof(state_to_char) ? 1512 trace_assign_type(field, entry);
1613 state_to_char[entry->ctx.next_state] : 'X'; 1513
1614 ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n", 1514 S = field->prev_state < sizeof(state_to_char) ?
1615 entry->ctx.prev_pid, 1515 state_to_char[field->prev_state] : 'X';
1616 entry->ctx.prev_prio, 1516 T = field->next_state < sizeof(state_to_char) ?
1517 state_to_char[field->next_state] : 'X';
1518 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
1519 field->prev_pid,
1520 field->prev_prio,
1617 S, 1521 S,
1618 entry->type == TRACE_CTX ? "==>" : " +", 1522 entry->type == TRACE_CTX ? "==>" : " +",
1619 entry->ctx.next_pid, 1523 field->next_cpu,
1620 entry->ctx.next_prio, 1524 field->next_pid,
1525 field->next_prio,
1621 T); 1526 T);
1622 if (!ret) 1527 if (!ret)
1623 return 0; 1528 return TRACE_TYPE_PARTIAL_LINE;
1624 break; 1529 break;
1625 case TRACE_SPECIAL: 1530 }
1531 case TRACE_SPECIAL: {
1532 struct special_entry *field;
1533
1534 trace_assign_type(field, entry);
1535
1626 ret = trace_seq_printf(s, "# %ld %ld %ld\n", 1536 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1627 entry->special.arg1, 1537 field->arg1,
1628 entry->special.arg2, 1538 field->arg2,
1629 entry->special.arg3); 1539 field->arg3);
1630 if (!ret) 1540 if (!ret)
1631 return 0; 1541 return TRACE_TYPE_PARTIAL_LINE;
1632 break; 1542 break;
1633 case TRACE_STACK: 1543 }
1544 case TRACE_STACK: {
1545 struct stack_entry *field;
1546
1547 trace_assign_type(field, entry);
1548
1634 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { 1549 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1635 if (i) { 1550 if (i) {
1636 ret = trace_seq_puts(s, " <= "); 1551 ret = trace_seq_puts(s, " <= ");
1637 if (!ret) 1552 if (!ret)
1638 return 0; 1553 return TRACE_TYPE_PARTIAL_LINE;
1639 } 1554 }
1640 ret = seq_print_ip_sym(s, entry->stack.caller[i], 1555 ret = seq_print_ip_sym(s, field->caller[i],
1641 sym_flags); 1556 sym_flags);
1642 if (!ret) 1557 if (!ret)
1643 return 0; 1558 return TRACE_TYPE_PARTIAL_LINE;
1644 } 1559 }
1645 ret = trace_seq_puts(s, "\n"); 1560 ret = trace_seq_puts(s, "\n");
1646 if (!ret) 1561 if (!ret)
1647 return 0; 1562 return TRACE_TYPE_PARTIAL_LINE;
1648 break; 1563 break;
1649 } 1564 }
1650 return 1; 1565 case TRACE_PRINT: {
1566 struct print_entry *field;
1567
1568 trace_assign_type(field, entry);
1569
1570 seq_print_ip_sym(s, field->ip, sym_flags);
1571 trace_seq_printf(s, ": %s", field->buf);
1572 if (entry->flags & TRACE_FLAG_CONT)
1573 trace_seq_print_cont(s, iter);
1574 break;
1575 }
1576 }
1577 return TRACE_TYPE_HANDLED;
1651} 1578}
1652 1579
1653static int print_raw_fmt(struct trace_iterator *iter) 1580static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1654{ 1581{
1655 struct trace_seq *s = &iter->seq; 1582 struct trace_seq *s = &iter->seq;
1656 struct trace_entry *entry; 1583 struct trace_entry *entry;
@@ -1659,47 +1586,77 @@ static int print_raw_fmt(struct trace_iterator *iter)
1659 1586
1660 entry = iter->ent; 1587 entry = iter->ent;
1661 1588
1589 if (entry->type == TRACE_CONT)
1590 return TRACE_TYPE_HANDLED;
1591
1662 ret = trace_seq_printf(s, "%d %d %llu ", 1592 ret = trace_seq_printf(s, "%d %d %llu ",
1663 entry->pid, iter->cpu, entry->t); 1593 entry->pid, iter->cpu, iter->ts);
1664 if (!ret) 1594 if (!ret)
1665 return 0; 1595 return TRACE_TYPE_PARTIAL_LINE;
1666 1596
1667 switch (entry->type) { 1597 switch (entry->type) {
1668 case TRACE_FN: 1598 case TRACE_FN: {
1599 struct ftrace_entry *field;
1600
1601 trace_assign_type(field, entry);
1602
1669 ret = trace_seq_printf(s, "%x %x\n", 1603 ret = trace_seq_printf(s, "%x %x\n",
1670 entry->fn.ip, entry->fn.parent_ip); 1604 field->ip,
1605 field->parent_ip);
1671 if (!ret) 1606 if (!ret)
1672 return 0; 1607 return TRACE_TYPE_PARTIAL_LINE;
1673 break; 1608 break;
1609 }
1674 case TRACE_CTX: 1610 case TRACE_CTX:
1675 case TRACE_WAKE: 1611 case TRACE_WAKE: {
1676 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1612 struct ctx_switch_entry *field;
1677 state_to_char[entry->ctx.prev_state] : 'X'; 1613
1678 T = entry->ctx.next_state < sizeof(state_to_char) ? 1614 trace_assign_type(field, entry);
1679 state_to_char[entry->ctx.next_state] : 'X'; 1615
1616 S = field->prev_state < sizeof(state_to_char) ?
1617 state_to_char[field->prev_state] : 'X';
1618 T = field->next_state < sizeof(state_to_char) ?
1619 state_to_char[field->next_state] : 'X';
1680 if (entry->type == TRACE_WAKE) 1620 if (entry->type == TRACE_WAKE)
1681 S = '+'; 1621 S = '+';
1682 ret = trace_seq_printf(s, "%d %d %c %d %d %c\n", 1622 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
1683 entry->ctx.prev_pid, 1623 field->prev_pid,
1684 entry->ctx.prev_prio, 1624 field->prev_prio,
1685 S, 1625 S,
1686 entry->ctx.next_pid, 1626 field->next_cpu,
1687 entry->ctx.next_prio, 1627 field->next_pid,
1628 field->next_prio,
1688 T); 1629 T);
1689 if (!ret) 1630 if (!ret)
1690 return 0; 1631 return TRACE_TYPE_PARTIAL_LINE;
1691 break; 1632 break;
1633 }
1692 case TRACE_SPECIAL: 1634 case TRACE_SPECIAL:
1693 case TRACE_STACK: 1635 case TRACE_STACK: {
1636 struct special_entry *field;
1637
1638 trace_assign_type(field, entry);
1639
1694 ret = trace_seq_printf(s, "# %ld %ld %ld\n", 1640 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1695 entry->special.arg1, 1641 field->arg1,
1696 entry->special.arg2, 1642 field->arg2,
1697 entry->special.arg3); 1643 field->arg3);
1698 if (!ret) 1644 if (!ret)
1699 return 0; 1645 return TRACE_TYPE_PARTIAL_LINE;
1700 break; 1646 break;
1701 } 1647 }
1702 return 1; 1648 case TRACE_PRINT: {
1649 struct print_entry *field;
1650
1651 trace_assign_type(field, entry);
1652
1653 trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
1654 if (entry->flags & TRACE_FLAG_CONT)
1655 trace_seq_print_cont(s, iter);
1656 break;
1657 }
1658 }
1659 return TRACE_TYPE_HANDLED;
1703} 1660}
1704 1661
1705#define SEQ_PUT_FIELD_RET(s, x) \ 1662#define SEQ_PUT_FIELD_RET(s, x) \
@@ -1710,11 +1667,12 @@ do { \
1710 1667
1711#define SEQ_PUT_HEX_FIELD_RET(s, x) \ 1668#define SEQ_PUT_HEX_FIELD_RET(s, x) \
1712do { \ 1669do { \
1670 BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
1713 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ 1671 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
1714 return 0; \ 1672 return 0; \
1715} while (0) 1673} while (0)
1716 1674
1717static int print_hex_fmt(struct trace_iterator *iter) 1675static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1718{ 1676{
1719 struct trace_seq *s = &iter->seq; 1677 struct trace_seq *s = &iter->seq;
1720 unsigned char newline = '\n'; 1678 unsigned char newline = '\n';
@@ -1723,97 +1681,139 @@ static int print_hex_fmt(struct trace_iterator *iter)
1723 1681
1724 entry = iter->ent; 1682 entry = iter->ent;
1725 1683
1684 if (entry->type == TRACE_CONT)
1685 return TRACE_TYPE_HANDLED;
1686
1726 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 1687 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1727 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); 1688 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1728 SEQ_PUT_HEX_FIELD_RET(s, entry->t); 1689 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
1729 1690
1730 switch (entry->type) { 1691 switch (entry->type) {
1731 case TRACE_FN: 1692 case TRACE_FN: {
1732 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip); 1693 struct ftrace_entry *field;
1733 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); 1694
1695 trace_assign_type(field, entry);
1696
1697 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
1698 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
1734 break; 1699 break;
1700 }
1735 case TRACE_CTX: 1701 case TRACE_CTX:
1736 case TRACE_WAKE: 1702 case TRACE_WAKE: {
1737 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1703 struct ctx_switch_entry *field;
1738 state_to_char[entry->ctx.prev_state] : 'X'; 1704
1739 T = entry->ctx.next_state < sizeof(state_to_char) ? 1705 trace_assign_type(field, entry);
1740 state_to_char[entry->ctx.next_state] : 'X'; 1706
1707 S = field->prev_state < sizeof(state_to_char) ?
1708 state_to_char[field->prev_state] : 'X';
1709 T = field->next_state < sizeof(state_to_char) ?
1710 state_to_char[field->next_state] : 'X';
1741 if (entry->type == TRACE_WAKE) 1711 if (entry->type == TRACE_WAKE)
1742 S = '+'; 1712 S = '+';
1743 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid); 1713 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1744 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio); 1714 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1745 SEQ_PUT_HEX_FIELD_RET(s, S); 1715 SEQ_PUT_HEX_FIELD_RET(s, S);
1746 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid); 1716 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
1747 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio); 1717 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
1748 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); 1718 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
1749 SEQ_PUT_HEX_FIELD_RET(s, T); 1719 SEQ_PUT_HEX_FIELD_RET(s, T);
1750 break; 1720 break;
1721 }
1751 case TRACE_SPECIAL: 1722 case TRACE_SPECIAL:
1752 case TRACE_STACK: 1723 case TRACE_STACK: {
1753 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1); 1724 struct special_entry *field;
1754 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2); 1725
1755 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3); 1726 trace_assign_type(field, entry);
1727
1728 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
1729 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
1730 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
1756 break; 1731 break;
1757 } 1732 }
1733 }
1758 SEQ_PUT_FIELD_RET(s, newline); 1734 SEQ_PUT_FIELD_RET(s, newline);
1759 1735
1760 return 1; 1736 return TRACE_TYPE_HANDLED;
1761} 1737}
1762 1738
1763static int print_bin_fmt(struct trace_iterator *iter) 1739static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1764{ 1740{
1765 struct trace_seq *s = &iter->seq; 1741 struct trace_seq *s = &iter->seq;
1766 struct trace_entry *entry; 1742 struct trace_entry *entry;
1767 1743
1768 entry = iter->ent; 1744 entry = iter->ent;
1769 1745
1746 if (entry->type == TRACE_CONT)
1747 return TRACE_TYPE_HANDLED;
1748
1770 SEQ_PUT_FIELD_RET(s, entry->pid); 1749 SEQ_PUT_FIELD_RET(s, entry->pid);
1771 SEQ_PUT_FIELD_RET(s, entry->cpu); 1750 SEQ_PUT_FIELD_RET(s, iter->cpu);
1772 SEQ_PUT_FIELD_RET(s, entry->t); 1751 SEQ_PUT_FIELD_RET(s, iter->ts);
1773 1752
1774 switch (entry->type) { 1753 switch (entry->type) {
1775 case TRACE_FN: 1754 case TRACE_FN: {
1776 SEQ_PUT_FIELD_RET(s, entry->fn.ip); 1755 struct ftrace_entry *field;
1777 SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip); 1756
1757 trace_assign_type(field, entry);
1758
1759 SEQ_PUT_FIELD_RET(s, field->ip);
1760 SEQ_PUT_FIELD_RET(s, field->parent_ip);
1778 break; 1761 break;
1779 case TRACE_CTX: 1762 }
1780 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid); 1763 case TRACE_CTX: {
1781 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio); 1764 struct ctx_switch_entry *field;
1782 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state); 1765
1783 SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid); 1766 trace_assign_type(field, entry);
1784 SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio); 1767
1785 SEQ_PUT_FIELD_RET(s, entry->ctx.next_state); 1768 SEQ_PUT_FIELD_RET(s, field->prev_pid);
1769 SEQ_PUT_FIELD_RET(s, field->prev_prio);
1770 SEQ_PUT_FIELD_RET(s, field->prev_state);
1771 SEQ_PUT_FIELD_RET(s, field->next_pid);
1772 SEQ_PUT_FIELD_RET(s, field->next_prio);
1773 SEQ_PUT_FIELD_RET(s, field->next_state);
1786 break; 1774 break;
1775 }
1787 case TRACE_SPECIAL: 1776 case TRACE_SPECIAL:
1788 case TRACE_STACK: 1777 case TRACE_STACK: {
1789 SEQ_PUT_FIELD_RET(s, entry->special.arg1); 1778 struct special_entry *field;
1790 SEQ_PUT_FIELD_RET(s, entry->special.arg2); 1779
1791 SEQ_PUT_FIELD_RET(s, entry->special.arg3); 1780 trace_assign_type(field, entry);
1781
1782 SEQ_PUT_FIELD_RET(s, field->arg1);
1783 SEQ_PUT_FIELD_RET(s, field->arg2);
1784 SEQ_PUT_FIELD_RET(s, field->arg3);
1792 break; 1785 break;
1793 } 1786 }
1787 }
1794 return 1; 1788 return 1;
1795} 1789}
1796 1790
1797static int trace_empty(struct trace_iterator *iter) 1791static int trace_empty(struct trace_iterator *iter)
1798{ 1792{
1799 struct trace_array_cpu *data;
1800 int cpu; 1793 int cpu;
1801 1794
1802 for_each_tracing_cpu(cpu) { 1795 for_each_tracing_cpu(cpu) {
1803 data = iter->tr->data[cpu]; 1796 if (iter->buffer_iter[cpu]) {
1804 1797 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1805 if (head_page(data) && data->trace_idx && 1798 return 0;
1806 (data->trace_tail != data->trace_head || 1799 } else {
1807 data->trace_tail_idx != data->trace_head_idx)) 1800 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1808 return 0; 1801 return 0;
1802 }
1809 } 1803 }
1804
1810 return 1; 1805 return 1;
1811} 1806}
1812 1807
1813static int print_trace_line(struct trace_iterator *iter) 1808static enum print_line_t print_trace_line(struct trace_iterator *iter)
1814{ 1809{
1815 if (iter->trace && iter->trace->print_line) 1810 enum print_line_t ret;
1816 return iter->trace->print_line(iter); 1811
1812 if (iter->trace && iter->trace->print_line) {
1813 ret = iter->trace->print_line(iter);
1814 if (ret != TRACE_TYPE_UNHANDLED)
1815 return ret;
1816 }
1817 1817
1818 if (trace_flags & TRACE_ITER_BIN) 1818 if (trace_flags & TRACE_ITER_BIN)
1819 return print_bin_fmt(iter); 1819 return print_bin_fmt(iter);
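
The hunks above move the line printers from a bare int return to the enum print_line_t protocol: print_trace_line() now falls through to the built-in raw/hex/bin/default formatters whenever a tracer's print_line hook returns TRACE_TYPE_UNHANDLED. A minimal sketch of such a hook, using only the types and helpers visible in this patch (the function name is hypothetical):

static enum print_line_t example_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct print_entry *field;

        /* defer everything except TRACE_PRINT entries to the core formatters */
        if (entry->type != TRACE_PRINT)
                return TRACE_TYPE_UNHANDLED;

        trace_assign_type(field, entry);

        /* trace_seq_printf() returns 0 when the seq buffer is full */
        if (!trace_seq_printf(&iter->seq, "%lx: %s", field->ip, field->buf))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

Returning TRACE_TYPE_PARTIAL_LINE tells the read path to drop the partial output and retry the entry later, which matches how tracing_read_pipe() handles it further down in this patch.
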
@@ -1869,6 +1869,8 @@ static struct trace_iterator *
1869__tracing_open(struct inode *inode, struct file *file, int *ret) 1869__tracing_open(struct inode *inode, struct file *file, int *ret)
1870{ 1870{
1871 struct trace_iterator *iter; 1871 struct trace_iterator *iter;
1872 struct seq_file *m;
1873 int cpu;
1872 1874
1873 if (tracing_disabled) { 1875 if (tracing_disabled) {
1874 *ret = -ENODEV; 1876 *ret = -ENODEV;
@@ -1889,28 +1891,45 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1889 iter->trace = current_trace; 1891 iter->trace = current_trace;
1890 iter->pos = -1; 1892 iter->pos = -1;
1891 1893
1894 for_each_tracing_cpu(cpu) {
1895
1896 iter->buffer_iter[cpu] =
1897 ring_buffer_read_start(iter->tr->buffer, cpu);
1898
1899 if (!iter->buffer_iter[cpu])
1900 goto fail_buffer;
1901 }
1902
1892 /* TODO stop tracer */ 1903 /* TODO stop tracer */
1893 *ret = seq_open(file, &tracer_seq_ops); 1904 *ret = seq_open(file, &tracer_seq_ops);
1894 if (!*ret) { 1905 if (*ret)
1895 struct seq_file *m = file->private_data; 1906 goto fail_buffer;
1896 m->private = iter;
1897 1907
1898 /* stop the trace while dumping */ 1908 m = file->private_data;
1899 if (iter->tr->ctrl) { 1909 m->private = iter;
1900 tracer_enabled = 0;
1901 ftrace_function_enabled = 0;
1902 }
1903 1910
1904 if (iter->trace && iter->trace->open) 1911 /* stop the trace while dumping */
1905 iter->trace->open(iter); 1912 if (iter->tr->ctrl) {
1906 } else { 1913 tracer_enabled = 0;
1907 kfree(iter); 1914 ftrace_function_enabled = 0;
1908 iter = NULL;
1909 } 1915 }
1916
1917 if (iter->trace && iter->trace->open)
1918 iter->trace->open(iter);
1919
1910 mutex_unlock(&trace_types_lock); 1920 mutex_unlock(&trace_types_lock);
1911 1921
1912 out: 1922 out:
1913 return iter; 1923 return iter;
1924
1925 fail_buffer:
1926 for_each_tracing_cpu(cpu) {
1927 if (iter->buffer_iter[cpu])
1928 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1929 }
1930 mutex_unlock(&trace_types_lock);
1931
1932 return ERR_PTR(-ENOMEM);
1914} 1933}
1915 1934
1916int tracing_open_generic(struct inode *inode, struct file *filp) 1935int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -1926,8 +1945,14 @@ int tracing_release(struct inode *inode, struct file *file)
1926{ 1945{
1927 struct seq_file *m = (struct seq_file *)file->private_data; 1946 struct seq_file *m = (struct seq_file *)file->private_data;
1928 struct trace_iterator *iter = m->private; 1947 struct trace_iterator *iter = m->private;
1948 int cpu;
1929 1949
1930 mutex_lock(&trace_types_lock); 1950 mutex_lock(&trace_types_lock);
1951 for_each_tracing_cpu(cpu) {
1952 if (iter->buffer_iter[cpu])
1953 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1954 }
1955
1931 if (iter->trace && iter->trace->close) 1956 if (iter->trace && iter->trace->close)
1932 iter->trace->close(iter); 1957 iter->trace->close(iter);
1933 1958
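
The __tracing_open()/tracing_release() hunks above now hold one ring buffer reader iterator per tracing CPU for the lifetime of the seq_file. A condensed sketch of that lifecycle, using only the calls that appear in these hunks (the helper names are hypothetical, not part of the patch):

static int example_open_buffer_iters(struct trace_iterator *iter)
{
        int cpu;

        /* one reader per CPU; roll back everything on the first failure */
        for_each_tracing_cpu(cpu) {
                iter->buffer_iter[cpu] =
                        ring_buffer_read_start(iter->tr->buffer, cpu);
                if (!iter->buffer_iter[cpu])
                        goto fail;
        }
        return 0;

 fail:
        for_each_tracing_cpu(cpu) {
                if (iter->buffer_iter[cpu])
                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
        }
        return -ENOMEM;
}

static void example_close_buffer_iters(struct trace_iterator *iter)
{
        int cpu;

        /* mirror of the open path, called from release */
        for_each_tracing_cpu(cpu) {
                if (iter->buffer_iter[cpu])
                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
        }
}
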
@@ -2352,9 +2377,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2352 struct tracer *t; 2377 struct tracer *t;
2353 char buf[max_tracer_type_len+1]; 2378 char buf[max_tracer_type_len+1];
2354 int i; 2379 int i;
2380 size_t ret;
2355 2381
2356 if (cnt > max_tracer_type_len) 2382 if (cnt > max_tracer_type_len)
2357 cnt = max_tracer_type_len; 2383 cnt = max_tracer_type_len;
2384 ret = cnt;
2358 2385
2359 if (copy_from_user(&buf, ubuf, cnt)) 2386 if (copy_from_user(&buf, ubuf, cnt))
2360 return -EFAULT; 2387 return -EFAULT;
@@ -2370,7 +2397,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2370 if (strcmp(t->name, buf) == 0) 2397 if (strcmp(t->name, buf) == 0)
2371 break; 2398 break;
2372 } 2399 }
2373 if (!t || t == current_trace) 2400 if (!t) {
2401 ret = -EINVAL;
2402 goto out;
2403 }
2404 if (t == current_trace)
2374 goto out; 2405 goto out;
2375 2406
2376 if (current_trace && current_trace->reset) 2407 if (current_trace && current_trace->reset)
@@ -2383,9 +2414,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2383 out: 2414 out:
2384 mutex_unlock(&trace_types_lock); 2415 mutex_unlock(&trace_types_lock);
2385 2416
2386 filp->f_pos += cnt; 2417 if (ret == cnt)
2418 filp->f_pos += cnt;
2387 2419
2388 return cnt; 2420 return ret;
2389} 2421}
2390 2422
2391static ssize_t 2423static ssize_t
@@ -2500,20 +2532,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2500 size_t cnt, loff_t *ppos) 2532 size_t cnt, loff_t *ppos)
2501{ 2533{
2502 struct trace_iterator *iter = filp->private_data; 2534 struct trace_iterator *iter = filp->private_data;
2503 struct trace_array_cpu *data;
2504 static cpumask_t mask;
2505 unsigned long flags;
2506#ifdef CONFIG_FTRACE
2507 int ftrace_save;
2508#endif
2509 int cpu;
2510 ssize_t sret; 2535 ssize_t sret;
2511 2536
2512 /* return any leftover data */ 2537 /* return any leftover data */
2513 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 2538 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2514 if (sret != -EBUSY) 2539 if (sret != -EBUSY)
2515 return sret; 2540 return sret;
2516 sret = 0;
2517 2541
2518 trace_seq_reset(&iter->seq); 2542 trace_seq_reset(&iter->seq);
2519 2543
@@ -2524,6 +2548,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2524 goto out; 2548 goto out;
2525 } 2549 }
2526 2550
2551waitagain:
2552 sret = 0;
2527 while (trace_empty(iter)) { 2553 while (trace_empty(iter)) {
2528 2554
2529 if ((filp->f_flags & O_NONBLOCK)) { 2555 if ((filp->f_flags & O_NONBLOCK)) {
@@ -2588,46 +2614,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2588 offsetof(struct trace_iterator, seq)); 2614 offsetof(struct trace_iterator, seq));
2589 iter->pos = -1; 2615 iter->pos = -1;
2590 2616
2591 /*
2592 * We need to stop all tracing on all CPUS to read the
2593 * the next buffer. This is a bit expensive, but is
2594 * not done often. We fill all what we can read,
2595 * and then release the locks again.
2596 */
2597
2598 cpus_clear(mask);
2599 local_irq_save(flags);
2600#ifdef CONFIG_FTRACE
2601 ftrace_save = ftrace_enabled;
2602 ftrace_enabled = 0;
2603#endif
2604 smp_wmb();
2605 for_each_tracing_cpu(cpu) {
2606 data = iter->tr->data[cpu];
2607
2608 if (!head_page(data) || !data->trace_idx)
2609 continue;
2610
2611 atomic_inc(&data->disabled);
2612 cpu_set(cpu, mask);
2613 }
2614
2615 for_each_cpu_mask(cpu, mask) {
2616 data = iter->tr->data[cpu];
2617 __raw_spin_lock(&data->lock);
2618
2619 if (data->overrun > iter->last_overrun[cpu])
2620 iter->overrun[cpu] +=
2621 data->overrun - iter->last_overrun[cpu];
2622 iter->last_overrun[cpu] = data->overrun;
2623 }
2624
2625 while (find_next_entry_inc(iter) != NULL) { 2617 while (find_next_entry_inc(iter) != NULL) {
2626 int ret; 2618 enum print_line_t ret;
2627 int len = iter->seq.len; 2619 int len = iter->seq.len;
2628 2620
2629 ret = print_trace_line(iter); 2621 ret = print_trace_line(iter);
2630 if (!ret) { 2622 if (ret == TRACE_TYPE_PARTIAL_LINE) {
2631 /* don't print partial lines */ 2623 /* don't print partial lines */
2632 iter->seq.len = len; 2624 iter->seq.len = len;
2633 break; 2625 break;
@@ -2639,26 +2631,17 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2639 break; 2631 break;
2640 } 2632 }
2641 2633
2642 for_each_cpu_mask(cpu, mask) {
2643 data = iter->tr->data[cpu];
2644 __raw_spin_unlock(&data->lock);
2645 }
2646
2647 for_each_cpu_mask(cpu, mask) {
2648 data = iter->tr->data[cpu];
2649 atomic_dec(&data->disabled);
2650 }
2651#ifdef CONFIG_FTRACE
2652 ftrace_enabled = ftrace_save;
2653#endif
2654 local_irq_restore(flags);
2655
2656 /* Now copy what we have to the user */ 2634 /* Now copy what we have to the user */
2657 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 2635 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2658 if (iter->seq.readpos >= iter->seq.len) 2636 if (iter->seq.readpos >= iter->seq.len)
2659 trace_seq_reset(&iter->seq); 2637 trace_seq_reset(&iter->seq);
2638
2639 /*
2640 * If there was nothing to send to the user, in spite of consuming trace
2641 * entries, go back to wait for more entries.
2642 */
2660 if (sret == -EBUSY) 2643 if (sret == -EBUSY)
2661 sret = 0; 2644 goto waitagain;
2662 2645
2663out: 2646out:
2664 mutex_unlock(&trace_types_lock); 2647 mutex_unlock(&trace_types_lock);
@@ -2684,7 +2667,8 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2684{ 2667{
2685 unsigned long val; 2668 unsigned long val;
2686 char buf[64]; 2669 char buf[64];
2687 int i, ret; 2670 int ret;
2671 struct trace_array *tr = filp->private_data;
2688 2672
2689 if (cnt >= sizeof(buf)) 2673 if (cnt >= sizeof(buf))
2690 return -EINVAL; 2674 return -EINVAL;
@@ -2704,59 +2688,38 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2704 2688
2705 mutex_lock(&trace_types_lock); 2689 mutex_lock(&trace_types_lock);
2706 2690
2707 if (current_trace != &no_tracer) { 2691 if (tr->ctrl) {
2708 cnt = -EBUSY; 2692 cnt = -EBUSY;
2709 pr_info("ftrace: set current_tracer to none" 2693 pr_info("ftrace: please disable tracing"
2710 " before modifying buffer size\n"); 2694 " before modifying buffer size\n");
2711 goto out; 2695 goto out;
2712 } 2696 }
2713 2697
2714 if (val > global_trace.entries) { 2698 if (val != global_trace.entries) {
2715 long pages_requested; 2699 ret = ring_buffer_resize(global_trace.buffer, val);
2716 unsigned long freeable_pages; 2700 if (ret < 0) {
2717 2701 cnt = ret;
2718 /* make sure we have enough memory before mapping */
2719 pages_requested =
2720 (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
2721
2722 /* account for each buffer (and max_tr) */
2723 pages_requested *= tracing_nr_buffers * 2;
2724
2725 /* Check for overflow */
2726 if (pages_requested < 0) {
2727 cnt = -ENOMEM;
2728 goto out;
2729 }
2730
2731 freeable_pages = determine_dirtyable_memory();
2732
2733 /* we only allow to request 1/4 of useable memory */
2734 if (pages_requested >
2735 ((freeable_pages + tracing_pages_allocated) / 4)) {
2736 cnt = -ENOMEM;
2737 goto out; 2702 goto out;
2738 } 2703 }
2739 2704
2740 while (global_trace.entries < val) { 2705 ret = ring_buffer_resize(max_tr.buffer, val);
2741 if (trace_alloc_page()) { 2706 if (ret < 0) {
2742 cnt = -ENOMEM; 2707 int r;
2743 goto out; 2708 cnt = ret;
2709 r = ring_buffer_resize(global_trace.buffer,
2710 global_trace.entries);
2711 if (r < 0) {
2712 /* AARGH! We are left with a different
2713 * size max buffer!!!! */
2714 WARN_ON(1);
2715 tracing_disabled = 1;
2744 } 2716 }
2745 /* double check that we don't go over the known pages */ 2717 goto out;
2746 if (tracing_pages_allocated > pages_requested)
2747 break;
2748 } 2718 }
2749 2719
2750 } else { 2720 global_trace.entries = val;
2751 /* include the number of entries in val (inc of page entries) */
2752 while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
2753 trace_free_page();
2754 } 2721 }
2755 2722
2756 /* check integrity */
2757 for_each_tracing_cpu(i)
2758 check_pages(global_trace.data[i]);
2759
2760 filp->f_pos += cnt; 2723 filp->f_pos += cnt;
2761 2724
2762 /* If check pages failed, return ENOMEM */ 2725 /* If check pages failed, return ENOMEM */
@@ -2769,6 +2732,52 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2769 return cnt; 2732 return cnt;
2770} 2733}
2771 2734
2735static int mark_printk(const char *fmt, ...)
2736{
2737 int ret;
2738 va_list args;
2739 va_start(args, fmt);
2740 ret = trace_vprintk(0, fmt, args);
2741 va_end(args);
2742 return ret;
2743}
2744
2745static ssize_t
2746tracing_mark_write(struct file *filp, const char __user *ubuf,
2747 size_t cnt, loff_t *fpos)
2748{
2749 char *buf;
2750 char *end;
2751 struct trace_array *tr = &global_trace;
2752
2753 if (!tr->ctrl || tracing_disabled)
2754 return -EINVAL;
2755
2756 if (cnt > TRACE_BUF_SIZE)
2757 cnt = TRACE_BUF_SIZE;
2758
2759 buf = kmalloc(cnt + 1, GFP_KERNEL);
2760 if (buf == NULL)
2761 return -ENOMEM;
2762
2763 if (copy_from_user(buf, ubuf, cnt)) {
2764 kfree(buf);
2765 return -EFAULT;
2766 }
2767
2768 /* Cut from the first NUL or newline. */
2769 buf[cnt] = '\0';
2770 end = strchr(buf, '\n');
2771 if (end)
2772 *end = '\0';
2773
2774 cnt = mark_printk("%s\n", buf);
2775 kfree(buf);
2776 *fpos += cnt;
2777
2778 return cnt;
2779}
2780
2772static struct file_operations tracing_max_lat_fops = { 2781static struct file_operations tracing_max_lat_fops = {
2773 .open = tracing_open_generic, 2782 .open = tracing_open_generic,
2774 .read = tracing_max_lat_read, 2783 .read = tracing_max_lat_read,
@@ -2800,6 +2809,11 @@ static struct file_operations tracing_entries_fops = {
2800 .write = tracing_entries_write, 2809 .write = tracing_entries_write,
2801}; 2810};
2802 2811
2812static struct file_operations tracing_mark_fops = {
2813 .open = tracing_open_generic,
2814 .write = tracing_mark_write,
2815};
2816
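
tracing_mark_write() above lets user space inject free-form markers into the trace; mark_printk() feeds them through trace_vprintk() so they show up as TRACE_PRINT entries. A hypothetical user-space sketch; the debugfs mount point (/sys/kernel/debug) and therefore the full path are assumptions that may differ per system:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "hello from user space\n";
        int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

        if (fd < 0)
                return 1;

        /* tracing_mark_write() cuts the string at the first newline */
        if (write(fd, msg, strlen(msg)) < 0) {
                close(fd);
                return 1;
        }

        close(fd);
        return 0;
}

The file is registered write-only (mode 0220) in tracer_init_debugfs() below, so it must be opened with O_WRONLY.
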
2803#ifdef CONFIG_DYNAMIC_FTRACE 2817#ifdef CONFIG_DYNAMIC_FTRACE
2804 2818
2805static ssize_t 2819static ssize_t
@@ -2846,7 +2860,7 @@ struct dentry *tracing_init_dentry(void)
2846#include "trace_selftest.c" 2860#include "trace_selftest.c"
2847#endif 2861#endif
2848 2862
2849static __init void tracer_init_debugfs(void) 2863static __init int tracer_init_debugfs(void)
2850{ 2864{
2851 struct dentry *d_tracer; 2865 struct dentry *d_tracer;
2852 struct dentry *entry; 2866 struct dentry *entry;
@@ -2881,12 +2895,12 @@ static __init void tracer_init_debugfs(void)
2881 entry = debugfs_create_file("available_tracers", 0444, d_tracer, 2895 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2882 &global_trace, &show_traces_fops); 2896 &global_trace, &show_traces_fops);
2883 if (!entry) 2897 if (!entry)
2884 pr_warning("Could not create debugfs 'trace' entry\n"); 2898 pr_warning("Could not create debugfs 'available_tracers' entry\n");
2885 2899
2886 entry = debugfs_create_file("current_tracer", 0444, d_tracer, 2900 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2887 &global_trace, &set_tracer_fops); 2901 &global_trace, &set_tracer_fops);
2888 if (!entry) 2902 if (!entry)
2889 pr_warning("Could not create debugfs 'trace' entry\n"); 2903 pr_warning("Could not create debugfs 'current_tracer' entry\n");
2890 2904
2891 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, 2905 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2892 &tracing_max_latency, 2906 &tracing_max_latency,
@@ -2899,7 +2913,7 @@ static __init void tracer_init_debugfs(void)
2899 &tracing_thresh, &tracing_max_lat_fops); 2913 &tracing_thresh, &tracing_max_lat_fops);
2900 if (!entry) 2914 if (!entry)
2901 pr_warning("Could not create debugfs " 2915 pr_warning("Could not create debugfs "
2902 "'tracing_threash' entry\n"); 2916 "'tracing_thresh' entry\n");
2903 entry = debugfs_create_file("README", 0644, d_tracer, 2917 entry = debugfs_create_file("README", 0644, d_tracer,
2904 NULL, &tracing_readme_fops); 2918 NULL, &tracing_readme_fops);
2905 if (!entry) 2919 if (!entry)
@@ -2909,13 +2923,19 @@ static __init void tracer_init_debugfs(void)
2909 NULL, &tracing_pipe_fops); 2923 NULL, &tracing_pipe_fops);
2910 if (!entry) 2924 if (!entry)
2911 pr_warning("Could not create debugfs " 2925 pr_warning("Could not create debugfs "
2912 "'tracing_threash' entry\n"); 2926 "'trace_pipe' entry\n");
2913 2927
2914 entry = debugfs_create_file("trace_entries", 0644, d_tracer, 2928 entry = debugfs_create_file("trace_entries", 0644, d_tracer,
2915 &global_trace, &tracing_entries_fops); 2929 &global_trace, &tracing_entries_fops);
2916 if (!entry) 2930 if (!entry)
2917 pr_warning("Could not create debugfs " 2931 pr_warning("Could not create debugfs "
2918 "'tracing_threash' entry\n"); 2932 "'trace_entries' entry\n");
2933
2934 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2935 NULL, &tracing_mark_fops);
2936 if (!entry)
2937 pr_warning("Could not create debugfs "
2938 "'trace_marker' entry\n");
2919 2939
2920#ifdef CONFIG_DYNAMIC_FTRACE 2940#ifdef CONFIG_DYNAMIC_FTRACE
2921 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, 2941 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
@@ -2928,230 +2948,263 @@ static __init void tracer_init_debugfs(void)
2928#ifdef CONFIG_SYSPROF_TRACER 2948#ifdef CONFIG_SYSPROF_TRACER
2929 init_tracer_sysprof_debugfs(d_tracer); 2949 init_tracer_sysprof_debugfs(d_tracer);
2930#endif 2950#endif
2951 return 0;
2931} 2952}
2932 2953
2933static int trace_alloc_page(void) 2954int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2934{ 2955{
2956 static DEFINE_SPINLOCK(trace_buf_lock);
2957 static char trace_buf[TRACE_BUF_SIZE];
2958
2959 struct ring_buffer_event *event;
2960 struct trace_array *tr = &global_trace;
2935 struct trace_array_cpu *data; 2961 struct trace_array_cpu *data;
2936 struct page *page, *tmp; 2962 struct print_entry *entry;
2937 LIST_HEAD(pages); 2963 unsigned long flags, irq_flags;
2938 void *array; 2964 int cpu, len = 0, size, pc;
2939 unsigned pages_allocated = 0;
2940 int i;
2941 2965
2942 /* first allocate a page for each CPU */ 2966 if (!tr->ctrl || tracing_disabled)
2943 for_each_tracing_cpu(i) { 2967 return 0;
2944 array = (void *)__get_free_page(GFP_KERNEL);
2945 if (array == NULL) {
2946 printk(KERN_ERR "tracer: failed to allocate page"
2947 "for trace buffer!\n");
2948 goto free_pages;
2949 }
2950 2968
2951 pages_allocated++; 2969 pc = preempt_count();
2952 page = virt_to_page(array); 2970 preempt_disable_notrace();
2953 list_add(&page->lru, &pages); 2971 cpu = raw_smp_processor_id();
2972 data = tr->data[cpu];
2954 2973
2955/* Only allocate if we are actually using the max trace */ 2974 if (unlikely(atomic_read(&data->disabled)))
2956#ifdef CONFIG_TRACER_MAX_TRACE 2975 goto out;
2957 array = (void *)__get_free_page(GFP_KERNEL);
2958 if (array == NULL) {
2959 printk(KERN_ERR "tracer: failed to allocate page"
2960 "for trace buffer!\n");
2961 goto free_pages;
2962 }
2963 pages_allocated++;
2964 page = virt_to_page(array);
2965 list_add(&page->lru, &pages);
2966#endif
2967 }
2968 2976
2969 /* Now that we successfully allocate a page per CPU, add them */ 2977 spin_lock_irqsave(&trace_buf_lock, flags);
2970 for_each_tracing_cpu(i) { 2978 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
2971 data = global_trace.data[i];
2972 page = list_entry(pages.next, struct page, lru);
2973 list_del_init(&page->lru);
2974 list_add_tail(&page->lru, &data->trace_pages);
2975 ClearPageLRU(page);
2976 2979
2977#ifdef CONFIG_TRACER_MAX_TRACE 2980 len = min(len, TRACE_BUF_SIZE-1);
2978 data = max_tr.data[i]; 2981 trace_buf[len] = 0;
2979 page = list_entry(pages.next, struct page, lru);
2980 list_del_init(&page->lru);
2981 list_add_tail(&page->lru, &data->trace_pages);
2982 SetPageLRU(page);
2983#endif
2984 }
2985 tracing_pages_allocated += pages_allocated;
2986 global_trace.entries += ENTRIES_PER_PAGE;
2987 2982
2988 return 0; 2983 size = sizeof(*entry) + len + 1;
2984 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
2985 if (!event)
2986 goto out_unlock;
2987 entry = ring_buffer_event_data(event);
2988 tracing_generic_entry_update(&entry->ent, flags, pc);
2989 entry->ent.type = TRACE_PRINT;
2990 entry->ip = ip;
2989 2991
2990 free_pages: 2992 memcpy(&entry->buf, trace_buf, len);
2991 list_for_each_entry_safe(page, tmp, &pages, lru) { 2993 entry->buf[len] = 0;
2992 list_del_init(&page->lru); 2994 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
2993 __free_page(page); 2995
2994 } 2996 out_unlock:
2995 return -ENOMEM; 2997 spin_unlock_irqrestore(&trace_buf_lock, flags);
2998
2999 out:
3000 preempt_enable_notrace();
3001
3002 return len;
2996} 3003}
3004EXPORT_SYMBOL_GPL(trace_vprintk);
2997 3005
2998static int trace_free_page(void) 3006int __ftrace_printk(unsigned long ip, const char *fmt, ...)
2999{ 3007{
3000 struct trace_array_cpu *data; 3008 int ret;
3001 struct page *page; 3009 va_list ap;
3002 struct list_head *p;
3003 int i;
3004 int ret = 0;
3005 3010
3006 /* free one page from each buffer */ 3011 if (!(trace_flags & TRACE_ITER_PRINTK))
3007 for_each_tracing_cpu(i) { 3012 return 0;
3008 data = global_trace.data[i];
3009 p = data->trace_pages.next;
3010 if (p == &data->trace_pages) {
3011 /* should never happen */
3012 WARN_ON(1);
3013 tracing_disabled = 1;
3014 ret = -1;
3015 break;
3016 }
3017 page = list_entry(p, struct page, lru);
3018 ClearPageLRU(page);
3019 list_del(&page->lru);
3020 tracing_pages_allocated--;
3021 tracing_pages_allocated--;
3022 __free_page(page);
3023 3013
3024 tracing_reset(data); 3014 va_start(ap, fmt);
3015 ret = trace_vprintk(ip, fmt, ap);
3016 va_end(ap);
3017 return ret;
3018}
3019EXPORT_SYMBOL_GPL(__ftrace_printk);
3025 3020
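
trace_vprintk() and __ftrace_printk() above give kernel code a printf-style way to drop TRACE_PRINT entries into the ring buffer, gated by the TRACE_ITER_PRINTK flag. A hypothetical in-kernel sketch; it assumes a ftrace_printk() convenience wrapper in <linux/ftrace.h> that passes the caller's instruction pointer to __ftrace_printk():

#include <linux/ftrace.h>
#include <linux/smp.h>

static void example_trace_event(int irq)
{
        /* recorded via trace_vprintk() as a TRACE_PRINT entry */
        ftrace_printk("handled irq %d on cpu %d\n", irq, smp_processor_id());
}

These messages are then rendered by the TRACE_PRINT cases added to the print_*_fmt() helpers earlier in this patch.
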
3026#ifdef CONFIG_TRACER_MAX_TRACE 3021static int trace_panic_handler(struct notifier_block *this,
3027 data = max_tr.data[i]; 3022 unsigned long event, void *unused)
3028 p = data->trace_pages.next; 3023{
3029 if (p == &data->trace_pages) { 3024 ftrace_dump();
3030 /* should never happen */ 3025 return NOTIFY_OK;
3031 WARN_ON(1); 3026}
3032 tracing_disabled = 1;
3033 ret = -1;
3034 break;
3035 }
3036 page = list_entry(p, struct page, lru);
3037 ClearPageLRU(page);
3038 list_del(&page->lru);
3039 __free_page(page);
3040 3027
3041 tracing_reset(data); 3028static struct notifier_block trace_panic_notifier = {
3042#endif 3029 .notifier_call = trace_panic_handler,
3043 } 3030 .next = NULL,
3044 global_trace.entries -= ENTRIES_PER_PAGE; 3031 .priority = 150 /* priority: INT_MAX >= x >= 0 */
3032};
3045 3033
3046 return ret; 3034static int trace_die_handler(struct notifier_block *self,
3035 unsigned long val,
3036 void *data)
3037{
3038 switch (val) {
3039 case DIE_OOPS:
3040 ftrace_dump();
3041 break;
3042 default:
3043 break;
3044 }
3045 return NOTIFY_OK;
3047} 3046}
3048 3047
3049__init static int tracer_alloc_buffers(void) 3048static struct notifier_block trace_die_notifier = {
3049 .notifier_call = trace_die_handler,
3050 .priority = 200
3051};
3052
3053/*
3054 * printk is set to a max of 1024; we really don't need it that big.
3055 * Nothing should be printing 1000 characters anyway.
3056 */
3057#define TRACE_MAX_PRINT 1000
3058
3059/*
3060 * Define here KERN_TRACE so that we have one place to modify
3061 * it if we decide to change what log level the ftrace dump
3062 * should be at.
3063 */
3064#define KERN_TRACE KERN_INFO
3065
3066static void
3067trace_printk_seq(struct trace_seq *s)
3050{ 3068{
3051 struct trace_array_cpu *data; 3069 /* Probably should print a warning here. */
3052 void *array; 3070 if (s->len >= 1000)
3053 struct page *page; 3071 s->len = 1000;
3054 int pages = 0;
3055 int ret = -ENOMEM;
3056 int i;
3057 3072
3058 /* TODO: make the number of buffers hot pluggable with CPUS */ 3073 /* should be zero terminated, but we are paranoid. */
3059 tracing_nr_buffers = num_possible_cpus(); 3074 s->buffer[s->len] = 0;
3060 tracing_buffer_mask = cpu_possible_map;
3061 3075
3062 /* Allocate the first page for all buffers */ 3076 printk(KERN_TRACE "%s", s->buffer);
3063 for_each_tracing_cpu(i) {
3064 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3065 max_tr.data[i] = &per_cpu(max_data, i);
3066 3077
3067 array = (void *)__get_free_page(GFP_KERNEL); 3078 trace_seq_reset(s);
3068 if (array == NULL) { 3079}
3069 printk(KERN_ERR "tracer: failed to allocate page" 3080
3070 "for trace buffer!\n"); 3081
3071 goto free_buffers; 3082void ftrace_dump(void)
3072 } 3083{
3084 static DEFINE_SPINLOCK(ftrace_dump_lock);
3085 /* use static because iter can be a bit big for the stack */
3086 static struct trace_iterator iter;
3087 static cpumask_t mask;
3088 static int dump_ran;
3089 unsigned long flags;
3090 int cnt = 0, cpu;
3073 3091
3074 /* set the array to the list */ 3092 /* only one dump */
3075 INIT_LIST_HEAD(&data->trace_pages); 3093 spin_lock_irqsave(&ftrace_dump_lock, flags);
3076 page = virt_to_page(array); 3094 if (dump_ran)
3077 list_add(&page->lru, &data->trace_pages); 3095 goto out;
3078 /* use the LRU flag to differentiate the two buffers */
3079 ClearPageLRU(page);
3080 3096
3081 data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 3097 dump_ran = 1;
3082 max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3083 3098
3084/* Only allocate if we are actually using the max trace */ 3099 /* No turning back! */
3085#ifdef CONFIG_TRACER_MAX_TRACE 3100 ftrace_kill_atomic();
3086 array = (void *)__get_free_page(GFP_KERNEL);
3087 if (array == NULL) {
3088 printk(KERN_ERR "tracer: failed to allocate page"
3089 "for trace buffer!\n");
3090 goto free_buffers;
3091 }
3092 3101
3093 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages); 3102 for_each_tracing_cpu(cpu) {
3094 page = virt_to_page(array); 3103 atomic_inc(&global_trace.data[cpu]->disabled);
3095 list_add(&page->lru, &max_tr.data[i]->trace_pages);
3096 SetPageLRU(page);
3097#endif
3098 } 3104 }
3099 3105
3106 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3107
3108 iter.tr = &global_trace;
3109 iter.trace = current_trace;
3110
3100 /* 3111 /*
3101 * Since we allocate by orders of pages, we may be able to 3112 * We need to stop all tracing on all CPUS to read the
3102 * round up a bit. 3113 * the next buffer. This is a bit expensive, but is
3114 * not done often. We fill all what we can read,
3115 * and then release the locks again.
3103 */ 3116 */
3104 global_trace.entries = ENTRIES_PER_PAGE;
3105 pages++;
3106 3117
3107 while (global_trace.entries < trace_nr_entries) { 3118 cpus_clear(mask);
3108 if (trace_alloc_page()) 3119
3109 break; 3120 while (!trace_empty(&iter)) {
3110 pages++; 3121
3122 if (!cnt)
3123 printk(KERN_TRACE "---------------------------------\n");
3124
3125 cnt++;
3126
3127 /* reset all but tr, trace, and overruns */
3128 memset(&iter.seq, 0,
3129 sizeof(struct trace_iterator) -
3130 offsetof(struct trace_iterator, seq));
3131 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3132 iter.pos = -1;
3133
3134 if (find_next_entry_inc(&iter) != NULL) {
3135 print_trace_line(&iter);
3136 trace_consume(&iter);
3137 }
3138
3139 trace_printk_seq(&iter.seq);
3111 } 3140 }
3112 max_tr.entries = global_trace.entries;
3113 3141
3114 pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n", 3142 if (!cnt)
3115 pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE); 3143 printk(KERN_TRACE " (ftrace buffer empty)\n");
3116 pr_info(" actual entries %ld\n", global_trace.entries); 3144 else
3145 printk(KERN_TRACE "---------------------------------\n");
3146
3147 out:
3148 spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3149}
3150
3151__init static int tracer_alloc_buffers(void)
3152{
3153 struct trace_array_cpu *data;
3154 int i;
3155
3156 /* TODO: make the number of buffers hot pluggable with CPUS */
3157 tracing_buffer_mask = cpu_possible_map;
3158
3159 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3160 TRACE_BUFFER_FLAGS);
3161 if (!global_trace.buffer) {
3162 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3163 WARN_ON(1);
3164 return 0;
3165 }
3166 global_trace.entries = ring_buffer_size(global_trace.buffer);
3117 3167
3118 tracer_init_debugfs(); 3168#ifdef CONFIG_TRACER_MAX_TRACE
3169 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3170 TRACE_BUFFER_FLAGS);
3171 if (!max_tr.buffer) {
3172 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3173 WARN_ON(1);
3174 ring_buffer_free(global_trace.buffer);
3175 return 0;
3176 }
3177 max_tr.entries = ring_buffer_size(max_tr.buffer);
3178 WARN_ON(max_tr.entries != global_trace.entries);
3179#endif
3180
3181 /* Allocate the first page for all buffers */
3182 for_each_tracing_cpu(i) {
3183 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3184 max_tr.data[i] = &per_cpu(max_data, i);
3185 }
3119 3186
3120 trace_init_cmdlines(); 3187 trace_init_cmdlines();
3121 3188
3122 register_tracer(&no_tracer); 3189 register_tracer(&nop_trace);
3123 current_trace = &no_tracer; 3190#ifdef CONFIG_BOOT_TRACER
3191 register_tracer(&boot_tracer);
3192 current_trace = &boot_tracer;
3193 current_trace->init(&global_trace);
3194#else
3195 current_trace = &nop_trace;
3196#endif
3124 3197
3125 /* All seems OK, enable tracing */ 3198 /* All seems OK, enable tracing */
3126 global_trace.ctrl = tracer_enabled; 3199 global_trace.ctrl = tracer_enabled;
3127 tracing_disabled = 0; 3200 tracing_disabled = 0;
3128 3201
3129 return 0; 3202 atomic_notifier_chain_register(&panic_notifier_list,
3203 &trace_panic_notifier);
3130 3204
3131 free_buffers: 3205 register_die_notifier(&trace_die_notifier);
3132 for (i-- ; i >= 0; i--) {
3133 struct page *page, *tmp;
3134 struct trace_array_cpu *data = global_trace.data[i];
3135 3206
3136 if (data) { 3207 return 0;
3137 list_for_each_entry_safe(page, tmp,
3138 &data->trace_pages, lru) {
3139 list_del_init(&page->lru);
3140 __free_page(page);
3141 }
3142 }
3143
3144#ifdef CONFIG_TRACER_MAX_TRACE
3145 data = max_tr.data[i];
3146 if (data) {
3147 list_for_each_entry_safe(page, tmp,
3148 &data->trace_pages, lru) {
3149 list_del_init(&page->lru);
3150 __free_page(page);
3151 }
3152 }
3153#endif
3154 }
3155 return ret;
3156} 3208}
3157fs_initcall(tracer_alloc_buffers); 3209early_initcall(tracer_alloc_buffers);
3210fs_initcall(tracer_init_debugfs);