Diffstat (limited to 'kernel')
 kernel/softirq.c                     |  15
 kernel/trace/ftrace.c                |  52
 kernel/trace/ring_buffer.c           | 252
 kernel/trace/trace.c                 | 257
 kernel/trace/trace.h                 |  38
 kernel/trace/trace_event_types.h     |  11
 kernel/trace/trace_events.c          |  18
 kernel/trace/trace_events_stage_2.h  |   2
 kernel/trace/trace_functions_graph.c |   6
 kernel/trace/trace_mmiotrace.c       |   7
 kernel/trace/trace_output.c          |  57
 kernel/trace/trace_printk.c          | 150
 kernel/trace/trace_stack.c           |  19
 kernel/trace/trace_workqueue.c       |  20
 14 files changed, 730 insertions(+), 174 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 7571bcb71be4..65ff3e3961b4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -24,6 +24,7 @@
 #include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/tick.h>
+#include <trace/irq.h>
 
 #include <asm/irq.h>
 /*
@@ -53,6 +54,11 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+char *softirq_to_name[NR_SOFTIRQS] = {
+	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
+	"TASKLET", "SCHED", "HRTIMER", "RCU"
+};
+
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
@@ -180,6 +186,9 @@ EXPORT_SYMBOL(local_bh_enable_ip);
  */
 #define MAX_SOFTIRQ_RESTART 10
 
+DEFINE_TRACE(softirq_entry);
+DEFINE_TRACE(softirq_exit);
+
 asmlinkage void __do_softirq(void)
 {
 	struct softirq_action *h;
@@ -206,12 +215,14 @@ restart:
 		if (pending & 1) {
 			int prev_count = preempt_count();
 
+			trace_softirq_entry(h, softirq_vec);
 			h->action(h);
-
+			trace_softirq_exit(h, softirq_vec);
 			if (unlikely(prev_count != preempt_count())) {
-				printk(KERN_ERR "huh, entered softirq %td %p"
+				printk(KERN_ERR "huh, entered softirq %td %s %p"
 				       "with preempt_count %08x,"
 				       " exited with %08x?\n", h - softirq_vec,
+				       softirq_to_name[h - softirq_vec],
 				       h->action, prev_count, preempt_count());
 				preempt_count() = prev_count;
 			}
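
The softirq.c hunks above wire static tracepoints into __do_softirq(). As a hedged sketch only, this is roughly what the matching declarations in <trace/irq.h> (not part of this diff) and a probe could look like, assuming the DECLARE_TRACE()/register_trace_*() API with the TP_PROTO()/TP_ARGS() spelling used in this kernel generation:

	/* Hypothetical sketch -- not the actual <trace/irq.h> contents. */
	#include <linux/tracepoint.h>
	#include <linux/interrupt.h>

	DECLARE_TRACE(softirq_entry,
		TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
		TP_ARGS(h, vec));
	DECLARE_TRACE(softirq_exit,
		TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
		TP_ARGS(h, vec));

	/* A probe can resolve the vector index back to the name table the
	 * patch adds to softirq.c. */
	static void probe_softirq_entry(struct softirq_action *h,
					struct softirq_action *vec)
	{
		pr_debug("softirq %s entered\n", softirq_to_name[h - vec]);
	}

	static int __init softirq_probe_init(void)
	{
		/* register_trace_softirq_entry() is generated by DECLARE_TRACE() */
		return register_trace_softirq_entry(probe_softirq_entry);
	}

The %s added to the printk in the hunk uses the same softirq_to_name[] lookup.
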
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d33d306bdcf4..90d5729afeff 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -272,7 +272,7 @@ enum {
 
 static int ftrace_filtered;
 
-static LIST_HEAD(ftrace_new_addrs);
+static struct dyn_ftrace *ftrace_new_addrs;
 
 static DEFINE_MUTEX(ftrace_regex_lock);
 
@@ -356,7 +356,8 @@ void ftrace_release(void *start, unsigned long size)
 
 	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
-		if ((rec->ip >= s) && (rec->ip < e))
+		if ((rec->ip >= s) && (rec->ip < e) &&
+		    !(rec->flags & FTRACE_FL_FREE))
 			ftrace_free_rec(rec);
 	} while_for_each_ftrace_rec();
 	mutex_unlock(&ftrace_lock);
@@ -408,8 +409,8 @@ ftrace_record_ip(unsigned long ip)
 		return NULL;
 
 	rec->ip = ip;
-
-	list_add(&rec->list, &ftrace_new_addrs);
+	rec->flags = (unsigned long)ftrace_new_addrs;
+	ftrace_new_addrs = rec;
 
 	return rec;
 }
@@ -531,11 +532,12 @@ static void ftrace_replace_code(int enable)
 
 	do_for_each_ftrace_rec(pg, rec) {
 		/*
-		 * Skip over free records and records that have
-		 * failed.
+		 * Skip over free records, records that have
+		 * failed and not converted.
 		 */
 		if (rec->flags & FTRACE_FL_FREE ||
-		    rec->flags & FTRACE_FL_FAILED)
+		    rec->flags & FTRACE_FL_FAILED ||
+		    rec->flags & FTRACE_FL_CONVERTED)
 			continue;
 
 		/* ignore updates to this record's mcount site */
@@ -547,7 +549,7 @@ static void ftrace_replace_code(int enable)
 		}
 
 		failed = __ftrace_replace_code(rec, enable);
-		if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
+		if (failed) {
 			rec->flags |= FTRACE_FL_FAILED;
 			if ((system_state == SYSTEM_BOOTING) ||
 			    !core_kernel_text(rec->ip)) {
@@ -714,19 +716,21 @@ unsigned long ftrace_update_tot_cnt;
 
 static int ftrace_update_code(struct module *mod)
 {
-	struct dyn_ftrace *p, *t;
+	struct dyn_ftrace *p;
 	cycle_t start, stop;
 
 	start = ftrace_now(raw_smp_processor_id());
 	ftrace_update_cnt = 0;
 
-	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
+	while (ftrace_new_addrs) {
 
 		/* If something went wrong, bail without enabling anything */
 		if (unlikely(ftrace_disabled))
 			return -1;
 
-		list_del_init(&p->list);
+		p = ftrace_new_addrs;
+		ftrace_new_addrs = (struct dyn_ftrace *)p->flags;
+		p->flags = 0L;
 
 		/* convert record (i.e, patch mcount-call with NOP) */
 		if (ftrace_code_disable(mod, p)) {
@@ -1118,16 +1122,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
 	return ftrace_regex_open(inode, file, 0);
 }
 
-static ssize_t
-ftrace_regex_read(struct file *file, char __user *ubuf,
-		  size_t cnt, loff_t *ppos)
-{
-	if (file->f_mode & FMODE_READ)
-		return seq_read(file, ubuf, cnt, ppos);
-	else
-		return -EPERM;
-}
-
 static loff_t
 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
 {
@@ -1880,7 +1874,7 @@ static const struct file_operations ftrace_failures_fops = {
 
 static const struct file_operations ftrace_filter_fops = {
 	.open = ftrace_filter_open,
-	.read = ftrace_regex_read,
+	.read = seq_read,
 	.write = ftrace_filter_write,
 	.llseek = ftrace_regex_lseek,
 	.release = ftrace_filter_release,
@@ -1888,7 +1882,7 @@ static const struct file_operations ftrace_filter_fops = {
1888 1882
1889static const struct file_operations ftrace_notrace_fops = { 1883static const struct file_operations ftrace_notrace_fops = {
1890 .open = ftrace_notrace_open, 1884 .open = ftrace_notrace_open,
1891 .read = ftrace_regex_read, 1885 .read = seq_read,
1892 .write = ftrace_notrace_write, 1886 .write = ftrace_notrace_write,
1893 .llseek = ftrace_regex_lseek, 1887 .llseek = ftrace_regex_lseek,
1894 .release = ftrace_notrace_release, 1888 .release = ftrace_notrace_release,
@@ -1990,16 +1984,6 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-static ssize_t
-ftrace_graph_read(struct file *file, char __user *ubuf,
-		  size_t cnt, loff_t *ppos)
-{
-	if (file->f_mode & FMODE_READ)
-		return seq_read(file, ubuf, cnt, ppos);
-	else
-		return -EPERM;
-}
-
 static int
 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
@@ -2130,7 +2114,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 
 static const struct file_operations ftrace_graph_fops = {
 	.open = ftrace_graph_open,
-	.read = ftrace_graph_read,
+	.read = seq_read,
 	.write = ftrace_graph_write,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
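
The ftrace.c changes above drop the list_head from struct dyn_ftrace and instead thread the pending "new address" records through the otherwise-unused ->flags word, saving two pointers per record. A stand-alone sketch of that pattern, with hypothetical types that only mirror the idea:

	/* Illustration: chain pending records through a field that is unused
	 * until the record is processed (hypothetical types, userspace C). */
	#include <stdio.h>

	struct rec {
		unsigned long ip;
		unsigned long flags;	/* doubles as "next" while pending */
	};

	static struct rec *new_recs;	/* head of the pending list */

	static void record_push(struct rec *r, unsigned long ip)
	{
		r->ip = ip;
		r->flags = (unsigned long)new_recs;	/* remember old head */
		new_recs = r;				/* r becomes the head */
	}

	static struct rec *record_pop(void)
	{
		struct rec *r = new_recs;

		if (!r)
			return NULL;
		new_recs = (struct rec *)r->flags;	/* unlink */
		r->flags = 0UL;		/* ->flags is now free for flag bits */
		return r;
	}

	int main(void)
	{
		struct rec a, b, *r;

		record_push(&a, 0x1000);
		record_push(&b, 0x2000);
		while ((r = record_pop()))
			printf("processing record at ip=%#lx\n", r->ip);
		return 0;
	}

This is why ftrace_update_code() above pops with "p = ftrace_new_addrs; ftrace_new_addrs = (struct dyn_ftrace *)p->flags; p->flags = 0L;".
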
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 178858492a89..58128ad2fde0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,11 +16,80 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/list.h>
+#include <linux/cpu.h>
 #include <linux/fs.h>
 
 #include "trace.h"
 
+/*
+ * The ring buffer is made up of a list of pages. A separate list of pages is
+ * allocated for each CPU. A writer may only write to a buffer that is
+ * associated with the CPU it is currently executing on. A reader may read
+ * from any per cpu buffer.
+ *
+ * The reader is special. For each per cpu buffer, the reader has its own
+ * reader page. When a reader has read the entire reader page, this reader
+ * page is swapped with another page in the ring buffer.
+ *
+ * Now, as long as the writer is off the reader page, the reader can do what
+ * ever it wants with that page. The writer will never write to that page
+ * again (as long as it is out of the ring buffer).
+ *
+ * Here's some silly ASCII art.
+ *
+ *   +------+
+ *   |reader|          RING BUFFER
+ *   |page  |
+ *   +------+        +---+   +---+   +---+
+ *                   |   |-->|   |-->|   |
+ *                   +---+   +---+   +---+
+ *                     ^               |
+ *                     |               |
+ *                     +---------------+
+ *
+ *
+ *   +------+
+ *   |reader|          RING BUFFER
+ *   |page  |------------------v
+ *   +------+        +---+   +---+   +---+
+ *                   |   |-->|   |-->|   |
+ *                   +---+   +---+   +---+
+ *                     ^               |
+ *                     |               |
+ *                     +---------------+
+ *
+ *
+ *   +------+
+ *   |reader|          RING BUFFER
+ *   |page  |------------------v
+ *   +------+        +---+   +---+   +---+
+ *      ^            |   |-->|   |-->|   |
+ *      |            +---+   +---+   +---+
+ *      |                              |
+ *      |                              |
+ *      +------------------------------+
+ *
+ *
+ *   +------+
+ *   |buffer|          RING BUFFER
+ *   |page  |------------------v
+ *   +------+        +---+   +---+   +---+
+ *      ^            |   |   |   |-->|   |
+ *      |   New      +---+   +---+   +---+
+ *      |  Reader------^               |
+ *      |   page                       |
+ *      +------------------------------+
+ *
+ *
+ * After we make this swap, the reader can hand this page off to the splice
+ * code and be done with it. It can even allocate a new page if it needs to
+ * and swap that into the ring buffer.
+ *
+ * We will be using cmpxchg soon to make all this lockless.
+ *
+ */
+
 /*
  * A fast way to enable or disable all ring buffers is to
  * call tracing_on or tracing_off. Turning off the ring buffers
  * prevents all ring buffers from being recorded to.
@@ -301,6 +370,10 @@ struct ring_buffer {
 	struct mutex			mutex;
 
 	struct ring_buffer_per_cpu	**buffers;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	struct notifier_block		cpu_notify;
+#endif
 };
 
 struct ring_buffer_iter {
@@ -459,6 +532,11 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
  */
 extern int ring_buffer_page_too_big(void);
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+				   unsigned long action, void *hcpu);
+#endif
+
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
  * @size: the size in bytes per cpu that is needed.
@@ -496,7 +574,8 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	if (buffer->pages == 1)
 		buffer->pages++;
 
-	cpumask_copy(buffer->cpumask, cpu_possible_mask);
+	get_online_cpus();
+	cpumask_copy(buffer->cpumask, cpu_online_mask);
 	buffer->cpus = nr_cpu_ids;
 
 	bsize = sizeof(void *) * nr_cpu_ids;
@@ -512,6 +591,13 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 			goto fail_free_buffers;
 	}
 
+#ifdef CONFIG_HOTPLUG_CPU
+	buffer->cpu_notify.notifier_call = rb_cpu_notify;
+	buffer->cpu_notify.priority = 0;
+	register_cpu_notifier(&buffer->cpu_notify);
+#endif
+
+	put_online_cpus();
 	mutex_init(&buffer->mutex);
 
 	return buffer;
@@ -525,6 +611,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 
  fail_free_cpumask:
 	free_cpumask_var(buffer->cpumask);
+	put_online_cpus();
 
  fail_free_buffer:
 	kfree(buffer);
@@ -541,9 +628,17 @@ ring_buffer_free(struct ring_buffer *buffer)
 {
 	int cpu;
 
+	get_online_cpus();
+
+#ifdef CONFIG_HOTPLUG_CPU
+	unregister_cpu_notifier(&buffer->cpu_notify);
+#endif
+
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
+	put_online_cpus();
+
 	free_cpumask_var(buffer->cpumask);
 
 	kfree(buffer);
@@ -649,16 +744,15 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		return size;
 
 	mutex_lock(&buffer->mutex);
+	get_online_cpus();
 
 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
 	if (size < buffer_size) {
 
 		/* easy case, just free pages */
-		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
-			mutex_unlock(&buffer->mutex);
-			return -1;
-		}
+		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
+			goto out_fail;
 
 		rm_pages = buffer->pages - nr_pages;
 
@@ -677,10 +771,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	 * add these pages to the cpu_buffers. Otherwise we just free
 	 * them all and return -ENOMEM;
 	 */
-	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
-		mutex_unlock(&buffer->mutex);
-		return -1;
-	}
+	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
+		goto out_fail;
 
 	new_pages = nr_pages - buffer->pages;
 
@@ -705,13 +797,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		rb_insert_pages(cpu_buffer, &pages, new_pages);
 	}
 
-	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
-		mutex_unlock(&buffer->mutex);
-		return -1;
-	}
+	if (RB_WARN_ON(buffer, !list_empty(&pages)))
+		goto out_fail;
 
  out:
 	buffer->pages = nr_pages;
+	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
 	return size;
@@ -721,8 +812,18 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
+	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
+
+	/*
+	 * Something went totally wrong, and we are too paranoid
+	 * to even clean up the mess.
+	 */
+ out_fail:
+	put_online_cpus();
+	mutex_unlock(&buffer->mutex);
+	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
@@ -1564,12 +1665,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-	return cpu_buffer->entries;
+	ret = cpu_buffer->entries;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
@@ -1581,12 +1685,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-	return cpu_buffer->overrun;
+	ret = cpu_buffer->overrun;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
@@ -1663,9 +1770,14 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
  */
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 {
-	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
 
+	if (!iter)
+		return;
+
+	cpu_buffer = iter->cpu_buffer;
+
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
@@ -1900,9 +2012,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct buffer_page *reader;
 	int nr_loops = 0;
 
-	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return NULL;
-
 	cpu_buffer = buffer->buffers[cpu];
 
  again:
@@ -2031,6 +2140,9 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return NULL;
+
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_buffer_peek(buffer, cpu, ts);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
@@ -2071,24 +2183,31 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 struct ring_buffer_event *
 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 {
-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
-	struct ring_buffer_event *event;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
 
+	/* might be called in atomic */
+	preempt_disable();
+
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
-		return NULL;
+		goto out;
 
+	cpu_buffer = buffer->buffers[cpu];
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
-		goto out;
+		goto out_unlock;
 
 	rb_advance_reader(cpu_buffer);
 
- out:
+ out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
+	preempt_enable();
+
 	return event;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_consume);
@@ -2268,6 +2387,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 		if (!rb_per_cpu_empty(cpu_buffer))
 			return 0;
 	}
+
 	return 1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_empty);
@@ -2280,12 +2400,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	int ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
 	cpu_buffer = buffer->buffers[cpu];
-	return rb_per_cpu_empty(cpu_buffer);
+	ret = rb_per_cpu_empty(cpu_buffer);
+
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 
@@ -2304,32 +2428,35 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 {
 	struct ring_buffer_per_cpu *cpu_buffer_a;
 	struct ring_buffer_per_cpu *cpu_buffer_b;
+	int ret = -EINVAL;
 
 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
-		return -EINVAL;
+		goto out;
 
 	/* At least make sure the two buffers are somewhat the same */
 	if (buffer_a->pages != buffer_b->pages)
-		return -EINVAL;
+		goto out;
+
+	ret = -EAGAIN;
 
 	if (ring_buffer_flags != RB_BUFFERS_ON)
-		return -EAGAIN;
+		goto out;
 
 	if (atomic_read(&buffer_a->record_disabled))
-		return -EAGAIN;
+		goto out;
 
 	if (atomic_read(&buffer_b->record_disabled))
-		return -EAGAIN;
+		goto out;
 
 	cpu_buffer_a = buffer_a->buffers[cpu];
 	cpu_buffer_b = buffer_b->buffers[cpu];
 
 	if (atomic_read(&cpu_buffer_a->record_disabled))
-		return -EAGAIN;
+		goto out;
 
 	if (atomic_read(&cpu_buffer_b->record_disabled))
-		return -EAGAIN;
+		goto out;
 
 	/*
 	 * We can't do a synchronize_sched here because this
@@ -2349,7 +2476,9 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	atomic_dec(&cpu_buffer_a->record_disabled);
 	atomic_dec(&cpu_buffer_b->record_disabled);
 
-	return 0;
+	ret = 0;
+ out:
+	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 
@@ -2464,27 +2593,30 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	u64 save_timestamp;
 	int ret = -1;
 
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		goto out;
+
 	/*
 	 * If len is not big enough to hold the page header, then
 	 * we can not copy anything.
 	 */
 	if (len <= BUF_PAGE_HDR_SIZE)
-		return -1;
+		goto out;
 
 	len -= BUF_PAGE_HDR_SIZE;
 
 	if (!data_page)
-		return -1;
+		goto out;
 
 	bpage = *data_page;
 	if (!bpage)
-		return -1;
+		goto out;
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
-		goto out;
+		goto out_unlock;
 
 	event = rb_reader_event(cpu_buffer);
 
@@ -2506,7 +2638,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		unsigned int size;
 
 		if (full)
-			goto out;
+			goto out_unlock;
 
 		if (len > (commit - read))
 			len = (commit - read);
@@ -2514,7 +2646,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		size = rb_event_length(event);
 
 		if (len < size)
-			goto out;
+			goto out_unlock;
 
 		/* save the current timestamp, since the user will need it */
 		save_timestamp = cpu_buffer->read_stamp;
@@ -2553,9 +2685,10 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	}
 	ret = read;
 
- out:
+ out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
+ out:
 	return ret;
 }
 
@@ -2629,3 +2762,42 @@ static __init int rb_init_debugfs(void)
 }
 
 fs_initcall(rb_init_debugfs);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+				   unsigned long action, void *hcpu)
+{
+	struct ring_buffer *buffer =
+		container_of(self, struct ring_buffer, cpu_notify);
+	long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		if (cpu_isset(cpu, *buffer->cpumask))
+			return NOTIFY_OK;
+
+		buffer->buffers[cpu] =
+			rb_allocate_cpu_buffer(buffer, cpu);
+		if (!buffer->buffers[cpu]) {
+			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
+			     cpu);
+			return NOTIFY_OK;
+		}
+		smp_wmb();
+		cpu_set(cpu, *buffer->cpumask);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		/*
+		 * Do nothing.
+		 *  If we were to free the buffer, then the user would
+		 *  lose any trace that was in the buffer.
+		 */
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+#endif
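
With the hunks above, ring_buffer_consume() disables preemption itself and validates the cpumask, so a caller only needs to walk the CPUs it cares about. A minimal consumer sketch against the API as it appears in this file; the buffer pointer and handle_event() helper are hypothetical:

	/* Sketch: drain every readable event from a ring buffer. */
	static void drain_buffer(struct ring_buffer *buffer)
	{
		struct ring_buffer_event *event;
		u64 ts;
		int cpu;

		for_each_online_cpu(cpu) {
			/* ring_buffer_consume() returns NULL when the CPU is
			 * not in buffer->cpumask or its buffer is empty. */
			while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
				void *data = ring_buffer_event_data(event);

				handle_event(cpu, ts, data); /* hypothetical */
			}
		}
	}
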
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5c9c6d907054..efe3202c0209 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -45,6 +45,12 @@ unsigned long __read_mostly tracing_max_latency;
 unsigned long __read_mostly	tracing_thresh;
 
 /*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+static int ring_buffer_expanded;
+
+/*
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
  * entries inserted during the selftest although some concurrent
@@ -128,6 +134,8 @@ static int __init set_ftrace(char *str)
 {
 	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
+	/* We are using ftrace early, expand it */
+	ring_buffer_expanded = 1;
 	return 1;
 }
 __setup("ftrace=", set_ftrace);
@@ -1171,10 +1179,10 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
 
 /**
- * trace_vprintk - write binary msg to tracing buffer
+ * trace_vbprintk - write binary msg to tracing buffer
  *
  */
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
 	static raw_spinlock_t trace_buf_lock =
 		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
@@ -1183,7 +1191,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	struct ring_buffer_event *event;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	struct print_entry *entry;
+	struct bprint_entry *entry;
 	unsigned long flags;
 	int resched;
 	int cpu, len = 0, size, pc;
@@ -1211,7 +1219,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 		goto out_unlock;
 
 	size = sizeof(*entry) + sizeof(u32) * len;
-	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, flags, pc);
+	event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
@@ -1232,6 +1240,60 @@ out:
 
 	return len;
 }
+EXPORT_SYMBOL_GPL(trace_vbprintk);
+
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+{
+	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static char trace_buf[TRACE_BUF_SIZE];
+
+	struct ring_buffer_event *event;
+	struct trace_array *tr = &global_trace;
+	struct trace_array_cpu *data;
+	int cpu, len = 0, size, pc;
+	struct print_entry *entry;
+	unsigned long irq_flags;
+
+	if (tracing_disabled || tracing_selftest_running)
+		return 0;
+
+	pc = preempt_count();
+	preempt_disable_notrace();
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+
+	if (unlikely(atomic_read(&data->disabled)))
+		goto out;
+
+	pause_graph_tracing();
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&trace_buf_lock);
+	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+	len = min(len, TRACE_BUF_SIZE-1);
+	trace_buf[len] = 0;
+
+	size = sizeof(*entry) + len + 1;
+	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+	if (!event)
+		goto out_unlock;
+	entry = ring_buffer_event_data(event);
+	entry->ip = ip;
+	entry->depth = depth;
+
+	memcpy(&entry->buf, trace_buf, len);
+	entry->buf[len] = 0;
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+ out_unlock:
+	__raw_spin_unlock(&trace_buf_lock);
+	raw_local_irq_restore(irq_flags);
+	unpause_graph_tracing();
+ out:
+	preempt_enable_notrace();
+
+	return len;
+}
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
 enum trace_file_type {
@@ -1620,6 +1682,22 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 	return TRACE_TYPE_HANDLED;
 }
 
+static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+	struct bprint_entry *field;
+	int ret;
+
+	trace_assign_type(field, entry);
+
+	ret = trace_seq_bprintf(s, field->fmt, field->buf);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
 static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
@@ -1629,7 +1707,7 @@ static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
 
 	trace_assign_type(field, entry);
 
-	ret = trace_seq_bprintf(s, field->fmt, field->buf);
+	ret = trace_seq_printf(s, "%s", field->buf);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -1658,6 +1736,19 @@ static int trace_empty(struct trace_iterator *iter)
 {
 	int cpu;
 
+	/* If we are looking at one CPU buffer, only check that one */
+	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
+		cpu = iter->cpu_file;
+		if (iter->buffer_iter[cpu]) {
+			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+				return 0;
+		} else {
+			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+				return 0;
+		}
+		return 1;
+	}
+
 	for_each_tracing_cpu(cpu) {
 		if (iter->buffer_iter[cpu]) {
 			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
@@ -1681,6 +1772,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 			return ret;
 	}
 
+	if (iter->ent->type == TRACE_BPRINT &&
+			trace_flags & TRACE_ITER_PRINTK &&
+			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+		return print_bprintk_msg_only(iter);
+
 	if (iter->ent->type == TRACE_PRINT &&
 			trace_flags & TRACE_ITER_PRINTK &&
 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
@@ -1784,17 +1880,11 @@ __tracing_open(struct inode *inode, struct file *file)
 
 			iter->buffer_iter[cpu] =
 				ring_buffer_read_start(iter->tr->buffer, cpu);
-
-			if (!iter->buffer_iter[cpu])
-				goto fail_buffer;
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
 			ring_buffer_read_start(iter->tr->buffer, cpu);
-
-		if (!iter->buffer_iter[cpu])
-			goto fail;
 	}
 
 	/* TODO stop tracer */
@@ -2315,6 +2405,75 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
 	return t->init(tr);
 }
 
+static int tracing_resize_ring_buffer(unsigned long size)
+{
+	int ret;
+
+	/*
+	 * If kernel or user changes the size of the ring buffer
+	 * we use the size that was given, and we can forget about
+	 * expanding it later.
+	 */
+	ring_buffer_expanded = 1;
+
+	ret = ring_buffer_resize(global_trace.buffer, size);
+	if (ret < 0)
+		return ret;
+
+	ret = ring_buffer_resize(max_tr.buffer, size);
+	if (ret < 0) {
+		int r;
+
+		r = ring_buffer_resize(global_trace.buffer,
+				       global_trace.entries);
+		if (r < 0) {
+			/*
+			 * AARGH! We are left with different
+			 * size max buffer!!!!
+			 * The max buffer is our "snapshot" buffer.
+			 * When a tracer needs a snapshot (one of the
+			 * latency tracers), it swaps the max buffer
+			 * with the saved snap shot. We succeeded to
+			 * update the size of the main buffer, but failed to
+			 * update the size of the max buffer. But when we tried
+			 * to reset the main buffer to the original size, we
+			 * failed there too. This is very unlikely to
+			 * happen, but if it does, warn and kill all
+			 * tracing.
+			 */
+			WARN_ON(1);
+			tracing_disabled = 1;
+		}
+		return ret;
+	}
+
+	global_trace.entries = size;
+
+	return ret;
+}
+
+/**
+ * tracing_update_buffers - used by tracing facility to expand ring buffers
+ *
+ * To save on memory when the tracing is never used on a system with it
+ * configured in. The ring buffers are set to a minimum size. But once
+ * a user starts to use the tracing facility, then they need to grow
+ * to their default size.
+ *
+ * This function is to be called when a tracer is about to be used.
+ */
+int tracing_update_buffers(void)
+{
+	int ret = 0;
+
+	mutex_lock(&trace_types_lock);
+	if (!ring_buffer_expanded)
+		ret = tracing_resize_ring_buffer(trace_buf_size);
+	mutex_unlock(&trace_types_lock);
+
+	return ret;
+}
+
 struct trace_option_dentry;
 
 static struct trace_option_dentry *
@@ -2331,6 +2490,14 @@ static int tracing_set_tracer(const char *buf)
 	int ret = 0;
 
 	mutex_lock(&trace_types_lock);
+
+	if (!ring_buffer_expanded) {
+		ret = tracing_resize_ring_buffer(trace_buf_size);
+		if (ret < 0)
+			return ret;
+		ret = 0;
+	}
+
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(t->name, buf) == 0)
 			break;
@@ -2856,10 +3023,18 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
 	struct trace_array *tr = filp->private_data;
-	char buf[64];
+	char buf[96];
 	int r;
 
-	r = sprintf(buf, "%lu\n", tr->entries >> 10);
+	mutex_lock(&trace_types_lock);
+	if (!ring_buffer_expanded)
+		r = sprintf(buf, "%lu (expanded: %lu)\n",
+			    tr->entries >> 10,
+			    trace_buf_size >> 10);
+	else
+		r = sprintf(buf, "%lu\n", tr->entries >> 10);
+	mutex_unlock(&trace_types_lock);
+
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
@@ -2903,28 +3078,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	val <<= 10;
 
 	if (val != global_trace.entries) {
-		ret = ring_buffer_resize(global_trace.buffer, val);
+		ret = tracing_resize_ring_buffer(val);
 		if (ret < 0) {
 			cnt = ret;
 			goto out;
 		}
-
-		ret = ring_buffer_resize(max_tr.buffer, val);
-		if (ret < 0) {
-			int r;
-			cnt = ret;
-			r = ring_buffer_resize(global_trace.buffer,
-					       global_trace.entries);
-			if (r < 0) {
-				/* AARGH! We are left with different
-				 * size max buffer!!!! */
-				WARN_ON(1);
-				tracing_disabled = 1;
-			}
-			goto out;
-		}
-
-		global_trace.entries = val;
 	}
 
 	filp->f_pos += cnt;
@@ -3385,6 +3543,11 @@ static void tracing_init_debugfs_percpu(long cpu)
 			    (void *) cpu, &tracing_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs 'trace' entry\n");
+
+	entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
+				    (void *) cpu, &tracing_buffers_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
@@ -3668,7 +3831,6 @@ static __init void create_trace_options_dir(void)
 static __init int tracer_init_debugfs(void)
 {
 	struct dentry *d_tracer;
-	struct dentry *buffers;
 	struct dentry *entry;
 	int cpu;
 
@@ -3741,26 +3903,6 @@ static __init int tracer_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'trace_marker' entry\n");
 
-	buffers = debugfs_create_dir("binary_buffers", d_tracer);
-
-	if (!buffers)
-		pr_warning("Could not create buffers directory\n");
-	else {
-		int cpu;
-		char buf[64];
-
-		for_each_tracing_cpu(cpu) {
-			sprintf(buf, "%d", cpu);
-
-			entry = debugfs_create_file(buf, 0444, buffers,
-						    (void *)(long)cpu,
-						    &tracing_buffers_fops);
-			if (!entry)
-				pr_warning("Could not create debugfs buffers "
-					   "'%s' entry\n", buf);
-		}
-	}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 				    &ftrace_update_tot_cnt,
@@ -3916,6 +4058,7 @@ void ftrace_dump(void)
 __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
+	int ring_buf_size;
 	int i;
 	int ret = -ENOMEM;
 
@@ -3928,12 +4071,18 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
 		goto out_free_tracing_cpumask;
 
+	/* To save memory, keep the ring buffer size to its minimum */
+	if (ring_buffer_expanded)
+		ring_buf_size = trace_buf_size;
+	else
+		ring_buf_size = 1;
+
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
 	cpumask_clear(tracing_reader_cpumask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
-	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
+	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
 						TRACE_BUFFER_FLAGS);
 	if (!global_trace.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
@@ -3944,7 +4093,7 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
+	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
 					  TRACE_BUFFER_FLAGS);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
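
The new trace_vprintk() above stores an already-formatted string in a variable-length print_entry, reserving sizeof(*entry) + len + 1 bytes, while trace_vbprintk() keeps the binary u32 argument array plus the format pointer. A small stand-alone sketch of the string-variant sizing, with a plain malloc() standing in for the ring-buffer reserve/commit calls:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Mirrors the shape of struct print_entry: fixed header + NUL-terminated text. */
	struct print_entry_like {
		unsigned long ip;
		int depth;
		char buf[];	/* flexible array member, as in the kernel struct */
	};

	static struct print_entry_like *make_entry(unsigned long ip, int depth,
						   const char *msg)
	{
		size_t len = strlen(msg);
		/* same arithmetic as trace_vprintk(): header + text + trailing NUL */
		struct print_entry_like *e = malloc(sizeof(*e) + len + 1);

		if (!e)
			return NULL;
		e->ip = ip;
		e->depth = depth;
		memcpy(e->buf, msg, len);
		e->buf[len] = '\0';
		return e;
	}

	int main(void)
	{
		struct print_entry_like *e = make_entry(0xc0ffee, 0, "hello, tracer\n");

		if (e) {
			printf("%zu-byte record: %s",
			       sizeof(*e) + strlen(e->buf) + 1, e->buf);
			free(e);
		}
		return 0;
	}
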
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d80ca0d464d9..f56162806f50 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -20,6 +20,7 @@ enum trace_type {
 	TRACE_WAKE,
 	TRACE_STACK,
 	TRACE_PRINT,
+	TRACE_BPRINT,
 	TRACE_SPECIAL,
 	TRACE_MMIO_RW,
 	TRACE_MMIO_MAP,
@@ -119,7 +120,7 @@ struct userstack_entry {
 /*
  * trace_printk entry:
  */
-struct print_entry {
+struct bprint_entry {
 	struct trace_entry	ent;
 	unsigned long		ip;
 	int			depth;
@@ -127,6 +128,13 @@ struct print_entry {
 	u32			buf[];
 };
 
+struct print_entry {
+	struct trace_entry	ent;
+	unsigned long		ip;
+	int			depth;
+	char			buf[];
+};
+
 #define TRACE_OLD_SIZE		88
 
 struct trace_field_cont {
@@ -301,6 +309,7 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
 		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
+		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
 		IF_ASSIGN(var, ent, struct special_entry, 0);		\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
@@ -589,6 +598,8 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 extern void *head_page(struct trace_array_cpu *data);
 extern long ns2usecs(cycle_t nsec);
 extern int
+trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+extern int
 trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
 
 extern unsigned long trace_flags;
@@ -756,6 +767,9 @@ static inline void trace_branch_disable(void)
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
+/* set ring buffers to default size if not already done so */
+int tracing_update_buffers(void);
+
 /* trace event type bit fields, not numeric */
 enum {
 	TRACE_EVENT_TYPE_PRINTF		= 1,
@@ -778,4 +792,26 @@ void event_trace_printk(unsigned long ip, const char *fmt, ...);
 extern struct ftrace_event_call __start_ftrace_events[];
 extern struct ftrace_event_call __stop_ftrace_events[];
 
+extern const char *__start___trace_bprintk_fmt[];
+extern const char *__stop___trace_bprintk_fmt[];
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to allocate the static variable to fmt if it is not a
+ * constant. Even with the outer if statement optimizing out.
+ */
+#define event_trace_printk(ip, fmt, args...)				\
+do {									\
+	__trace_printk_check_format(fmt, ##args);			\
+	tracing_record_cmdline(current);				\
+	if (__builtin_constant_p(fmt)) {				\
+		static const char *trace_printk_fmt			\
+		  __attribute__((section("__trace_printk_fmt"))) =	\
+			__builtin_constant_p(fmt) ? fmt : NULL;		\
+									\
+		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
+	} else								\
+		__trace_printk(ip, fmt, ##args);			\
+} while (0)
+
 #endif /* _LINUX_KERNEL_TRACE_H */
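
The event_trace_printk() macro above parks each constant format string in a dedicated __trace_printk_fmt section, and the __start___trace_bprintk_fmt/__stop___trace_bprintk_fmt symbols declared just before it bracket that section at link time. A hedged sketch of walking those markers, assuming the linker exports them exactly as declared (this is roughly what a seq_file reader of the formats would do):

	#include <linux/kernel.h>

	/* Sketch: enumerate every format string recorded in the section. */
	extern const char *__start___trace_bprintk_fmt[];
	extern const char *__stop___trace_bprintk_fmt[];

	static void list_bprintk_formats(void)
	{
		const char **fmt;

		for (fmt = __start___trace_bprintk_fmt;
		     fmt < __stop___trace_bprintk_fmt; fmt++) {
			if (*fmt)
				printk(KERN_INFO "trace_printk fmt: %s\n", *fmt);
		}
	}
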
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
index 5cca4c978bde..019915063fe6 100644
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -102,7 +102,7 @@ TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore,
102 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n") 102 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n")
103); 103);
104 104
105TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore, 105TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
106 TRACE_STRUCT( 106 TRACE_STRUCT(
107 TRACE_FIELD(unsigned long, ip, ip) 107 TRACE_FIELD(unsigned long, ip, ip)
108 TRACE_FIELD(unsigned int, depth, depth) 108 TRACE_FIELD(unsigned int, depth, depth)
@@ -112,6 +112,15 @@ TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore,
 	TP_RAW_FMT("%08lx (%d) fmt:%p %s")
 );
 
+TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore,
+	TRACE_STRUCT(
+		TRACE_FIELD(unsigned long, ip, ip)
+		TRACE_FIELD(unsigned int, depth, depth)
+		TRACE_FIELD_ZERO_CHAR(buf)
+	),
+	TP_RAW_FMT("%08lx (%d) fmt:%p %s")
+);
+
 TRACE_EVENT_FORMAT(branch, TRACE_BRANCH, trace_branch, ignore,
 	TRACE_STRUCT(
 		TRACE_FIELD(unsigned int, line, line)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 769dfd00fc85..238ea95a4115 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -24,16 +24,6 @@ static DEFINE_MUTEX(event_mutex);
 	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
 	     event++)
 
-void event_trace_printk(unsigned long ip, const char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	tracing_record_cmdline(current);
-	trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
-	va_end(ap);
-}
-
 static void ftrace_clear_events(void)
 {
 	struct ftrace_event_call *call = (void *)__start_ftrace_events;
@@ -141,6 +131,10 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 	if (!cnt || cnt < 0)
 		return 0;
 
+	ret = tracing_update_buffers();
+	if (ret < 0)
+		return ret;
+
 	ret = get_user(ch, ubuf++);
 	if (ret)
 		return ret;
@@ -331,6 +325,10 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
+	ret = tracing_update_buffers();
+	if (ret < 0)
+		return ret;
+
 	switch (val) {
 	case 0:
 	case 1:
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
index ca347afd6aa0..5117c43f5c67 100644
--- a/kernel/trace/trace_events_stage_2.h
+++ b/kernel/trace/trace_events_stage_2.h
@@ -57,7 +57,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 									\
 	field = (typeof(field))entry;					\
 									\
-	ret = trace_seq_printf(s, print);				\
+	ret = trace_seq_printf(s, #call ": " print);			\
 	if (!ret)							\
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
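
The one-liner above relies on preprocessor stringification plus adjacent-literal concatenation: #call turns the macro argument into a string, and the compiler glues it to ": " and to the print format at compile time. A tiny stand-alone illustration:

	#include <stdio.h>

	/* #name stringifies the macro argument; adjacent literals are concatenated. */
	#define PREFIXED_FMT(name, fmt)	#name ": " fmt

	int main(void)
	{
		/* expands to "sched_switch" ": " "prev=%d next=%d\n" -> one literal */
		printf(PREFIXED_FMT(sched_switch, "prev=%d next=%d\n"), 1, 2);
		return 0;
	}
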
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 8566c14b3e9a..4c388607ed67 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -684,7 +684,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 }
 
 static enum print_line_t
-print_graph_comment(struct print_entry *trace, struct trace_seq *s,
+print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 		    struct trace_entry *ent, struct trace_iterator *iter)
 {
 	int i;
@@ -781,8 +781,8 @@ print_graph_function(struct trace_iterator *iter)
 		trace_assign_type(field, entry);
 		return print_graph_return(&field->ret, s, entry, iter);
 	}
-	case TRACE_PRINT: {
-		struct print_entry *field;
+	case TRACE_BPRINT: {
+		struct bprint_entry *field;
 		trace_assign_type(field, entry);
 		return print_graph_comment(field, s, entry, iter);
 	}
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 23e346a734ca..f095916e477f 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -254,6 +254,7 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
 {
 	struct trace_entry *entry = iter->ent;
 	struct print_entry *print = (struct print_entry *)entry;
+	const char *msg		= print->buf;
 	struct trace_seq *s	= &iter->seq;
 	unsigned long long t	= ns2usecs(iter->ts);
 	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
@@ -261,11 +262,7 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
 	int ret;
 
 	/* The trailing newline must be in the message. */
-	ret = trace_seq_printf(s, "MARK %u.%06lu ", secs, usec_rem);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	ret = trace_seq_bprintf(s, print->fmt, print->buf);
+	ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 491832af9ba1..ea9d3b410c7a 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -832,13 +832,13 @@ static struct trace_event trace_user_stack_event = {
 	.binary		= trace_special_bin,
 };
 
-/* TRACE_PRINT */
+/* TRACE_BPRINT */
 static enum print_line_t
-trace_print_print(struct trace_iterator *iter, int flags)
+trace_bprint_print(struct trace_iterator *iter, int flags)
 {
 	struct trace_entry *entry = iter->ent;
 	struct trace_seq *s = &iter->seq;
-	struct print_entry *field;
+	struct bprint_entry *field;
 
 	trace_assign_type(field, entry);
 
@@ -858,9 +858,10 @@ trace_print_print(struct trace_iterator *iter, int flags)
858} 858}
859 859
860 860
861static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags) 861static enum print_line_t
862trace_bprint_raw(struct trace_iterator *iter, int flags)
862{ 863{
863 struct print_entry *field; 864 struct bprint_entry *field;
864 struct trace_seq *s = &iter->seq; 865 struct trace_seq *s = &iter->seq;
865 866
866 trace_assign_type(field, iter->ent); 867 trace_assign_type(field, iter->ent);
@@ -878,12 +879,55 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
878} 879}
879 880
880 881
882static struct trace_event trace_bprint_event = {
883 .type = TRACE_BPRINT,
884 .trace = trace_bprint_print,
885 .raw = trace_bprint_raw,
886};
887
888/* TRACE_PRINT */
889static enum print_line_t trace_print_print(struct trace_iterator *iter,
890 int flags)
891{
892 struct print_entry *field;
893 struct trace_seq *s = &iter->seq;
894
895 trace_assign_type(field, iter->ent);
896
897 if (!seq_print_ip_sym(s, field->ip, flags))
898 goto partial;
899
900 if (!trace_seq_printf(s, ": %s", field->buf))
901 goto partial;
902
903 return TRACE_TYPE_HANDLED;
904
905 partial:
906 return TRACE_TYPE_PARTIAL_LINE;
907}
908
909static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
910{
911 struct print_entry *field;
912
913 trace_assign_type(field, iter->ent);
914
915 if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
916 goto partial;
917
918 return TRACE_TYPE_HANDLED;
919
920 partial:
921 return TRACE_TYPE_PARTIAL_LINE;
922}
923
881static struct trace_event trace_print_event = { 924static struct trace_event trace_print_event = {
882 .type = TRACE_PRINT, 925 .type = TRACE_PRINT,
883 .trace = trace_print_print, 926 .trace = trace_print_print,
884 .raw = trace_print_raw, 927 .raw = trace_print_raw,
885}; 928};
886 929
930
887static struct trace_event *events[] __initdata = { 931static struct trace_event *events[] __initdata = {
888 &trace_fn_event, 932 &trace_fn_event,
889 &trace_ctx_event, 933 &trace_ctx_event,
@@ -891,6 +935,7 @@ static struct trace_event *events[] __initdata = {
891 &trace_special_event, 935 &trace_special_event,
892 &trace_stack_event, 936 &trace_stack_event,
893 &trace_user_stack_event, 937 &trace_user_stack_event,
938 &trace_bprint_event,
894 &trace_print_event, 939 &trace_print_event,
895 NULL 940 NULL
896}; 941};
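For context, a sketch of how the events[] table above is consumed: each entry is registered at init time, so adding &trace_bprint_event is all that is needed to hook the new TRACE_BPRINT record type into the output path. The function below is illustrative only (the name init_builtin_trace_events is invented), assuming the register_ftrace_event() helper that trace_output.c provides, which returns 0 on failure.

/* Illustrative init-time walk over events[]; not part of the patch. */
static __init int init_builtin_trace_events(void)
{
	struct trace_event *event;
	int i;

	for (i = 0; events[i]; i++) {
		event = events[i];
		if (!register_ftrace_event(event))
			pr_warning("event type %d failed to register\n",
				   event->type);
	}
	return 0;
}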
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index a50aea22e929..486785214e3e 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -4,18 +4,19 @@
4 * Copyright (C) 2008 Lai Jiangshan <laijs@cn.fujitsu.com> 4 * Copyright (C) 2008 Lai Jiangshan <laijs@cn.fujitsu.com>
5 * 5 *
6 */ 6 */
7#include <linux/seq_file.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
7#include <linux/kernel.h> 10#include <linux/kernel.h>
8#include <linux/ftrace.h> 11#include <linux/ftrace.h>
9#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/module.h>
14#include <linux/marker.h>
15#include <linux/mutex.h>
10#include <linux/ctype.h> 16#include <linux/ctype.h>
11#include <linux/list.h> 17#include <linux/list.h>
12#include <linux/mutex.h>
13#include <linux/slab.h> 18#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/seq_file.h>
16#include <linux/fs.h> 19#include <linux/fs.h>
17#include <linux/marker.h>
18#include <linux/uaccess.h>
19 20
20#include "trace.h" 21#include "trace.h"
21 22
@@ -99,7 +100,7 @@ struct notifier_block module_trace_bprintk_format_nb = {
99 .notifier_call = module_trace_bprintk_format_notify, 100 .notifier_call = module_trace_bprintk_format_notify,
100}; 101};
101 102
102int __trace_printk(unsigned long ip, const char *fmt, ...) 103int __trace_bprintk(unsigned long ip, const char *fmt, ...)
103 { 104 {
104 int ret; 105 int ret;
105 va_list ap; 106 va_list ap;
@@ -111,13 +112,13 @@ int __trace_printk(unsigned long ip, const char *fmt, ...)
111 return 0; 112 return 0;
112 113
113 va_start(ap, fmt); 114 va_start(ap, fmt);
114 ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); 115 ret = trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap);
115 va_end(ap); 116 va_end(ap);
116 return ret; 117 return ret;
117} 118}
118EXPORT_SYMBOL_GPL(__trace_printk); 119EXPORT_SYMBOL_GPL(__trace_bprintk);
119 120
120int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) 121int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
121 { 122 {
122 if (unlikely(!fmt)) 123 if (unlikely(!fmt))
123 return 0; 124 return 0;
@@ -125,10 +126,141 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
125 if (!(trace_flags & TRACE_ITER_PRINTK)) 126 if (!(trace_flags & TRACE_ITER_PRINTK))
126 return 0; 127 return 0;
127 128
129 return trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap);
130}
131EXPORT_SYMBOL_GPL(__ftrace_vbprintk);
132
133int __trace_printk(unsigned long ip, const char *fmt, ...)
134{
135 int ret;
136 va_list ap;
137
138 if (!(trace_flags & TRACE_ITER_PRINTK))
139 return 0;
140
141 va_start(ap, fmt);
142 ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
143 va_end(ap);
144 return ret;
145}
146EXPORT_SYMBOL_GPL(__trace_printk);
147
148int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
149{
150 if (!(trace_flags & TRACE_ITER_PRINTK))
151 return 0;
152
128 return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); 153 return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
129} 154}
130EXPORT_SYMBOL_GPL(__ftrace_vprintk); 155EXPORT_SYMBOL_GPL(__ftrace_vprintk);
131 156
157static void *
158t_next(struct seq_file *m, void *v, loff_t *pos)
159{
160 const char **fmt = m->private;
161 const char **next = fmt;
162
163 (*pos)++;
164
165 if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
166 return NULL;
167
168 next = fmt;
169 m->private = ++next;
170
171 return fmt;
172}
173
174static void *t_start(struct seq_file *m, loff_t *pos)
175{
176 return t_next(m, NULL, pos);
177}
178
179static int t_show(struct seq_file *m, void *v)
180{
181 const char **fmt = v;
182 const char *str = *fmt;
183 int i;
184
185 seq_printf(m, "0x%lx : \"", (unsigned long)fmt);
186
187 /*
188 * Tabs and new lines need to be converted.
189 */
190 for (i = 0; str[i]; i++) {
191 switch (str[i]) {
192 case '\n':
193 seq_puts(m, "\\n");
194 break;
195 case '\t':
196 seq_puts(m, "\\t");
197 break;
198 case '\\':
199 seq_puts(m, "\\\\");
200 break;
201 case '"':
202 seq_puts(m, "\\\"");
203 break;
204 default:
205 seq_putc(m, str[i]);
206 }
207 }
208 seq_puts(m, "\"\n");
209
210 return 0;
211}
212
213static void t_stop(struct seq_file *m, void *p)
214{
215}
216
217static const struct seq_operations show_format_seq_ops = {
218 .start = t_start,
219 .next = t_next,
220 .show = t_show,
221 .stop = t_stop,
222};
223
224static int
225ftrace_formats_open(struct inode *inode, struct file *file)
226{
227 int ret;
228
229 ret = seq_open(file, &show_format_seq_ops);
230 if (!ret) {
231 struct seq_file *m = file->private_data;
232
233 m->private = __start___trace_bprintk_fmt;
234 }
235 return ret;
236}
237
238static const struct file_operations ftrace_formats_fops = {
239 .open = ftrace_formats_open,
240 .read = seq_read,
241 .llseek = seq_lseek,
242 .release = seq_release,
243};
244
245static __init int init_trace_printk_function_export(void)
246{
247 struct dentry *d_tracer;
248 struct dentry *entry;
249
250 d_tracer = tracing_init_dentry();
251 if (!d_tracer)
252 return 0;
253
254 entry = debugfs_create_file("printk_formats", 0444, d_tracer,
255 NULL, &ftrace_formats_fops);
256 if (!entry)
257 pr_warning("Could not create debugfs "
258 "'printk_formats' entry\n");
259
260 return 0;
261}
262
263fs_initcall(init_trace_printk_function_export);
132 264
133static __init int init_trace_printk(void) 265static __init int init_trace_printk(void)
134{ 266{
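The new printk_formats file exists so that tools decoding binary trace data can map a recorded format-string address back to its text. A userspace sketch that simply dumps the file, assuming debugfs is mounted at /sys/kernel/debug (the path and program are illustrative, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* Each line is 0x<addr> : "<format>", as emitted by t_show(). */
	FILE *f = fopen("/sys/kernel/debug/tracing/printk_formats", "r");
	char line[1024];

	if (!f) {
		perror("printk_formats");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}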
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index d0871bc0aca5..c750f65f9661 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -245,16 +245,31 @@ static int trace_lookup_stack(struct seq_file *m, long i)
245#endif 245#endif
246} 246}
247 247
248static void print_disabled(struct seq_file *m)
249{
250 seq_puts(m, "#\n"
251 "# Stack tracer disabled\n"
252 "#\n"
253 "# To enable the stack tracer, either add 'stacktrace' to the\n"
254 "# kernel command line\n"
255 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
256 "#\n");
257}
258
248static int t_show(struct seq_file *m, void *v) 259static int t_show(struct seq_file *m, void *v)
249{ 260{
250 long i; 261 long i;
251 int size; 262 int size;
252 263
253 if (v == SEQ_START_TOKEN) { 264 if (v == SEQ_START_TOKEN) {
254 seq_printf(m, " Depth Size Location" 265 seq_printf(m, " Depth Size Location"
255 " (%d entries)\n" 266 " (%d entries)\n"
256 " ----- ---- --------\n", 267 " ----- ---- --------\n",
257 max_stack_trace.nr_entries); 268 max_stack_trace.nr_entries);
269
270 if (!stack_tracer_enabled && !max_stack_size)
271 print_disabled(m);
272
258 return 0; 273 return 0;
259 } 274 }
260 275
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index fb5ccac8bbc0..9ab035b58cf1 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -193,12 +193,20 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
193 struct cpu_workqueue_stats *cws = p; 193 struct cpu_workqueue_stats *cws = p;
194 unsigned long flags; 194 unsigned long flags;
195 int cpu = cws->cpu; 195 int cpu = cws->cpu;
196 struct task_struct *tsk = find_task_by_vpid(cws->pid); 196 struct pid *pid;
197 197 struct task_struct *tsk;
198 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, 198
199 atomic_read(&cws->inserted), 199 pid = find_get_pid(cws->pid);
200 cws->executed, 200 if (pid) {
201 tsk ? tsk->comm : "<...>"); 201 tsk = get_pid_task(pid, PIDTYPE_PID);
202 if (tsk) {
203 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
204 atomic_read(&cws->inserted), cws->executed,
205 tsk->comm);
206 put_task_struct(tsk);
207 }
208 put_pid(pid);
209 }
202 210
203 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); 211 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
204 if (&cws->list == workqueue_cpu_stat(cpu)->list.next) 212 if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
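The workqueue_stat_show() change above replaces the unlocked find_task_by_vpid() lookup with a reference-counted one, so the task cannot go away while its comm is being printed. The same pattern, pulled out as a hypothetical standalone helper (name and signature invented for illustration):

#include <linux/errno.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/string.h>

/* Copy the comm of the task with the given pid number, holding a
 * reference on both the struct pid and the task while they are used. */
static int comm_from_pid(pid_t nr, char *comm, size_t len)
{
	struct pid *pid;
	struct task_struct *tsk;
	int ret = -ESRCH;

	pid = find_get_pid(nr);
	if (!pid)
		return ret;

	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (tsk) {
		strlcpy(comm, tsk->comm, len);
		put_task_struct(tsk);
		ret = 0;
	}
	put_pid(pid);
	return ret;
}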