Diffstat (limited to 'kernel/trace/trace.c')
 kernel/trace/trace.c | 204 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 133 insertions(+), 71 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3ec2ee6f6560..086d36316805 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -33,10 +33,10 @@
 #include <linux/kdebug.h>
 #include <linux/string.h>
 #include <linux/rwsem.h>
+#include <linux/slab.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
-#include <linux/gfp.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  *
  * It is default off, but you can enable it with either specifying
  * "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
  */
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
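The dump-mode constants used below come from an enum introduced alongside this change (declared in include/linux/kernel.h in this series). A sketch of that declaration, for orientation only:

	enum ftrace_dump_mode {
		DUMP_NONE,	/* 0: do not dump */
		DUMP_ALL,	/* 1: dump the buffers of all CPUs */
		DUMP_ORIG,	/* 2: dump only the CPU that triggered the oops */
	};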
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-	ftrace_dump_on_oops = 1;
-	return 1;
+	if (*str++ != '=' || !*str) {
+		ftrace_dump_on_oops = DUMP_ALL;
+		return 1;
+	}
+
+	if (!strcmp("orig_cpu", str)) {
+		ftrace_dump_on_oops = DUMP_ORIG;
+		return 1;
+	}
+
+	return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
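Given the parser above, the boot parameter now accepts either form (behavior read directly from the code):

	ftrace_dump_on_oops            -> DUMP_ALL: dump every CPU's buffer on oops
	ftrace_dump_on_oops=orig_cpu   -> DUMP_ORIG: dump only the oopsing CPU's buffer

Any other "=value" is rejected (the setup function returns 0) and the mode stays at its default.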
@@ -1545,7 +1557,8 @@ static void trace_iterator_increment(struct trace_iterator *iter)
 }
 
 static struct trace_entry *
-peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
+peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
+		unsigned long *lost_events)
 {
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
@@ -1556,7 +1569,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
-		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
+		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
+					 lost_events);
 
 	ftrace_enable_cpu();
 
@@ -1564,10 +1578,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 }
 
 static struct trace_entry *
-__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+		  unsigned long *missing_events, u64 *ent_ts)
 {
 	struct ring_buffer *buffer = iter->tr->buffer;
 	struct trace_entry *ent, *next = NULL;
+	unsigned long lost_events = 0, next_lost = 0;
 	int cpu_file = iter->cpu_file;
 	u64 next_ts = 0, ts;
 	int next_cpu = -1;
@@ -1580,7 +1596,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (cpu_file > TRACE_PIPE_ALL_CPU) {
 		if (ring_buffer_empty_cpu(buffer, cpu_file))
 			return NULL;
-		ent = peek_next_entry(iter, cpu_file, ent_ts);
+		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
 		if (ent_cpu)
 			*ent_cpu = cpu_file;
 
@@ -1592,7 +1608,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 		if (ring_buffer_empty_cpu(buffer, cpu))
 			continue;
 
-		ent = peek_next_entry(iter, cpu, &ts);
+		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 
 		/*
 		 * Pick the entry with the smallest timestamp:
@@ -1601,6 +1617,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 			next = ent;
 			next_cpu = cpu;
 			next_ts = ts;
+			next_lost = lost_events;
 		}
 	}
 
@@ -1610,6 +1627,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (ent_ts)
 		*ent_ts = next_ts;
 
+	if (missing_events)
+		*missing_events = next_lost;
+
 	return next;
 }
 
@@ -1617,13 +1637,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts)
 {
-	return __find_next_entry(iter, ent_cpu, ent_ts);
+	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
 static void *find_next_entry_inc(struct trace_iterator *iter)
 {
-	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
+	iter->ent = __find_next_entry(iter, &iter->cpu,
+				      &iter->lost_events, &iter->ts);
 
 	if (iter->ent)
 		trace_iterator_increment(iter);
@@ -1635,7 +1656,8 @@ static void trace_consume(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
-	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
+	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
+			    &iter->lost_events);
 	ftrace_enable_cpu();
 }
 
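ring_buffer_peek() and ring_buffer_consume() now take an extra out-parameter reporting how many events were dropped since the last read, and the iterator stores it in iter->lost_events. A minimal consumer sketch against the new signatures (the buffer/cpu variables and the printing are illustrative, not from this patch):

	unsigned long lost;
	u64 ts;
	struct ring_buffer_event *event;

	/* consume the next event; 'lost' receives the count of events
	 * dropped on this CPU before the one returned */
	event = ring_buffer_consume(buffer, cpu, &ts, &lost);
	if (event && lost)
		pr_info("CPU:%d [LOST %lu EVENTS]\n", cpu, lost);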
@@ -1786,7 +1808,7 @@ static void print_func_help_header(struct seq_file *m)
 }
 
 
-static void
+void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1914,7 +1936,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 	}
 
 	if (event)
-		return event->trace(iter, sym_flags);
+		return event->funcs->trace(iter, sym_flags, event);
 
 	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
 		goto partial;
@@ -1940,7 +1962,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 
 	event = ftrace_find_event(entry->type);
 	if (event)
-		return event->raw(iter, 0);
+		return event->funcs->raw(iter, 0, event);
 
 	if (!trace_seq_printf(s, "%d ?\n", entry->type))
 		goto partial;
@@ -1967,7 +1989,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 
 	event = ftrace_find_event(entry->type);
 	if (event) {
-		enum print_line_t ret = event->hex(iter, 0);
+		enum print_line_t ret = event->funcs->hex(iter, 0, event);
 		if (ret != TRACE_TYPE_HANDLED)
 			return ret;
 	}
@@ -1992,10 +2014,11 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 	}
 
 	event = ftrace_find_event(entry->type);
-	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
+	return event ? event->funcs->binary(iter, 0, event) :
+		TRACE_TYPE_HANDLED;
 }
 
-static int trace_empty(struct trace_iterator *iter)
+int trace_empty(struct trace_iterator *iter)
 {
 	int cpu;
 
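The print callbacks move from struct trace_event itself into a shared "funcs" table, and each callback now receives the event as an extra argument so one table can serve several event types. The shape implied by the call sites above (field layout hedged; the actual definitions live in the trace headers of this series):

	struct trace_event_functions {
		trace_print_func	trace;
		trace_print_func	raw;
		trace_print_func	hex;
		trace_print_func	binary;
	};

	struct trace_event {
		/* ... */
		struct trace_event_functions *funcs;
	};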
@@ -2030,6 +2053,10 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
+	if (iter->lost_events)
+		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+				 iter->cpu, iter->lost_events);
+
 	if (iter->trace && iter->trace->print_line) {
 		ret = iter->trace->print_line(iter);
 		if (ret != TRACE_TYPE_UNHANDLED)
@@ -2058,6 +2085,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 		return print_trace_fmt(iter);
 }
 
+void trace_default_header(struct seq_file *m)
+{
+	struct trace_iterator *iter = m->private;
+
+	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+		/* print nothing if the buffers are empty */
+		if (trace_empty(iter))
+			return;
+		print_trace_header(m, iter);
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_lat_help_header(m);
+	} else {
+		if (!(trace_flags & TRACE_ITER_VERBOSE))
+			print_func_help_header(m);
+	}
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2070,17 +2114,9 @@ static int s_show(struct seq_file *m, void *v)
 	}
 	if (iter->trace && iter->trace->print_header)
 		iter->trace->print_header(m);
-	else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
-		/* print nothing if the buffers are empty */
-		if (trace_empty(iter))
-			return 0;
-		print_trace_header(m, iter);
-		if (!(trace_flags & TRACE_ITER_VERBOSE))
-			print_lat_help_header(m);
-	} else {
-		if (!(trace_flags & TRACE_ITER_VERBOSE))
-			print_func_help_header(m);
-	}
+	else
+		trace_default_header(m);
+
 	} else if (iter->leftover) {
 		/*
 		 * If we filled the seq_file buffer earlier, we
@@ -2166,15 +2202,20 @@ __tracing_open(struct inode *inode, struct file *file)
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
-
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_start(iter->tr->buffer, cpu);
+				ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		}
+		ring_buffer_read_prepare_sync();
+		for_each_tracing_cpu(cpu) {
+			ring_buffer_read_start(iter->buffer_iter[cpu]);
 			tracing_iter_reset(iter, cpu);
 		}
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_start(iter->tr->buffer, cpu);
+			ring_buffer_read_prepare(iter->tr->buffer, cpu);
+		ring_buffer_read_prepare_sync();
+		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
 	}
 
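Iterator startup is now split into three phases: ring_buffer_read_prepare() allocates and readies a per-cpu iterator, a single ring_buffer_read_prepare_sync() then performs the expensive synchronization once for all CPUs, and ring_buffer_read_start() (which now takes the prepared iterator rather than a buffer/cpu pair) finally begins reading. The pattern in isolation, mirroring the hunk above:

	for_each_tracing_cpu(cpu)		/* phase 1: allocate/prepare */
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->tr->buffer, cpu);

	ring_buffer_read_prepare_sync();	/* phase 2: synchronize once */

	for_each_tracing_cpu(cpu)		/* phase 3: start the reads */
		ring_buffer_read_start(iter->buffer_iter[cpu]);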
@@ -3269,12 +3310,12 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 				      size_t len,
 				      unsigned int flags)
 {
-	struct page *pages[PIPE_BUFFERS];
-	struct partial_page partial[PIPE_BUFFERS];
+	struct page *pages_def[PIPE_DEF_BUFFERS];
+	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 	struct trace_iterator *iter = filp->private_data;
 	struct splice_pipe_desc spd = {
-		.pages = pages,
-		.partial = partial,
+		.pages = pages_def,
+		.partial = partial_def,
 		.nr_pages = 0, /* This gets updated below. */
 		.flags = flags,
 		.ops = &tracing_pipe_buf_ops,
@@ -3285,6 +3326,9 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	size_t rem;
 	unsigned int i;
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
 	if (unlikely(old_tracer != current_trace && current_trace)) {
@@ -3315,23 +3359,23 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
-	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
-		pages[i] = alloc_page(GFP_KERNEL);
-		if (!pages[i])
+	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
+		spd.pages[i] = alloc_page(GFP_KERNEL);
+		if (!spd.pages[i])
 			break;
 
 		rem = tracing_fill_pipe_page(rem, iter);
 
 		/* Copy the data into the page, so we can start over. */
 		ret = trace_seq_to_buffer(&iter->seq,
-					  page_address(pages[i]),
+					  page_address(spd.pages[i]),
 					  iter->seq.len);
 		if (ret < 0) {
-			__free_page(pages[i]);
+			__free_page(spd.pages[i]);
 			break;
 		}
-		partial[i].offset = 0;
-		partial[i].len = iter->seq.len;
+		spd.partial[i].offset = 0;
+		spd.partial[i].len = iter->seq.len;
 
 		trace_seq_init(&iter->seq);
 	}
@@ -3342,12 +3386,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
 	spd.nr_pages = i;
 
-	return splice_to_pipe(pipe, &spd);
+	ret = splice_to_pipe(pipe, &spd);
+out:
+	splice_shrink_spd(pipe, &spd);
+	return ret;
 
 out_err:
 	mutex_unlock(&iter->mutex);
-
-	return ret;
+	goto out;
 }
 
 static ssize_t
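Both splice paths (here and in tracing_buffers_splice_read() below) switch from fixed PIPE_BUFFERS-sized on-stack arrays to PIPE_DEF_BUFFERS defaults plus splice_grow_spd()/splice_shrink_spd(), since the pipe buffer count is no longer a compile-time constant but lives in pipe->buffers. The resulting pattern, reduced to a skeleton:

	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages	 = pages_def,
		.partial = partial_def,
		/* ... */
	};

	if (splice_grow_spd(pipe, &spd))	/* heap arrays if pipe->buffers > default */
		return -ENOMEM;

	/* fill up to pipe->buffers entries in spd.pages/spd.partial ... */

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(pipe, &spd);		/* release any grown arrays */
	return ret;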
@@ -3620,7 +3666,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
 	struct ftrace_buffer_info *info = filp->private_data;
-	unsigned int pos;
 	ssize_t ret;
 	size_t size;
 
@@ -3647,11 +3692,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (ret < 0)
 		return 0;
 
-	pos = ring_buffer_page_len(info->spare);
-
-	if (pos < PAGE_SIZE)
-		memset(info->spare + pos, 0, PAGE_SIZE - pos);
-
 read:
 	size = PAGE_SIZE - info->read;
 	if (size > count)
@@ -3746,11 +3786,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 				    unsigned int flags)
 {
 	struct ftrace_buffer_info *info = file->private_data;
-	struct partial_page partial[PIPE_BUFFERS];
-	struct page *pages[PIPE_BUFFERS];
+	struct partial_page partial_def[PIPE_DEF_BUFFERS];
+	struct page *pages_def[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
-		.pages = pages,
-		.partial = partial,
+		.pages = pages_def,
+		.partial = partial_def,
 		.flags = flags,
 		.ops = &buffer_pipe_buf_ops,
 		.spd_release = buffer_spd_release,
@@ -3759,22 +3799,28 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	int entries, size, i;
 	size_t ret;
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
 	if (*ppos & (PAGE_SIZE - 1)) {
 		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (len & (PAGE_SIZE - 1)) {
 		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
-		if (len < PAGE_SIZE)
-			return -EINVAL;
+		if (len < PAGE_SIZE) {
+			ret = -EINVAL;
+			goto out;
+		}
 		len &= PAGE_MASK;
 	}
 
 	trace_access_lock(info->cpu);
 	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
-	for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
+	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
 		int r;
 
@@ -3829,11 +3875,12 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		else
 			ret = 0;
 		/* TODO: block */
-		return ret;
+		goto out;
 	}
 
 	ret = splice_to_pipe(pipe, &spd);
-
+	splice_shrink_spd(pipe, &spd);
+out:
 	return ret;
 }
 
@@ -4324,7 +4371,7 @@ static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
 	if (ftrace_dump_on_oops)
-		ftrace_dump();
+		ftrace_dump(ftrace_dump_on_oops);
 	return NOTIFY_OK;
 }
 
@@ -4341,7 +4388,7 @@ static int trace_die_handler(struct notifier_block *self,
 	switch (val) {
 	case DIE_OOPS:
 		if (ftrace_dump_on_oops)
-			ftrace_dump(ftrace_dump_on_oops);
+			ftrace_dump(ftrace_dump_on_oops);
 		break;
 	default:
 		break;
@@ -4382,7 +4429,8 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
 	static arch_spinlock_t ftrace_dump_lock =
 		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4415,12 +4463,25 @@ static void __ftrace_dump(bool disable_tracing)
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-	printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
 	/* Simulate the iterator */
 	iter.tr = &global_trace;
 	iter.trace = current_trace;
-	iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+	switch (oops_dump_mode) {
+	case DUMP_ALL:
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+		break;
+	case DUMP_ORIG:
+		iter.cpu_file = raw_smp_processor_id();
+		break;
+	case DUMP_NONE:
+		goto out_enable;
+	default:
+		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+		iter.cpu_file = TRACE_PIPE_ALL_CPU;
+	}
+
+	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
 	/*
 	 * We need to stop all tracing on all CPUS to read the
@@ -4459,6 +4520,7 @@ static void __ftrace_dump(bool disable_tracing)
 	else
 		printk(KERN_TRACE "---------------------------------\n");
 
+ out_enable:
 	/* Re-enable tracing if requested */
 	if (!disable_tracing) {
 		trace_flags |= old_userobj;
@@ -4475,9 +4537,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-	__ftrace_dump(true);
+	__ftrace_dump(true, oops_dump_mode);
 }
 
 __init static int tracer_alloc_buffers(void)