path: root/kernel/trace/trace.c
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 178
1 file changed, 133 insertions(+), 45 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed01fdba4a55..756d7283318b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -33,10 +33,10 @@
 #include <linux/kdebug.h>
 #include <linux/string.h>
 #include <linux/rwsem.h>
+#include <linux/slab.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
-#include <linux/gfp.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -117,9 +117,12 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
  *
  * It is default off, but you can enable it with either specifying
  * "ftrace_dump_on_oops" in the kernel command line, or setting
- * /proc/sys/kernel/ftrace_dump_on_oops to true.
+ * /proc/sys/kernel/ftrace_dump_on_oops
+ * Set 1 if you want to dump buffers of all CPUs
+ * Set 2 if you want to dump the buffer of the CPU that triggered oops
  */
-int ftrace_dump_on_oops;
+
+enum ftrace_dump_mode ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
@@ -139,8 +142,17 @@ __setup("ftrace=", set_cmdline_ftrace);
 
 static int __init set_ftrace_dump_on_oops(char *str)
 {
-        ftrace_dump_on_oops = 1;
-        return 1;
+        if (*str++ != '=' || !*str) {
+                ftrace_dump_on_oops = DUMP_ALL;
+                return 1;
+        }
+
+        if (!strcmp("orig_cpu", str)) {
+                ftrace_dump_on_oops = DUMP_ORIG;
+                return 1;
+        }
+
+        return 0;
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
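For context: the DUMP_ALL and DUMP_ORIG values used above belong to the new enum ftrace_dump_mode, whose declaration is not part of this diff (it lives outside trace.c). Based on the 0/1/2 semantics documented in the comment block earlier in this patch, the presumed declaration would look roughly like this sketch:

    /* Presumed declaration, not shown in this diff; names taken from the hunks above. */
    enum ftrace_dump_mode {
            DUMP_NONE,      /* 0: do not dump any buffer on oops */
            DUMP_ALL,       /* 1: dump the buffers of all CPUs */
            DUMP_ORIG,      /* 2: dump only the buffer of the CPU that triggered the oops */
    };

With this parser, booting with "ftrace_dump_on_oops" (no value) selects DUMP_ALL, "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG, and any other value is rejected.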
@@ -374,6 +386,21 @@ static int __init set_buf_size(char *str)
 }
 __setup("trace_buf_size=", set_buf_size);
 
+static int __init set_tracing_thresh(char *str)
+{
+        unsigned long threshhold;
+        int ret;
+
+        if (!str)
+                return 0;
+        ret = strict_strtoul(str, 0, &threshhold);
+        if (ret < 0)
+                return 0;
+        tracing_thresh = threshhold * 1000;
+        return 1;
+}
+__setup("tracing_thresh=", set_tracing_thresh);
+
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
         return nsecs / 1000;
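A worked example of the new boot parameter (hypothetical value): booting with "tracing_thresh=100" makes strict_strtoul() parse 100, and tracing_thresh is stored as 100 * 1000 = 100000. In other words, the command-line value is given in microseconds and kept internally at nanosecond resolution, which is consistent with the nsecs_to_usecs() helper here and with the tracing_thresh debugfs file being wired to tracing_max_lat_fops further down in this patch (that handler presumably converts back via nsecs_to_usecs()).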
@@ -579,9 +606,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
         (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
-unsigned long __read_mostly tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -592,7 +620,7 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
         struct trace_array_cpu *data = tr->data[cpu];
-        struct trace_array_cpu *max_data = tr->data[cpu];
+        struct trace_array_cpu *max_data;
 
         max_tr.cpu = cpu;
         max_tr.time_start = data->preempt_timestamp;
@@ -602,7 +630,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
         max_data->critical_start = data->critical_start;
         max_data->critical_end = data->critical_end;
 
-        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
         max_data->pid = tsk->pid;
         max_data->uid = task_uid(tsk);
         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
@@ -824,10 +852,10 @@ out:
         mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
         ftrace_disable_cpu();
-        ring_buffer_reset_cpu(tr->buffer, cpu);
+        ring_buffer_reset_cpu(buffer, cpu);
         ftrace_enable_cpu();
 }
 
@@ -839,7 +867,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
         /* Make sure all commits have finished */
         synchronize_sched();
-        __tracing_reset(tr, cpu);
+        __tracing_reset(buffer, cpu);
 
         ring_buffer_record_enable(buffer);
 }
@@ -857,7 +885,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
         tr->time_start = ftrace_now(tr->cpu);
 
         for_each_online_cpu(cpu)
-                __tracing_reset(tr, cpu);
+                __tracing_reset(buffer, cpu);
 
         ring_buffer_record_enable(buffer);
 }
@@ -934,6 +962,8 @@ void tracing_start(void)
                 goto out;
         }
 
+        /* Prevent the buffers from switching */
+        arch_spin_lock(&ftrace_max_lock);
 
         buffer = global_trace.buffer;
         if (buffer)
@@ -943,6 +973,8 @@ void tracing_start(void)
         if (buffer)
                 ring_buffer_record_enable(buffer);
 
+        arch_spin_unlock(&ftrace_max_lock);
+
         ftrace_start();
  out:
         spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -964,6 +996,9 @@ void tracing_stop(void)
         if (trace_stop_count++)
                 goto out;
 
+        /* Prevent the buffers from switching */
+        arch_spin_lock(&ftrace_max_lock);
+
         buffer = global_trace.buffer;
         if (buffer)
                 ring_buffer_record_disable(buffer);
@@ -972,6 +1007,8 @@ void tracing_stop(void)
         if (buffer)
                 ring_buffer_record_disable(buffer);
 
+        arch_spin_unlock(&ftrace_max_lock);
+
  out:
         spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
@@ -1259,6 +1296,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                 return;
 
+        /*
+         * NMIs can not handle page faults, even with fix ups.
+         * The save user stack can (and often does) fault.
+         */
+        if (unlikely(in_nmi()))
+                return;
+
         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                           sizeof(*entry), flags, pc);
         if (!event)
@@ -1513,7 +1557,8 @@ static void trace_iterator_increment(struct trace_iterator *iter)
 }
 
 static struct trace_entry *
-peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
+peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
+                unsigned long *lost_events)
 {
         struct ring_buffer_event *event;
         struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
@@ -1524,7 +1569,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
         if (buf_iter)
                 event = ring_buffer_iter_peek(buf_iter, ts);
         else
-                event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
+                event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
+                                         lost_events);
 
         ftrace_enable_cpu();
 
@@ -1532,10 +1578,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 }
 
 static struct trace_entry *
-__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+                  unsigned long *missing_events, u64 *ent_ts)
 {
         struct ring_buffer *buffer = iter->tr->buffer;
         struct trace_entry *ent, *next = NULL;
+        unsigned long lost_events = 0, next_lost = 0;
         int cpu_file = iter->cpu_file;
         u64 next_ts = 0, ts;
         int next_cpu = -1;
@@ -1548,7 +1596,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
         if (cpu_file > TRACE_PIPE_ALL_CPU) {
                 if (ring_buffer_empty_cpu(buffer, cpu_file))
                         return NULL;
-                ent = peek_next_entry(iter, cpu_file, ent_ts);
+                ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
                 if (ent_cpu)
                         *ent_cpu = cpu_file;
 
@@ -1560,7 +1608,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
                 if (ring_buffer_empty_cpu(buffer, cpu))
                         continue;
 
-                ent = peek_next_entry(iter, cpu, &ts);
+                ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 
                 /*
                  * Pick the entry with the smallest timestamp:
@@ -1569,6 +1617,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
                         next = ent;
                         next_cpu = cpu;
                         next_ts = ts;
+                        next_lost = lost_events;
                 }
         }
 
@@ -1578,6 +1627,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
         if (ent_ts)
                 *ent_ts = next_ts;
 
+        if (missing_events)
+                *missing_events = next_lost;
+
         return next;
 }
 
@@ -1585,13 +1637,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                           int *ent_cpu, u64 *ent_ts)
 {
-        return __find_next_entry(iter, ent_cpu, ent_ts);
+        return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
 static void *find_next_entry_inc(struct trace_iterator *iter)
 {
-        iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
+        iter->ent = __find_next_entry(iter, &iter->cpu,
+                                      &iter->lost_events, &iter->ts);
 
         if (iter->ent)
                 trace_iterator_increment(iter);
@@ -1603,7 +1656,8 @@ static void trace_consume(struct trace_iterator *iter)
 {
         /* Don't allow ftrace to trace into the ring buffers */
         ftrace_disable_cpu();
-        ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
+        ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
+                            &iter->lost_events);
         ftrace_enable_cpu();
 }
 
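These call-site changes imply that ring_buffer_peek() and ring_buffer_consume() now take an extra out-parameter reporting how many events were lost (overwritten) since the last read. The updated prototypes, inferred from the calls above (the actual declarations live in the ring buffer header, not in this diff), would look roughly like:

    /* Sketch inferred from the call sites above; not part of this diff. */
    struct ring_buffer_event *
    ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
                     unsigned long *lost_events);
    struct ring_buffer_event *
    ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
                        unsigned long *lost_events);

The count ends up in iter->lost_events, which print_trace_line() below uses to emit the "CPU:%d [LOST %lu EVENTS]" marker.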
@@ -1703,6 +1757,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
                 ftrace_enable_cpu();
 
+                iter->leftover = 0;
                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                         ;
 
@@ -1753,7 +1808,7 @@ static void print_func_help_header(struct seq_file *m)
 }
 
 
-static void
+void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1962,7 +2017,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
         return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
 }
 
-static int trace_empty(struct trace_iterator *iter)
+int trace_empty(struct trace_iterator *iter)
 {
         int cpu;
 
@@ -1997,6 +2052,10 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
         enum print_line_t ret;
 
+        if (iter->lost_events)
+                trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+                                 iter->cpu, iter->lost_events);
+
         if (iter->trace && iter->trace->print_line) {
                 ret = iter->trace->print_line(iter);
                 if (ret != TRACE_TYPE_UNHANDLED)
@@ -2025,6 +2084,23 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
         return print_trace_fmt(iter);
 }
 
+void trace_default_header(struct seq_file *m)
+{
+        struct trace_iterator *iter = m->private;
+
+        if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+                /* print nothing if the buffers are empty */
+                if (trace_empty(iter))
+                        return;
+                print_trace_header(m, iter);
+                if (!(trace_flags & TRACE_ITER_VERBOSE))
+                        print_lat_help_header(m);
+        } else {
+                if (!(trace_flags & TRACE_ITER_VERBOSE))
+                        print_func_help_header(m);
+        }
+}
+
 static int s_show(struct seq_file *m, void *v)
 {
         struct trace_iterator *iter = v;
@@ -2037,17 +2113,9 @@ static int s_show(struct seq_file *m, void *v)
                 }
                 if (iter->trace && iter->trace->print_header)
                         iter->trace->print_header(m);
-                else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
-                        /* print nothing if the buffers are empty */
-                        if (trace_empty(iter))
-                                return 0;
-                        print_trace_header(m, iter);
-                        if (!(trace_flags & TRACE_ITER_VERBOSE))
-                                print_lat_help_header(m);
-                } else {
-                        if (!(trace_flags & TRACE_ITER_VERBOSE))
-                                print_func_help_header(m);
-                }
+                else
+                        trace_default_header(m);
+
         } else if (iter->leftover) {
                 /*
                  * If we filled the seq_file buffer earlier, we
@@ -2133,15 +2201,20 @@ __tracing_open(struct inode *inode, struct file *file)
 
         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
                 for_each_tracing_cpu(cpu) {
-
                         iter->buffer_iter[cpu] =
-                                ring_buffer_read_start(iter->tr->buffer, cpu);
+                                ring_buffer_read_prepare(iter->tr->buffer, cpu);
+                }
+                ring_buffer_read_prepare_sync();
+                for_each_tracing_cpu(cpu) {
+                        ring_buffer_read_start(iter->buffer_iter[cpu]);
                         tracing_iter_reset(iter, cpu);
                 }
         } else {
                 cpu = iter->cpu_file;
                 iter->buffer_iter[cpu] =
-                        ring_buffer_read_start(iter->tr->buffer, cpu);
+                        ring_buffer_read_prepare(iter->tr->buffer, cpu);
+                ring_buffer_read_prepare_sync();
+                ring_buffer_read_start(iter->buffer_iter[cpu]);
                 tracing_iter_reset(iter, cpu);
         }
 
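The open path now splits reader setup into a per-CPU prepare step, a single synchronization point, and the actual start. Inferred from the call sites above (the declarations themselves are outside this diff), the reworked ring buffer reader API looks roughly like:

    /* Sketch inferred from the call sites above; not part of this diff. */
    struct ring_buffer_iter *
    ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
    void ring_buffer_read_prepare_sync(void);
    void ring_buffer_read_start(struct ring_buffer_iter *iter);

Previously ring_buffer_read_start() took the buffer and a CPU number and returned the iterator; after this change it operates on an iterator already allocated by ring_buffer_read_prepare().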
@@ -4248,10 +4321,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
         trace_create_file("tracing_max_latency", 0644, d_tracer,
                         &tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
         trace_create_file("tracing_thresh", 0644, d_tracer,
                         &tracing_thresh, &tracing_max_lat_fops);
-#endif
 
         trace_create_file("README", 0444, d_tracer,
                         NULL, &tracing_readme_fops);
@@ -4291,7 +4364,7 @@ static int trace_panic_handler(struct notifier_block *this,
                                         unsigned long event, void *unused)
 {
         if (ftrace_dump_on_oops)
-                ftrace_dump();
+                ftrace_dump(ftrace_dump_on_oops);
         return NOTIFY_OK;
 }
 
@@ -4308,7 +4381,7 @@ static int trace_die_handler(struct notifier_block *self,
         switch (val) {
         case DIE_OOPS:
                 if (ftrace_dump_on_oops)
-                        ftrace_dump();
+                        ftrace_dump(ftrace_dump_on_oops);
                 break;
         default:
                 break;
@@ -4349,7 +4422,8 @@ trace_printk_seq(struct trace_seq *s)
         trace_seq_init(s);
 }
 
-static void __ftrace_dump(bool disable_tracing)
+static void
+__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
         static arch_spinlock_t ftrace_dump_lock =
                 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -4382,12 +4456,25 @@ static void __ftrace_dump(bool disable_tracing)
         /* don't look at user memory in panic mode */
         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-        printk(KERN_TRACE "Dumping ftrace buffer:\n");
-
         /* Simulate the iterator */
         iter.tr = &global_trace;
         iter.trace = current_trace;
-        iter.cpu_file = TRACE_PIPE_ALL_CPU;
+
+        switch (oops_dump_mode) {
+        case DUMP_ALL:
+                iter.cpu_file = TRACE_PIPE_ALL_CPU;
+                break;
+        case DUMP_ORIG:
+                iter.cpu_file = raw_smp_processor_id();
+                break;
+        case DUMP_NONE:
+                goto out_enable;
+        default:
+                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
+                iter.cpu_file = TRACE_PIPE_ALL_CPU;
+        }
+
+        printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
         /*
          * We need to stop all tracing on all CPUS to read the
@@ -4426,6 +4513,7 @@ static void __ftrace_dump(bool disable_tracing)
         else
                 printk(KERN_TRACE "---------------------------------\n");
 
+ out_enable:
         /* Re-enable tracing if requested */
         if (!disable_tracing) {
                 trace_flags |= old_userobj;
@@ -4442,9 +4530,9 @@ static void __ftrace_dump(bool disable_tracing)
 }
 
 /* By default: disable tracing after the dump */
-void ftrace_dump(void)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-        __ftrace_dump(true);
+        __ftrace_dump(true, oops_dump_mode);
 }
 
 __init static int tracer_alloc_buffers(void)