-rw-r--r--  Documentation/ftrace.txt          |  6
-rw-r--r--  arch/Kconfig                      |  1
-rw-r--r--  include/linux/ftrace.h            | 58
-rw-r--r--  include/linux/kernel.h            | 58
-rw-r--r--  kernel/trace/Kconfig              | 29
-rw-r--r--  kernel/trace/blktrace.c           |  1
-rw-r--r--  kernel/trace/ring_buffer.c        |  6
-rw-r--r--  kernel/trace/trace.c              | 91
-rw-r--r--  kernel/trace/trace.h              |  5
-rw-r--r--  kernel/trace/trace_branch.c       |  1
-rw-r--r--  kernel/trace/trace_irqsoff.c      |  8
-rw-r--r--  kernel/trace/trace_output.c       | 32
-rw-r--r--  kernel/trace/trace_output.h       |  1
-rw-r--r--  kernel/trace/trace_sched_wakeup.c |  8
14 files changed, 133 insertions(+), 172 deletions(-)
diff --git a/Documentation/ftrace.txt b/Documentation/ftrace.txt
index 2041ee951c1a..22614bef6359 100644
--- a/Documentation/ftrace.txt
+++ b/Documentation/ftrace.txt
@@ -1466,11 +1466,11 @@ want, depending on your needs.
 
 
 You can put some comments on specific functions by using
-ftrace_printk() For example, if you want to put a comment inside
+trace_printk() For example, if you want to put a comment inside
 the __might_sleep() function, you just have to include
-<linux/ftrace.h> and call ftrace_printk() inside __might_sleep()
+<linux/ftrace.h> and call trace_printk() inside __might_sleep()
 
-ftrace_printk("I'm a comment!\n")
+trace_printk("I'm a comment!\n")
 
 will produce:
 
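
For illustration, this is the shape of the documented usage after the rename; the surrounding function (my_hot_path) and the message are hypothetical, only trace_printk() itself comes from this patch:

	#include <linux/kernel.h>	/* declares trace_printk() after this series */

	static void my_hot_path(int value)
	{
		/*
		 * Writes into the ftrace ring buffer instead of the printk
		 * log, so the marker shows up inline with other trace entries.
		 */
		trace_printk("my_hot_path: value=%d\n", value);
	}
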
diff --git a/arch/Kconfig b/arch/Kconfig
index 550dab22daa1..a092dc77c24d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -6,6 +6,7 @@ config OPROFILE
 	tristate "OProfile system profiling (EXPERIMENTAL)"
 	depends on PROFILING
 	depends on HAVE_OPROFILE
+	depends on TRACING_SUPPORT
 	select TRACING
 	select RING_BUFFER
 	help
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6ea62acbe4b6..498769425eb2 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -319,62 +319,6 @@ static inline void __ftrace_enabled_restore(int enabled)
 # define trace_preempt_off(a0, a1) do { } while (0)
 #endif
 
-#ifdef CONFIG_TRACING
-extern int ftrace_dump_on_oops;
-
-extern void tracing_start(void);
-extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
-
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
-/**
- * ftrace_printk - printf formatting in the ftrace buffer
- * @fmt: the printf format for printing
- *
- * Note: __ftrace_printk is an internal function for ftrace_printk and
- * the @ip is passed in via the ftrace_printk macro.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving ftrace_printks scattered around in
- * your code.
- */
-# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
-extern int
-__ftrace_printk(unsigned long ip, const char *fmt, ...)
-	__attribute__ ((format (printf, 2, 3)));
-# define ftrace_vprintk(fmt, ap) __ftrace_printk(_THIS_IP_, fmt, ap)
-extern int
-__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
-extern void ftrace_dump(void);
-#else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
-static inline int
-ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
-
-static inline void tracing_start(void) { }
-static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
-static inline int
-ftrace_printk(const char *fmt, ...)
-{
-	return 0;
-}
-static inline int
-ftrace_vprintk(const char *fmt, va_list ap)
-{
-	return 0;
-}
-static inline void ftrace_dump(void) { }
-#endif
-
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 extern void ftrace_init(void);
 extern void ftrace_init_module(struct module *mod,
@@ -543,6 +487,8 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 	return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
 
+extern int ftrace_dump_on_oops;
+
 #endif /* CONFIG_TRACING */
 
 
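
The ftrace_dump_on_oops declaration now lives under CONFIG_TRACING alongside the other tracing hooks. As a hedged sketch (my_check() and my_bad_state are illustrative; ftrace_dump() is the interface this hunk keeps exported), a developer might dump the buffer once when an unexpected condition is first seen:

	#include <linux/kernel.h>

	static void my_check(int my_bad_state)
	{
		static int dumped;

		if (my_bad_state && !dumped) {
			dumped = 1;
			ftrace_dump();	/* spill the ftrace buffer to the console */
		}
	}
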
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7fa371898e3e..08bf5da86676 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -368,6 +368,64 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
 #endif
 
 /*
+ * General tracing related utility functions - trace_printk(),
+ * tracing_start()/tracing_stop:
+ */
+#ifdef CONFIG_TRACING
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+/**
+ * trace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __trace_printk is an internal function for trace_printk and
+ * the @ip is passed in via the trace_printk macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_printks scattered around in
+ * your code.
+ */
+# define trace_printk(fmt...) __trace_printk(_THIS_IP_, fmt)
+extern int
+__trace_printk(unsigned long ip, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+# define ftrace_vprintk(fmt, ap) __trace_printk(_THIS_IP_, fmt, ap)
+extern int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+extern void ftrace_dump(void);
+#else
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+static inline int
+trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void ftrace_off_permanent(void) { }
+static inline int
+trace_printk(const char *fmt, ...)
+{
+	return 0;
+}
+static inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+	return 0;
+}
+static inline void ftrace_dump(void) { }
+#endif
+
+/*
  * Display an IP address in readable format.
  */
 
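
The !CONFIG_TRACING branch keeps a no-op stub that still carries the printf format attribute, so callers get format-string checking whether or not tracing is built in. A minimal userspace sketch of the same pattern (my_debug and MY_DEBUG are made-up names):

	#include <stdio.h>

	#ifdef MY_DEBUG
	#define my_debug(fmt, ...) printf(fmt, ##__VA_ARGS__)
	#else
	static inline int my_debug(const char *fmt, ...)
		__attribute__ ((format (printf, 1, 2)));
	static inline int my_debug(const char *fmt, ...)
	{
		return 0;	/* compiled out, but gcc still checks the format */
	}
	#endif
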
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 999c6a2485df..5d733da5345a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -53,12 +53,22 @@ config TRACING
 	select TRACEPOINTS
 	select NOP_TRACER
 
+#
+# Minimum requirements an architecture has to meet for us to
+# be able to offer generic tracing facilities:
+#
+config TRACING_SUPPORT
+	bool
+	depends on TRACE_IRQFLAGS_SUPPORT
+	depends on STACKTRACE_SUPPORT
+
+if TRACING_SUPPORT
+
 menu "Tracers"
 
 config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	depends on HAVE_FUNCTION_TRACER
-	depends on DEBUG_KERNEL
 	select FRAME_POINTER
 	select KALLSYMS
 	select TRACING
@@ -91,7 +101,6 @@ config IRQSOFF_TRACER
 	default n
 	depends on TRACE_IRQFLAGS_SUPPORT
 	depends on GENERIC_TIME
-	depends on DEBUG_KERNEL
 	select TRACE_IRQFLAGS
 	select TRACING
 	select TRACER_MAX_TRACE
@@ -114,7 +123,6 @@ config PREEMPT_TRACER
 	default n
 	depends on GENERIC_TIME
 	depends on PREEMPT
-	depends on DEBUG_KERNEL
 	select TRACING
 	select TRACER_MAX_TRACE
 	help
@@ -142,7 +150,6 @@ config SYSPROF_TRACER
 
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
-	depends on DEBUG_KERNEL
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	select TRACER_MAX_TRACE
@@ -152,7 +159,6 @@ config SCHED_TRACER
 
 config CONTEXT_SWITCH_TRACER
 	bool "Trace process context switches"
-	depends on DEBUG_KERNEL
 	select TRACING
 	select MARKERS
 	help
@@ -161,7 +167,6 @@ config CONTEXT_SWITCH_TRACER
 
 config EVENT_TRACER
 	bool "Trace various events in the kernel"
-	depends on DEBUG_KERNEL
 	select TRACING
 	help
 	  This tracer hooks to various trace points in the kernel
@@ -170,7 +175,6 @@ config EVENT_TRACER
 
 config BOOT_TRACER
 	bool "Trace boot initcalls"
-	depends on DEBUG_KERNEL
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	help
@@ -188,7 +192,6 @@ config BOOT_TRACER
 
 config TRACE_BRANCH_PROFILING
 	bool "Trace likely/unlikely profiler"
-	depends on DEBUG_KERNEL
 	select TRACING
 	help
 	  This tracer profiles all the the likely and unlikely macros
@@ -241,7 +244,6 @@ config BRANCH_TRACER
 
 config POWER_TRACER
 	bool "Trace power consumption behavior"
-	depends on DEBUG_KERNEL
 	depends on X86
 	select TRACING
 	help
@@ -253,7 +255,6 @@ config POWER_TRACER
 config STACK_TRACER
 	bool "Trace max stack"
 	depends on HAVE_FUNCTION_TRACER
-	depends on DEBUG_KERNEL
 	select FUNCTION_TRACER
 	select STACKTRACE
 	select KALLSYMS
@@ -343,7 +344,6 @@ config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
 	depends on FUNCTION_TRACER
 	depends on HAVE_DYNAMIC_FTRACE
-	depends on DEBUG_KERNEL
 	default y
 	help
 	  This option will modify all the calls to ftrace dynamically
@@ -369,7 +369,7 @@ config FTRACE_SELFTEST
 
 config FTRACE_STARTUP_TEST
 	bool "Perform a startup test on ftrace"
-	depends on TRACING && DEBUG_KERNEL
+	depends on TRACING
 	select FTRACE_SELFTEST
 	help
 	  This option performs a series of startup tests on ftrace. On bootup
@@ -379,7 +379,7 @@ config FTRACE_STARTUP_TEST
 
 config MMIOTRACE
 	bool "Memory mapped IO tracing"
-	depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+	depends on HAVE_MMIOTRACE_SUPPORT && PCI
 	select TRACING
 	help
 	  Mmiotrace traces Memory Mapped I/O access and is meant for
@@ -401,3 +401,6 @@ config MMIOTRACE_TEST
 	  Say N, unless you absolutely know what you are doing.
 
 endmenu
+
+endif # TRACING_SUPPORT
+
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index e82cb9e930cc..e39679a72a3b 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1231,7 +1231,6 @@ static struct tracer blk_tracer __read_mostly = {
 static struct trace_event trace_blk_event = {
 	.type = TRACE_BLK,
 	.trace = blk_trace_event_print,
-	.latency_trace = blk_trace_event_print,
 	.binary = blk_trace_event_print_binary,
 };
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f2a163db52f9..f7473645b9c6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2461,6 +2461,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	unsigned long flags;
 	unsigned int commit;
 	unsigned int read;
+	u64 save_timestamp;
 	int ret = -1;
 
 	/*
@@ -2515,6 +2516,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		if (len < size)
 			goto out;
 
+		/* save the current timestamp, since the user will need it */
+		save_timestamp = cpu_buffer->read_stamp;
+
 		/* Need to copy one event at a time */
 		do {
 			memcpy(bpage->data + pos, rpage->data + rpos, size);
@@ -2531,7 +2535,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 
 		/* update bpage */
 		local_set(&bpage->commit, pos);
-		bpage->time_stamp = rpage->time_stamp;
+		bpage->time_stamp = save_timestamp;
 
 		/* we copied everything to the beginning */
 		read = 0;
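
The comment added above says the user needs the timestamp; presumably cpu_buffer->read_stamp moves forward as events are consumed inside the copy loop, so it has to be captured before the loop and used afterwards. A self-contained sketch of that snapshot-before-advance shape (all names here are hypothetical, not ring-buffer API):

	#include <stdint.h>

	struct demo_reader {
		uint64_t read_stamp;	/* timestamp of the next unread event */
		int	 remaining;	/* events left to copy */
	};

	static uint64_t demo_copy_events(struct demo_reader *r)
	{
		uint64_t save_timestamp = r->read_stamp;	/* stamp of the first copied event */

		while (r->remaining-- > 0)
			r->read_stamp += 1000;	/* consuming an event moves the stamp on */

		/* the page handed back is stamped with the saved value */
		return save_timestamp;
	}
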
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c8abbb0c8397..c0e9c1263393 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -48,7 +48,7 @@ unsigned long __read_mostly tracing_thresh;
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
  * entries inserted during the selftest although some concurrent
- * insertions into the ring-buffer such as ftrace_printk could occurred
+ * insertions into the ring-buffer such as trace_printk could occurred
  * at the same time, giving false positive or negative results.
  */
 static bool __read_mostly tracing_selftest_running;
@@ -291,7 +291,7 @@ static const char *trace_options[] = {
291 "block", 291 "block",
292 "stacktrace", 292 "stacktrace",
293 "sched-tree", 293 "sched-tree",
294 "ftrace_printk", 294 "trace_printk",
295 "ftrace_preempt", 295 "ftrace_preempt",
296 "branch", 296 "branch",
297 "annotate", 297 "annotate",
@@ -299,6 +299,7 @@ static const char *trace_options[] = {
299 "sym-userobj", 299 "sym-userobj",
300 "printk-msg-only", 300 "printk-msg-only",
301 "context-info", 301 "context-info",
302 "latency-format",
302 NULL 303 NULL
303}; 304};
304 305
@@ -346,6 +347,9 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 	int len;
 	int ret;
 
+	if (!cnt)
+		return 0;
+
 	if (s->len <= s->readpos)
 		return -EBUSY;
 
@@ -353,10 +357,12 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 	if (cnt > len)
 		cnt = len;
 	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-	if (ret)
+	if (ret == cnt)
 		return -EFAULT;
 
-	s->readpos += len;
+	cnt -= ret;
+
+	s->readpos += cnt;
 	return cnt;
 }
 
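
The fix above relies on the copy_to_user() convention: the return value is the number of bytes that could not be copied, so only a completely failed copy is -EFAULT and a short copy still counts as progress. A reduced kernel-style sketch of the same pattern (demo_read and kbuf are illustrative names):

	#include <linux/uaccess.h>

	static ssize_t demo_read(char __user *ubuf, size_t cnt,
				 const char *kbuf, size_t len)
	{
		size_t not_copied;

		if (cnt > len)
			cnt = len;

		not_copied = copy_to_user(ubuf, kbuf, cnt);
		if (not_copied == cnt)
			return -EFAULT;		/* nothing reached userspace */

		return cnt - not_copied;	/* report only what was copied */
	}
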
@@ -375,7 +381,7 @@ ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	if (!ret)
 		return -EFAULT;
 
-	s->readpos += len;
+	s->readpos += cnt;
 	return cnt;
 }
 
@@ -1462,33 +1468,6 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
 }
 
-static enum print_line_t print_lat_fmt(struct trace_iterator *iter)
-{
-	struct trace_seq *s = &iter->seq;
-	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
-	struct trace_event *event;
-	struct trace_entry *entry = iter->ent;
-
-	test_cpu_buff_start(iter);
-
-	event = ftrace_find_event(entry->type);
-
-	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-		if (!trace_print_lat_context(iter))
-			goto partial;
-	}
-
-	if (event)
-		return event->latency_trace(iter, sym_flags);
-
-	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
-		goto partial;
-
-	return TRACE_TYPE_HANDLED;
-partial:
-	return TRACE_TYPE_PARTIAL_LINE;
-}
-
 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
@@ -1503,8 +1482,13 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 	event = ftrace_find_event(entry->type);
 
 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-		if (!trace_print_context(iter))
-			goto partial;
+		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+			if (!trace_print_lat_context(iter))
+				goto partial;
+		} else {
+			if (!trace_print_context(iter))
+				goto partial;
+		}
 	}
 
 	if (event)
@@ -1646,9 +1630,6 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 	if (trace_flags & TRACE_ITER_RAW)
 		return print_raw_fmt(iter);
 
-	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
-		return print_lat_fmt(iter);
-
 	return print_trace_fmt(iter);
 }
 
@@ -1824,26 +1805,12 @@ static int tracing_open(struct inode *inode, struct file *file)
 	iter = __tracing_open(inode, file);
 	if (IS_ERR(iter))
 		ret = PTR_ERR(iter);
-
-	return ret;
-}
-
-static int tracing_lt_open(struct inode *inode, struct file *file)
-{
-	struct trace_iterator *iter;
-	int ret = 0;
-
-	iter = __tracing_open(inode, file);
-
-	if (IS_ERR(iter))
-		ret = PTR_ERR(iter);
-	else
+	else if (trace_flags & TRACE_ITER_LATENCY_FMT)
 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
 
 	return ret;
 }
 
-
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
@@ -1922,13 +1889,6 @@ static struct file_operations tracing_fops = {
 	.release = tracing_release,
 };
 
-static struct file_operations tracing_lt_fops = {
-	.open = tracing_lt_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = tracing_release,
-};
-
 static struct file_operations show_traces_fops = {
 	.open = show_traces_open,
 	.read = seq_read,
@@ -3049,6 +3009,9 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	ssize_t ret;
 	size_t size;
 
+	if (!count)
+		return 0;
+
 	/* Do we have previous read data to read? */
 	if (info->read < PAGE_SIZE)
 		goto read;
@@ -3073,8 +3036,10 @@ read:
 		size = count;
 
 	ret = copy_to_user(ubuf, info->spare + info->read, size);
-	if (ret)
+	if (ret == size)
 		return -EFAULT;
+	size -= ret;
+
 	*ppos += size;
 	info->read += size;
 
@@ -3803,7 +3768,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
-int __ftrace_printk(unsigned long ip, const char *fmt, ...)
+int __trace_printk(unsigned long ip, const char *fmt, ...)
 {
 	int ret;
 	va_list ap;
@@ -3816,7 +3781,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
 	va_end(ap);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(__ftrace_printk);
+EXPORT_SYMBOL_GPL(__trace_printk);
 
 int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
 {
@@ -3918,8 +3883,10 @@ void ftrace_dump(void)
 
 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
+	/* Simulate the iterator */
 	iter.tr = &global_trace;
 	iter.trace = current_trace;
+	iter.cpu_file = TRACE_PIPE_ALL_CPU;
 
 	/*
 	 * We need to stop all tracing on all CPUS to read the
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 561bb5c5d988..8beff03fda68 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -115,7 +115,7 @@ struct userstack_entry {
 };
 
 /*
- * ftrace_printk entry:
+ * trace_printk entry:
  */
 struct print_entry {
 	struct trace_entry ent;
@@ -651,7 +651,8 @@ enum trace_iterator_flags {
 	TRACE_ITER_USERSTACKTRACE = 0x4000,
 	TRACE_ITER_SYM_USEROBJ = 0x8000,
 	TRACE_ITER_PRINTK_MSGONLY = 0x10000,
-	TRACE_ITER_CONTEXT_INFO = 0x20000 /* Print pid/cpu/time */
+	TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
+	TRACE_ITER_LATENCY_FMT = 0x40000,
 };
 
 /*
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index c2e68d440c4d..aaa0755268b9 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -159,7 +159,6 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,
 static struct trace_event trace_branch_event = {
 	.type = TRACE_BRANCH,
 	.trace = trace_branch_print,
-	.latency_trace = trace_branch_print,
 };
 
 static struct tracer branch_trace __read_mostly =
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 9e5ebd844158..b923d13e2fad 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -32,6 +32,8 @@ enum {
 
 static int trace_type __read_mostly;
 
+static int save_lat_flag;
+
 #ifdef CONFIG_PREEMPT_TRACER
 static inline int
 preempt_trace(void)
@@ -370,6 +372,9 @@ static void stop_irqsoff_tracer(struct trace_array *tr)
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
+	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+	trace_flags |= TRACE_ITER_LATENCY_FMT;
+
 	tracing_max_latency = 0;
 	irqsoff_trace = tr;
 	/* make sure that the tracer is visible */
@@ -380,6 +385,9 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
 	stop_irqsoff_tracer(tr);
+
+	if (!save_lat_flag)
+		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
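
The two hunks above implement a save/force/restore pattern: remember whether the user already had the latency-format option set, force it on while the latency tracer is active, and clear it on reset only if it was not the user's own setting. A stand-alone sketch with hypothetical names (MY_OPT, my_flags, my_opt_saved):

	#define MY_OPT	0x1

	static unsigned long my_flags;		/* stand-in for trace_flags */
	static int my_opt_saved;		/* stand-in for save_lat_flag */

	static void my_tracer_init(void)
	{
		my_opt_saved = my_flags & MY_OPT;	/* user's original choice */
		my_flags |= MY_OPT;			/* the tracer needs it on */
	}

	static void my_tracer_reset(void)
	{
		if (!my_opt_saved)			/* only undo what we forced */
			my_flags &= ~MY_OPT;
	}
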
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 9fc815031b09..306fef84c503 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -437,8 +437,6 @@ int register_ftrace_event(struct trace_event *event)
 
 	if (event->trace == NULL)
 		event->trace = trace_nop_print;
-	if (event->latency_trace == NULL)
-		event->latency_trace = trace_nop_print;
 	if (event->raw == NULL)
 		event->raw = trace_nop_print;
 	if (event->hex == NULL)
@@ -480,29 +478,6 @@ enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
 }
 
 /* TRACE_FN */
-static enum print_line_t trace_fn_latency(struct trace_iterator *iter,
-					  int flags)
-{
-	struct ftrace_entry *field;
-	struct trace_seq *s = &iter->seq;
-
-	trace_assign_type(field, iter->ent);
-
-	if (!seq_print_ip_sym(s, field->ip, flags))
-		goto partial;
-	if (!trace_seq_puts(s, " ("))
-		goto partial;
-	if (!seq_print_ip_sym(s, field->parent_ip, flags))
-		goto partial;
-	if (!trace_seq_puts(s, ")\n"))
-		goto partial;
-
-	return TRACE_TYPE_HANDLED;
-
- partial:
-	return TRACE_TYPE_PARTIAL_LINE;
-}
-
 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
 {
 	struct ftrace_entry *field;
@@ -573,7 +548,6 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
 static struct trace_event trace_fn_event = {
 	.type = TRACE_FN,
 	.trace = trace_fn_trace,
-	.latency_trace = trace_fn_latency,
 	.raw = trace_fn_raw,
 	.hex = trace_fn_hex,
 	.binary = trace_fn_bin,
@@ -705,7 +679,6 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
 static struct trace_event trace_ctx_event = {
 	.type = TRACE_CTX,
 	.trace = trace_ctx_print,
-	.latency_trace = trace_ctx_print,
 	.raw = trace_ctx_raw,
 	.hex = trace_ctx_hex,
 	.binary = trace_ctxwake_bin,
@@ -714,7 +687,6 @@ static struct trace_event trace_ctx_event = {
 static struct trace_event trace_wake_event = {
 	.type = TRACE_WAKE,
 	.trace = trace_wake_print,
-	.latency_trace = trace_wake_print,
 	.raw = trace_wake_raw,
 	.hex = trace_wake_hex,
 	.binary = trace_ctxwake_bin,
@@ -770,7 +742,6 @@ static enum print_line_t trace_special_bin(struct trace_iterator *iter,
 static struct trace_event trace_special_event = {
 	.type = TRACE_SPECIAL,
 	.trace = trace_special_print,
-	.latency_trace = trace_special_print,
 	.raw = trace_special_print,
 	.hex = trace_special_hex,
 	.binary = trace_special_bin,
@@ -808,7 +779,6 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 static struct trace_event trace_stack_event = {
 	.type = TRACE_STACK,
 	.trace = trace_stack_print,
-	.latency_trace = trace_stack_print,
 	.raw = trace_special_print,
 	.hex = trace_special_hex,
 	.binary = trace_special_bin,
@@ -838,7 +808,6 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
 static struct trace_event trace_user_stack_event = {
 	.type = TRACE_USER_STACK,
 	.trace = trace_user_stack_print,
-	.latency_trace = trace_user_stack_print,
 	.raw = trace_special_print,
 	.hex = trace_special_hex,
 	.binary = trace_special_bin,
@@ -883,7 +852,6 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
 static struct trace_event trace_print_event = {
 	.type = TRACE_PRINT,
 	.trace = trace_print_print,
-	.latency_trace = trace_print_print,
 	.raw = trace_print_raw,
 };
 
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 551a25a72217..8a34d688ed63 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -10,7 +10,6 @@ struct trace_event {
 	struct hlist_node node;
 	int type;
 	trace_print_func trace;
-	trace_print_func latency_trace;
 	trace_print_func raw;
 	trace_print_func hex;
 	trace_print_func binary;
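
With latency_trace gone from struct trace_event, an event only fills in the handlers it implements; register_ftrace_event() (see the trace_output.c hunk above) substitutes trace_nop_print for anything left NULL. A hypothetical minimal event might therefore look like this (TRACE_MY_TYPE and my_event_print are made-up names):

	static enum print_line_t my_event_print(struct trace_iterator *iter, int flags)
	{
		if (!trace_seq_printf(&iter->seq, "my event\n"))
			return TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_HANDLED;
	}

	static struct trace_event my_trace_event = {
		.type	= TRACE_MY_TYPE,	/* hypothetical event id */
		.trace	= my_event_print,
		/* .raw, .hex and .binary default to trace_nop_print */
	};

	/* registered once at init time: register_ftrace_event(&my_trace_event); */
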
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index db55f7aaa640..3c5ad6b2ec84 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -32,6 +32,8 @@ static raw_spinlock_t wakeup_lock =
 
 static void __wakeup_reset(struct trace_array *tr);
 
+static int save_lat_flag;
+
 #ifdef CONFIG_FUNCTION_TRACER
 /*
  * irqsoff uses its own tracer function to keep the overhead down:
@@ -324,6 +326,9 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
+	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+	trace_flags |= TRACE_ITER_LATENCY_FMT;
+
 	tracing_max_latency = 0;
 	wakeup_trace = tr;
 	start_wakeup_tracer(tr);
@@ -347,6 +352,9 @@ static void wakeup_tracer_reset(struct trace_array *tr)
 	stop_wakeup_tracer(tr);
 	/* make sure we put back any tasks we are tracing */
 	wakeup_reset(tr);
+
+	if (!save_lat_flag)
+		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)