Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                 |   6
-rw-r--r--  kernel/trace/blktrace.c              |  13
-rw-r--r--  kernel/trace/ftrace.c                |  84
-rw-r--r--  kernel/trace/ring_buffer.c           |  26
-rw-r--r--  kernel/trace/trace.c                 |  37
-rw-r--r--  kernel/trace/trace.h                 |  11
-rw-r--r--  kernel/trace/trace_event_profile.c   |   2
-rw-r--r--  kernel/trace/trace_event_types.h     |   3
-rw-r--r--  kernel/trace/trace_events.c          |  32
-rw-r--r--  kernel/trace/trace_events_filter.c   |  20
-rw-r--r--  kernel/trace/trace_functions.c       |   5
-rw-r--r--  kernel/trace/trace_functions_graph.c |  11
-rw-r--r--  kernel/trace/trace_output.c          |   3
-rw-r--r--  kernel/trace/trace_printk.c          |  28
-rw-r--r--  kernel/trace/trace_stack.c           |  11
-rw-r--r--  kernel/trace/trace_stat.c            |  40
16 files changed, 185 insertions(+), 147 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1551f47e7669..019f380fd764 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -226,13 +226,13 @@ config BOOT_TRACER
 	  the timings of the initcalls and traces key events and the identity
 	  of tasks that can cause boot delays, such as context-switches.
 
-	  Its aim is to be parsed by the /scripts/bootgraph.pl tool to
+	  Its aim is to be parsed by the scripts/bootgraph.pl tool to
 	  produce pretty graphics about boot inefficiencies, giving a visual
 	  representation of the delays during initcalls - but the raw
 	  /debug/tracing/trace text output is readable too.
 
-	  You must pass in ftrace=initcall to the kernel command line
-	  to enable this on bootup.
+	  You must pass in initcall_debug and ftrace=initcall to the kernel
+	  command line to enable this on bootup.
 
 config TRACE_BRANCH_PROFILING
 	bool
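The amended help text amounts to a two-step recipe: boot with both parameters, then feed the captured trace to bootgraph.pl. As a rough usage sketch (bootloader syntax and the debugfs mount point vary by setup, and the exact bootgraph.pl invocation should be checked against the script itself):

    # kernel command line, e.g. appended to the bootloader's "linux" entry
    initcall_debug ftrace=initcall

    # after boot, render the captured trace as an SVG
    cat /debug/tracing/trace | perl scripts/bootgraph.pl > boot.svg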
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 39af8af6fc30..7a34cb563fec 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/debugfs.h>
+#include <linux/smp_lock.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
 
@@ -266,8 +267,8 @@ static void blk_trace_free(struct blk_trace *bt)
 {
 	debugfs_remove(bt->msg_file);
 	debugfs_remove(bt->dropped_file);
-	debugfs_remove(bt->dir);
 	relay_close(bt->rchan);
+	debugfs_remove(bt->dir);
 	free_percpu(bt->sequence);
 	free_percpu(bt->msg_data);
 	kfree(bt);
@@ -377,18 +378,8 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 
 static int blk_remove_buf_file_callback(struct dentry *dentry)
 {
-	struct dentry *parent = dentry->d_parent;
 	debugfs_remove(dentry);
 
-	/*
-	 * this will fail for all but the last file, but that is ok. what we
-	 * care about is the top level buts->name directory going away, when
-	 * the last trace file is gone. Then we don't have to rmdir() that
-	 * manually on trace stop, so it nicely solves the issue with
-	 * force killing of running traces.
-	 */
-
-	debugfs_remove(parent);
 	return 0;
 }
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3718d55fb4c3..1e1d23c26308 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -291,7 +291,9 @@ function_stat_next(void *v, int idx)
 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 
  again:
-	rec++;
+	if (idx != 0)
+		rec++;
+
 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
 		pg = pg->next;
 		if (!pg)
@@ -766,7 +768,7 @@ static struct tracer_stat function_stats __initdata = {
 	.stat_show	= function_stat_show
 };
 
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
 	struct ftrace_profile_stat *stat;
 	struct dentry *entry;
@@ -784,7 +786,6 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
 			 * The files created are permanent, if something happens
 			 * we still do not free memory.
 			 */
-			kfree(stat);
 			WARN(1,
 			     "Could not allocate stat file for cpu %d\n",
 			     cpu);
@@ -811,7 +812,7 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
 }
 
 #else /* CONFIG_FUNCTION_PROFILER */
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
@@ -1417,10 +1418,20 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
+	loff_t l;
+
+	if (!(iter->flags & FTRACE_ITER_HASH))
+		*pos = 0;
 
 	iter->flags |= FTRACE_ITER_HASH;
 
-	return t_hash_next(m, p, pos);
+	iter->hidx = 0;
+	for (l = 0; l <= *pos; ) {
+		p = t_hash_next(m, p, &l);
+		if (!p)
+			break;
+	}
+	return p;
 }
 
 static int t_hash_show(struct seq_file *m, void *v)
@@ -1467,8 +1478,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			iter->pg = iter->pg->next;
 			iter->idx = 0;
 			goto retry;
-		} else {
-			iter->idx = -1;
 		}
 	} else {
 		rec = &iter->pg->records[iter->idx++];
@@ -1497,6 +1506,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
+	loff_t l;
 
 	mutex_lock(&ftrace_lock);
 	/*
@@ -1508,23 +1518,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 		if (*pos > 0)
 			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
-		(*pos)++;
 		return iter;
 	}
 
 	if (iter->flags & FTRACE_ITER_HASH)
 		return t_hash_start(m, pos);
 
-	if (*pos > 0) {
-		if (iter->idx < 0)
-			return p;
-		(*pos)--;
-		iter->idx--;
+	iter->pg = ftrace_pages_start;
+	iter->idx = 0;
+	for (l = 0; l <= *pos; ) {
+		p = t_next(m, p, &l);
+		if (!p)
+			break;
 	}
 
-	p = t_next(m, p, pos);
-
-	if (!p)
+	if (!p && iter->flags & FTRACE_ITER_FILTER)
 		return t_hash_start(m, pos);
 
 	return p;
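Both t_hash_start() and t_start() above converge on the same idiom, worth spelling out: a seq_file ->start() callback can be invoked at an arbitrary *pos, because a read may restart mid-file, so instead of trusting iterator state left over from a previous read, the rewritten functions rewind and walk forward to *pos, and only ->next() advances the position. A minimal sketch of that pattern over a plain linked list; all names here (my_item, my_list, my_lock) are hypothetical:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/seq_file.h>

    struct my_item {
    	struct list_head list;
    	/* payload omitted */
    };

    static LIST_HEAD(my_list);
    static DEFINE_MUTEX(my_lock);

    /* ->start(): take the lock, then walk to the element at *pos from scratch */
    static void *my_start(struct seq_file *m, loff_t *pos)
    {
    	struct my_item *item;
    	loff_t l = 0;

    	mutex_lock(&my_lock);
    	list_for_each_entry(item, &my_list, list)
    		if (l++ == *pos)
    			return item;
    	return NULL;			/* *pos is past the end */
    }

    /* ->next(): the only place that advances *pos */
    static void *my_next(struct seq_file *m, void *v, loff_t *pos)
    {
    	struct my_item *item = v;

    	(*pos)++;
    	if (item->list.next == &my_list)
    		return NULL;		/* end of list */
    	return list_entry(item->list.next, struct my_item, list);
    }

    /* ->stop(): drop the lock taken in ->start(), even if it returned NULL */
    static void my_stop(struct seq_file *m, void *v)
    {
    	mutex_unlock(&my_lock);
    }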
@@ -1654,7 +1662,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_regex_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND))
+	    (file->f_flags & O_TRUNC))
 		ftrace_filter_reset(enable);
 
 	if (file->f_mode & FMODE_READ) {
@@ -2500,32 +2508,31 @@ int ftrace_graph_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
 static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
 {
 	unsigned long *array = m->private;
-	int index = *pos;
-
-	(*pos)++;
 
-	if (index >= ftrace_graph_count)
+	if (*pos >= ftrace_graph_count)
 		return NULL;
+	return &array[*pos];
+}
 
-	return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return __g_next(m, pos);
 }
 
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
-	void *p = NULL;
-
 	mutex_lock(&graph_lock);
 
 	/* Nothing, tell g_show to print all functions are enabled */
 	if (!ftrace_graph_count && !*pos)
 		return (void *)1;
 
-	p = g_next(m, p, pos);
-
-	return p;
+	return __g_next(m, pos);
 }
 
 static void g_stop(struct seq_file *m, void *p)
@@ -2570,7 +2577,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 
 	mutex_lock(&graph_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND)) {
+	    (file->f_flags & O_TRUNC)) {
 		ftrace_graph_count = 0;
 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
 	}
@@ -2589,6 +2596,14 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 }
 
 static int
+ftrace_graph_release(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ)
+		seq_release(inode, file);
+	return 0;
+}
+
+static int
 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 {
 	struct dyn_ftrace *rec;
@@ -2717,9 +2732,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 }
 
 static const struct file_operations ftrace_graph_fops = {
 	.open = ftrace_graph_open,
 	.read = seq_read,
 	.write = ftrace_graph_write,
+	.release = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -3152,10 +3168,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
-	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
 		goto out;
 
-	last_ftrace_enabled = ftrace_enabled;
+	last_ftrace_enabled = !!ftrace_enabled;
 
 	if (ftrace_enabled) {
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 04dac2638258..a330513d96ce 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 
 	put_online_cpus();
 
+	kfree(buffer->buffers);
 	free_cpumask_var(buffer->cpumask);
 
 	kfree(buffer);
@@ -1563,6 +1564,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return NULL;
 }
 
+#ifdef CONFIG_TRACING
+
 #define TRACE_RECURSIVE_DEPTH 16
 
 static int trace_recursive_lock(void)
@@ -1593,6 +1596,13 @@ static void trace_recursive_unlock(void)
 	current->trace_recursion--;
 }
 
+#else
+
+#define trace_recursive_lock()		(0)
+#define trace_recursive_unlock()	do { } while (0)
+
+#endif
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
@@ -1776,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	 */
 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
 
-	if (!rb_try_to_discard(cpu_buffer, event))
+	if (rb_try_to_discard(cpu_buffer, event))
 		goto out;
 
 	/*
@@ -2374,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 		 * the box. Return the padding, and we will release
 		 * the current locks, and try again.
 		 */
-		rb_advance_reader(cpu_buffer);
 		return event;
 
 	case RINGBUF_TYPE_TIME_EXTEND:
@@ -2477,7 +2486,7 @@ static inline int rb_ok_to_lock(void)
 	 * buffer too. A one time deal is all you get from reading
 	 * the ring buffer from an NMI.
 	 */
-	if (likely(!in_nmi() && !oops_in_progress))
+	if (likely(!in_nmi()))
 		return 1;
 
 	tracing_off_permanent();
@@ -2510,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
+	if (event && event->type_len == RINGBUF_TYPE_PADDING)
+		rb_advance_reader(cpu_buffer);
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
@@ -2581,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
-	if (!event)
-		goto out_unlock;
-
-	rb_advance_reader(cpu_buffer);
+	if (event)
+		rb_advance_reader(cpu_buffer);
 
- out_unlock:
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);
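Taken together, the hunks above move the "advance past what was just peeked" step out of rb_buffer_peek() and into its callers: ring_buffer_peek() skips padding itself, and ring_buffer_consume() advances whenever a peek actually returned an event. For orientation, a hedged sketch of a reader draining one per-CPU buffer through the consume interface, using the 2.6.31-era signature visible in the hunk header (later kernels extend it):

    #include <linux/kernel.h>
    #include <linux/ring_buffer.h>

    /* Drain every pending event from one CPU's buffer. */
    static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu)
    {
    	struct ring_buffer_event *event;
    	u64 ts;

    	/* returns NULL once the buffer is empty; padding is handled
    	 * internally, so the caller only sees data events */
    	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
    		pr_info("consumed event stamped at %llu\n", ts);
    }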
@@ -3104,6 +3112,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
+#ifdef CONFIG_TRACING
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
@@ -3171,6 +3180,7 @@ static __init int rb_init_debugfs(void)
 }
 
 fs_initcall(rb_init_debugfs);
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 076fa6f0ee48..c22b40f8f576 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,6 +17,7 @@
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>
@@ -284,13 +285,12 @@ void trace_wake_up(void)
 static int __init set_buf_size(char *str)
 {
 	unsigned long buf_size;
-	int ret;
 
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &buf_size);
+	buf_size = memparse(str, &str);
 	/* nr_entries can not be zero */
-	if (ret < 0 || buf_size == 0)
+	if (buf_size == 0)
 		return 0;
 	trace_buf_size = buf_size;
 	return 1;
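strict_strtoul() only accepts a bare number, which is presumably why it is swapped for memparse() here: memparse() (lib/cmdline.c) parses a number with an optional K/M/G-style suffix and scales it, so a boot parameter like trace_buf_size=5M becomes usable. A sketch of the same idiom in a hypothetical boot-option handler (my_buf_size is illustrative, not from the patch):

    #include <linux/init.h>
    #include <linux/kernel.h>

    static unsigned long my_buf_size;	/* hypothetical tunable */

    static int __init set_my_buf_size(char *str)
    {
    	unsigned long size;

    	if (!str)
    		return 0;
    	size = memparse(str, &str);	/* accepts "4096", "64K", "1M", ... */
    	if (size == 0)			/* reject zero, as trace.c does */
    		return 0;
    	my_buf_size = size;
    	return 1;
    }
    __setup("my_buf_size=", set_my_buf_size);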
@@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 }
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
 						    int type,
@@ -2031,7 +2032,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 
 	/* If this file was open for write, then erase contents */
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND)) {
+	    (file->f_flags & O_TRUNC)) {
 		long cpu = (long) inode->i_private;
 
 		if (cpu == TRACE_PIPE_ALL_CPU)
@@ -2053,25 +2054,23 @@ static int tracing_open(struct inode *inode, struct file *file)
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct tracer *t = m->private;
+	struct tracer *t = v;
 
 	(*pos)++;
 
 	if (t)
 		t = t->next;
 
-	m->private = t;
-
 	return t;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	struct tracer *t = m->private;
+	struct tracer *t;
 	loff_t l = 0;
 
 	mutex_lock(&trace_types_lock);
-	for (; t && l < *pos; t = t_next(m, t, &l))
+	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
 		;
 
 	return t;
@@ -2107,18 +2106,10 @@ static struct seq_operations show_traces_seq_ops = {
 
 static int show_traces_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
 	if (tracing_disabled)
 		return -ENODEV;
 
-	ret = seq_open(file, &show_traces_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = trace_types;
-	}
-
-	return ret;
+	return seq_open(file, &show_traces_seq_ops);
 }
 
 static ssize_t
@@ -3095,7 +3086,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 			break;
 		}
 
-		trace_consume(iter);
+		if (ret != TRACE_TYPE_NO_CONSUME)
+			trace_consume(iter);
 		rem -= count;
 		if (!find_next_entry_inc(iter)) {
 			rem = 0;
@@ -4243,8 +4235,11 @@ static void __ftrace_dump(bool disable_tracing)
 	iter.pos = -1;
 
 	if (find_next_entry_inc(&iter) != NULL) {
-		print_trace_line(&iter);
-		trace_consume(&iter);
+		int ret;
+
+		ret = print_trace_line(&iter);
+		if (ret != TRACE_TYPE_NO_CONSUME)
+			trace_consume(&iter);
 	}
 
 	trace_printk_seq(&iter.seq);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6e735d4771f8..8b9f4f6e9559 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
-void tracing_generic_entry_update(struct trace_entry *entry,
-				  unsigned long flags,
-				  int pc);
-
 void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
@@ -597,6 +593,7 @@ print_graph_function(struct trace_iterator *iter)
 
 extern struct pid *ftrace_pid_trace;
 
+#ifdef CONFIG_FUNCTION_TRACER
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	if (!ftrace_pid_trace)
@@ -604,6 +601,12 @@ static inline int ftrace_trace_task(struct task_struct *task)
 
 	return test_tsk_trace_trace(task);
 }
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+	return 1;
+}
+#endif
 
 /*
  * trace_iterator_flags is an enumeration that defines bit
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 5b5895afecfe..11ba5bb4ed0a 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -14,7 +14,7 @@ int ftrace_profile_enable(int event_id)
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id) {
+		if (event->id == event_id && event->profile_enable) {
 			ret = event->profile_enable(event);
 			break;
 		}
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
index 5e32e375134d..6db005e12487 100644
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -26,6 +26,9 @@ TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
 	ftrace_graph_ret_entry, ignore,
 	TRACE_STRUCT(
 		TRACE_FIELD(unsigned long, ret.func, func)
+		TRACE_FIELD(unsigned long long, ret.calltime, calltime)
+		TRACE_FIELD(unsigned long long, ret.rettime, rettime)
+		TRACE_FIELD(unsigned long, ret.overrun, overrun)
 		TRACE_FIELD(int, ret.depth, depth)
 	),
 	TP_RAW_FMT("<-- %lx (%d)")
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aa08be69a1b6..e75276a49cf5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -300,10 +300,18 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	struct ftrace_event_call *call = NULL;
+	loff_t l;
+
 	mutex_lock(&event_mutex);
-	if (*pos == 0)
-		m->private = ftrace_events.next;
-	return t_next(m, NULL, pos);
+
+	m->private = ftrace_events.next;
+	for (l = 0; l <= *pos; ) {
+		call = t_next(m, NULL, &l);
+		if (!call)
+			break;
+	}
+	return call;
 }
 
 static void *
@@ -332,10 +340,18 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
+	struct ftrace_event_call *call = NULL;
+	loff_t l;
+
 	mutex_lock(&event_mutex);
-	if (*pos == 0)
-		m->private = ftrace_events.next;
-	return s_next(m, NULL, pos);
+
+	m->private = ftrace_events.next;
+	for (l = 0; l <= *pos; ) {
+		call = s_next(m, NULL, &l);
+		if (!call)
+			break;
+	}
+	return call;
 }
 
 static int t_show(struct seq_file *m, void *v)
@@ -360,7 +376,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file)
 	const struct seq_operations *seq_ops;
 
 	if ((file->f_mode & FMODE_WRITE) &&
-	    !(file->f_flags & O_APPEND))
+	    (file->f_flags & O_TRUNC))
 		ftrace_clear_events();
 
 	seq_ops = inode->i_private;
@@ -924,7 +940,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	entry = trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
-	if (call->id)
+	if (call->id && call->profile_enable)
 		entry = trace_create_file("id", 0444, call->dir, call,
 					  id);
 
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 936c621bbf46..f32dc9d1ea7b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -624,9 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
 		return -ENOSPC;
 	}
 
-	filter->preds[filter->n_preds] = pred;
-	filter->n_preds++;
-
 	list_for_each_entry(call, &ftrace_events, list) {
 
 		if (!call->define_fields)
@@ -643,6 +640,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
 		}
 		replace_filter_string(call->filter, filter_string);
 	}
+
+	filter->preds[filter->n_preds] = pred;
+	filter->n_preds++;
 out:
 	return err;
 }
@@ -1029,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system,
 
 	if (elt->op == OP_AND || elt->op == OP_OR) {
 		pred = create_logical_pred(elt->op);
+		if (!pred)
+			return -ENOMEM;
 		if (call) {
 			err = filter_add_pred(ps, call, pred);
 			filter_free_pred(pred);
-		} else
+		} else {
 			err = filter_add_subsystem_pred(ps, system,
 						pred, filter_string);
+			if (err)
+				filter_free_pred(pred);
+		}
 		if (err)
 			return err;
 
@@ -1048,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system,
 	}
 
 	pred = create_pred(elt->op, operand1, operand2);
+	if (!pred)
+		return -ENOMEM;
 	if (call) {
 		err = filter_add_pred(ps, call, pred);
 		filter_free_pred(pred);
-	} else
+	} else {
 		err = filter_add_subsystem_pred(ps, system, pred,
 						filter_string);
+		if (err)
+			filter_free_pred(pred);
+	}
 	if (err)
 		return err;
 
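Both replace_preds() hunks enforce the same ownership contract: the create_*_pred() allocators can return NULL and must be checked, and on the subsystem path the predicate is kept by filter_add_subsystem_pred() only when it succeeds, so the caller must free it on failure. Reduced to a sketch with hypothetical names:

    #include <stdlib.h>

    struct pred { int op; };

    /* hypothetical consumer: takes ownership of p only when it returns 0 */
    extern int add_pred(struct pred *p);

    static int create_and_add(int op)
    {
    	struct pred *p = malloc(sizeof(*p));

    	if (!p)
    		return -1;	/* the NULL check both hunks add */
    	p->op = op;
    	if (add_pred(p) != 0) {
    		free(p);	/* handoff failed: we still own p */
    		return -1;
    	}
    	return 0;		/* handoff succeeded: add_pred() owns p */
    }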
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 90f134764837..75ef000613c3 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -302,8 +302,7 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
 	if (count == -1)
 		seq_printf(m, ":unlimited\n");
 	else
-		seq_printf(m, ":count=%ld", count);
-	seq_putc(m, '\n');
+		seq_printf(m, ":count=%ld\n", count);
 
 	return 0;
 }
@@ -364,7 +363,7 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
  out_reg:
 	ret = register_ftrace_function_probe(glob, ops, count);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 static struct ftrace_func_command ftrace_traceon_cmd = {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d2249abafb53..420ec3487579 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -843,9 +843,16 @@ print_graph_function(struct trace_iterator *iter)
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
-		struct ftrace_graph_ent_entry *field;
+		/*
+		 * print_graph_entry() may consume the current event,
+		 * thus @field may become invalid, so we need to save it.
+		 * sizeof(struct ftrace_graph_ent_entry) is very small,
+		 * it can be safely saved at the stack.
+		 */
+		struct ftrace_graph_ent_entry *field, saved;
 		trace_assign_type(field, entry);
-		return print_graph_entry(field, s, iter);
+		saved = *field;
+		return print_graph_entry(&saved, s, iter);
 	}
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;
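The new comment describes a use-after-consume hazard worth generalizing: @field points into ring-buffer memory that print_graph_entry() may recycle, so the entry is copied by value before the call. The same defensive pattern, sketched with hypothetical helpers:

    struct ent { unsigned long func; int depth; };

    extern struct ent *peek_entry(void);	/* hypothetical: pointer into a buffer */
    extern void consume_entry(void);	/* hypothetical: may recycle that memory */
    extern void use(unsigned long func, int depth);

    static void handle_entry(void)
    {
    	struct ent *p = peek_entry();
    	struct ent saved = *p;	/* copy while p is still valid */

    	consume_entry();	/* p may now point at reused memory */
    	use(saved.func, saved.depth);	/* safe: reads the stack copy */
    }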
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 7938f3ae93e3..e0c2545622e8 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -27,8 +27,7 @@ void trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
 	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
 
-	s->buffer[len] = 0;
-	seq_puts(m, s->buffer);
+	seq_write(m, s->buffer, len);
 
 	trace_seq_init(s);
 }
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 9bece9687b62..687699d365ae 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -155,25 +155,19 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
 EXPORT_SYMBOL_GPL(__ftrace_vprintk);
 
 static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_start(struct seq_file *m, loff_t *pos)
 {
-	const char **fmt = m->private;
-	const char **next = fmt;
-
-	(*pos)++;
+	const char **fmt = __start___trace_bprintk_fmt + *pos;
 
 	if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
 		return NULL;
-
-	next = fmt;
-	m->private = ++next;
-
 	return fmt;
 }
 
-static void *t_start(struct seq_file *m, loff_t *pos)
+static void *t_next(struct seq_file *m, void * v, loff_t *pos)
 {
-	return t_next(m, NULL, pos);
+	(*pos)++;
+	return t_start(m, pos);
 }
 
 static int t_show(struct seq_file *m, void *v)
@@ -182,7 +176,7 @@ static int t_show(struct seq_file *m, void *v)
 	const char *str = *fmt;
 	int i;
 
-	seq_printf(m, "0x%lx : \"", (unsigned long)fmt);
+	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
 
 	/*
 	 * Tabs and new lines need to be converted.
@@ -224,15 +218,7 @@ static const struct seq_operations show_format_seq_ops = {
 static int
 ftrace_formats_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
-	ret = seq_open(file, &show_format_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-
-		m->private = __start___trace_bprintk_fmt;
-	}
-	return ret;
+	return seq_open(file, &show_format_seq_ops);
 }
 
 static const struct file_operations ftrace_formats_fops = {
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 2d7aebd71dbd..6a2a9d484cd6 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -301,17 +301,14 @@ static const struct seq_operations stack_trace_seq_ops = {
 
 static int stack_trace_open(struct inode *inode, struct file *file)
 {
-	int ret;
-
-	ret = seq_open(file, &stack_trace_seq_ops);
-
-	return ret;
+	return seq_open(file, &stack_trace_seq_ops);
 }
 
 static const struct file_operations stack_trace_fops = {
 	.open = stack_trace_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
+	.release = seq_release,
 };
 
 int
@@ -326,10 +323,10 @@ stack_trace_sysctl(struct ctl_table *table, int write,
 	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
 	if (ret || !write ||
-	    (last_stack_tracer_enabled == stack_tracer_enabled))
+	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
 		goto out;
 
-	last_stack_tracer_enabled = stack_tracer_enabled;
+	last_stack_tracer_enabled = !!stack_tracer_enabled;
 
 	if (stack_tracer_enabled)
 		register_ftrace_function(&trace_ops);
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index c00643733f4c..aea321c82fa0 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -73,7 +73,7 @@ static struct rb_node *release_next(struct rb_node *node)
 	}
 }
 
-static void reset_stat_session(struct stat_session *session)
+static void __reset_stat_session(struct stat_session *session)
 {
 	struct rb_node *node = session->stat_root.rb_node;
 
@@ -83,10 +83,17 @@ static void reset_stat_session(struct stat_session *session)
 	session->stat_root = RB_ROOT;
 }
 
+static void reset_stat_session(struct stat_session *session)
+{
+	mutex_lock(&session->stat_mutex);
+	__reset_stat_session(session);
+	mutex_unlock(&session->stat_mutex);
+}
+
 static void destroy_session(struct stat_session *session)
 {
 	debugfs_remove(session->file);
-	reset_stat_session(session);
+	__reset_stat_session(session);
 	mutex_destroy(&session->stat_mutex);
 	kfree(session);
 }
@@ -150,7 +157,7 @@ static int stat_seq_init(struct stat_session *session)
 	int i;
 
 	mutex_lock(&session->stat_mutex);
-	reset_stat_session(session);
+	__reset_stat_session(session);
 
 	if (!ts->stat_cmp)
 		ts->stat_cmp = dummy_cmp;
@@ -183,7 +190,7 @@ exit:
 	return ret;
 
 exit_free_rbtree:
-	reset_stat_session(session);
+	__reset_stat_session(session);
 	mutex_unlock(&session->stat_mutex);
 	return ret;
 }
@@ -199,17 +206,13 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
 	mutex_lock(&session->stat_mutex);
 
 	/* If we are in the beginning of the file, print the headers */
-	if (!*pos && session->ts->stat_headers) {
-		(*pos)++;
+	if (!*pos && session->ts->stat_headers)
 		return SEQ_START_TOKEN;
-	}
 
 	node = rb_first(&session->stat_root);
 	for (i = 0; node && i < *pos; i++)
 		node = rb_next(node);
 
-	(*pos)++;
-
 	return node;
 }
 
@@ -254,16 +257,21 @@ static const struct seq_operations trace_stat_seq_ops = {
 static int tracing_stat_open(struct inode *inode, struct file *file)
 {
 	int ret;
-
+	struct seq_file *m;
 	struct stat_session *session = inode->i_private;
 
+	ret = stat_seq_init(session);
+	if (ret)
+		return ret;
+
 	ret = seq_open(file, &trace_stat_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = session;
-		ret = stat_seq_init(session);
+	if (ret) {
+		reset_stat_session(session);
+		return ret;
 	}
 
+	m = file->private_data;
+	m->private = session;
 	return ret;
 }
 
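The reordering in tracing_stat_open() pairs every acquisition with a teardown: build the rbtree first, unwind it if seq_open() fails, and otherwise leave cleanup to tracing_stat_release() below, which now also calls seq_release(). The shape of that open/release discipline, sketched with hypothetical init/teardown helpers:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    extern const struct seq_operations my_seq_ops;	/* hypothetical */
    extern int build_state(struct inode *inode);	/* hypothetical */
    extern void destroy_state(struct inode *inode);	/* hypothetical */

    static int my_open(struct inode *inode, struct file *file)
    {
    	int ret;

    	ret = build_state(inode);		/* step 1 */
    	if (ret)
    		return ret;			/* nothing acquired yet */

    	ret = seq_open(file, &my_seq_ops);	/* step 2 */
    	if (ret) {
    		destroy_state(inode);		/* unwind step 1 */
    		return ret;
    	}

    	((struct seq_file *)file->private_data)->private = inode->i_private;
    	return 0;
    }

    static int my_release(struct inode *inode, struct file *file)
    {
    	destroy_state(inode);			/* mirror of step 1 */
    	return seq_release(inode, file);	/* mirror of step 2 */
    }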
@@ -274,11 +282,9 @@ static int tracing_stat_release(struct inode *i, struct file *f)
 {
 	struct stat_session *session = i->i_private;
 
-	mutex_lock(&session->stat_mutex);
 	reset_stat_session(session);
-	mutex_unlock(&session->stat_mutex);
 
-	return 0;
+	return seq_release(i, f);
 }
 
 static const struct file_operations tracing_stat_fops = {