about summary refs log tree commit diff stats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
author Linus Torvalds <torvalds@linux-foundation.org> 2013-07-11 12:02:09 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2013-07-11 12:02:09 -0400
commit c72bb316916b1a6cf35e1d5238566ef27b0b7f80 (patch)
tree 1bd7bb147302abf907bba9fb83cf74b4a5b6ef0d /kernel/trace/trace.c
parent 6d128e1e72bf082542e85f72e6b7ddd704193588 (diff)
parent dcc302232c1f9b3ca16f6b8ee190eb0b1a8a0da3 (diff)
Merge tag 'trace-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing changes from Steven Rostedt: "The majority of the changes here are cleanups for the large changes that were added to 3.10, which includes several bug fixes that have been marked for stable. As for new features, there were a few, but nothing to write to LWN about. These include: New function trigger called "dump" and "cpudump" that will cause ftrace to dump its buffer to the console when the function is called. The difference between "dump" and "cpudump" is that "dump" will dump the entire contents of the ftrace buffer, where as "cpudump" will only dump the contents of the ftrace buffer for the CPU that called the function. Another small enhancement is a new sysctl switch called "traceoff_on_warning" which, when enabled, will disable tracing if any WARN_ON() is triggered. This is useful if you want to debug what caused a warning and do not want to risk losing your trace data by the ring buffer overwriting the data before you can disable it. There's also a kernel command line option that will make this enabled at boot up called the same thing" * tag 'trace-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (34 commits) tracing: Make tracing_open_generic_{tr,tc}() static tracing: Remove ftrace() function tracing: Remove TRACE_EVENT_TYPE enum definition tracing: Make tracer_tracing_{off,on,is_on}() static tracing: Fix irqs-off tag display in syscall tracing uprobes: Fix return value in error handling path tracing: Fix race between deleting buffer and setting events tracing: Add trace_array_get/put() to event handling tracing: Get trace_array ref counts when accessing trace files tracing: Add trace_array_get/put() to handle instance refs better tracing: Protect ftrace_trace_arrays list in trace_events.c tracing: Make trace_marker use the correct per-instance buffer ftrace: Do not run selftest if command line parameter is set tracing/kprobes: Don't pass addr=ip to perf_trace_buf_submit() tracing: Use flag buffer_disabled for irqsoff tracer
tracing/kprobes: Turn trace_probe->files into list_head tracing: Fix disabling of soft disable tracing: Add missing syscall_metadata comment tracing: Simplify code for showing of soft disabled flag tracing/kprobes: Kill probe_enable_lock ...
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- kernel/trace/trace.c 338
1 file changed, 268 insertions(+), 70 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e71a8be4a6ee..0cd500bffd9b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -115,6 +115,9 @@ cpumask_var_t __read_mostly tracing_buffer_mask;
115 115
116enum ftrace_dump_mode ftrace_dump_on_oops; 116enum ftrace_dump_mode ftrace_dump_on_oops;
117 117
118/* When set, tracing will stop when a WARN*() is hit */
119int __disable_trace_on_warning;
120
118static int tracing_set_tracer(const char *buf); 121static int tracing_set_tracer(const char *buf);
119 122
120#define MAX_TRACER_SIZE 100 123#define MAX_TRACER_SIZE 100
@@ -149,6 +152,13 @@ static int __init set_ftrace_dump_on_oops(char *str)
149} 152}
150__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); 153__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
151 154
155static int __init stop_trace_on_warning(char *str)
156{
157 __disable_trace_on_warning = 1;
158 return 1;
159}
160__setup("traceoff_on_warning=", stop_trace_on_warning);
161
152static int __init boot_alloc_snapshot(char *str) 162static int __init boot_alloc_snapshot(char *str)
153{ 163{
154 allocate_snapshot = true; 164 allocate_snapshot = true;
@@ -170,6 +180,7 @@ static int __init set_trace_boot_options(char *str)
170} 180}
171__setup("trace_options=", set_trace_boot_options); 181__setup("trace_options=", set_trace_boot_options);
172 182
183
173unsigned long long ns2usecs(cycle_t nsec) 184unsigned long long ns2usecs(cycle_t nsec)
174{ 185{
175 nsec += 500; 186 nsec += 500;
@@ -193,6 +204,37 @@ static struct trace_array global_trace;
193 204
194LIST_HEAD(ftrace_trace_arrays); 205LIST_HEAD(ftrace_trace_arrays);
195 206
207int trace_array_get(struct trace_array *this_tr)
208{
209 struct trace_array *tr;
210 int ret = -ENODEV;
211
212 mutex_lock(&trace_types_lock);
213 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
214 if (tr == this_tr) {
215 tr->ref++;
216 ret = 0;
217 break;
218 }
219 }
220 mutex_unlock(&trace_types_lock);
221
222 return ret;
223}
224
225static void __trace_array_put(struct trace_array *this_tr)
226{
227 WARN_ON(!this_tr->ref);
228 this_tr->ref--;
229}
230
231void trace_array_put(struct trace_array *this_tr)
232{
233 mutex_lock(&trace_types_lock);
234 __trace_array_put(this_tr);
235 mutex_unlock(&trace_types_lock);
236}
237
196int filter_current_check_discard(struct ring_buffer *buffer, 238int filter_current_check_discard(struct ring_buffer *buffer,
197 struct ftrace_event_call *call, void *rec, 239 struct ftrace_event_call *call, void *rec,
198 struct ring_buffer_event *event) 240 struct ring_buffer_event *event)
@@ -215,9 +257,24 @@ cycle_t ftrace_now(int cpu)
215 return ts; 257 return ts;
216} 258}
217 259
260/**
261 * tracing_is_enabled - Show if global_trace has been disabled
262 *
263 * Shows if the global trace has been enabled or not. It uses the
264 * mirror flag "buffer_disabled" to be used in fast paths such as for
265 * the irqsoff tracer. But it may be inaccurate due to races. If you
266 * need to know the accurate state, use tracing_is_on() which is a little
267 * slower, but accurate.
268 */
218int tracing_is_enabled(void) 269int tracing_is_enabled(void)
219{ 270{
220 return tracing_is_on(); 271 /*
272 * For quick access (irqsoff uses this in fast path), just
273 * return the mirror variable of the state of the ring buffer.
274 * It's a little racy, but we don't really care.
275 */
276 smp_rmb();
277 return !global_trace.buffer_disabled;
221} 278}
222 279
223/* 280/*
@@ -240,7 +297,7 @@ static struct tracer *trace_types __read_mostly;
240/* 297/*
241 * trace_types_lock is used to protect the trace_types list. 298 * trace_types_lock is used to protect the trace_types list.
242 */ 299 */
243static DEFINE_MUTEX(trace_types_lock); 300DEFINE_MUTEX(trace_types_lock);
244 301
245/* 302/*
246 * serialize the access of the ring buffer 303 * serialize the access of the ring buffer
@@ -330,6 +387,23 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
330 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | 387 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
331 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION; 388 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
332 389
390static void tracer_tracing_on(struct trace_array *tr)
391{
392 if (tr->trace_buffer.buffer)
393 ring_buffer_record_on(tr->trace_buffer.buffer);
394 /*
395 * This flag is looked at when buffers haven't been allocated
396 * yet, or by some tracers (like irqsoff), that just want to
397 * know if the ring buffer has been disabled, but it can handle
398 * races of where it gets disabled but we still do a record.
399 * As the check is in the fast path of the tracers, it is more
400 * important to be fast than accurate.
401 */
402 tr->buffer_disabled = 0;
403 /* Make the flag seen by readers */
404 smp_wmb();
405}
406
333/** 407/**
334 * tracing_on - enable tracing buffers 408 * tracing_on - enable tracing buffers
335 * 409 *
@@ -338,15 +412,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
338 */ 412 */
339void tracing_on(void) 413void tracing_on(void)
340{ 414{
341 if (global_trace.trace_buffer.buffer) 415 tracer_tracing_on(&global_trace);
342 ring_buffer_record_on(global_trace.trace_buffer.buffer);
343 /*
344 * This flag is only looked at when buffers haven't been
345 * allocated yet. We don't really care about the race
346 * between setting this flag and actually turning
347 * on the buffer.
348 */
349 global_trace.buffer_disabled = 0;
350} 416}
351EXPORT_SYMBOL_GPL(tracing_on); 417EXPORT_SYMBOL_GPL(tracing_on);
352 418
@@ -540,6 +606,23 @@ void tracing_snapshot_alloc(void)
540EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); 606EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
541#endif /* CONFIG_TRACER_SNAPSHOT */ 607#endif /* CONFIG_TRACER_SNAPSHOT */
542 608
609static void tracer_tracing_off(struct trace_array *tr)
610{
611 if (tr->trace_buffer.buffer)
612 ring_buffer_record_off(tr->trace_buffer.buffer);
613 /*
614 * This flag is looked at when buffers haven't been allocated
615 * yet, or by some tracers (like irqsoff), that just want to
616 * know if the ring buffer has been disabled, but it can handle
617 * races of where it gets disabled but we still do a record.
618 * As the check is in the fast path of the tracers, it is more
619 * important to be fast than accurate.
620 */
621 tr->buffer_disabled = 1;
622 /* Make the flag seen by readers */
623 smp_wmb();
624}
625
543/** 626/**
544 * tracing_off - turn off tracing buffers 627 * tracing_off - turn off tracing buffers
545 * 628 *
@@ -550,26 +633,35 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
550 */ 633 */
551void tracing_off(void) 634void tracing_off(void)
552{ 635{
553 if (global_trace.trace_buffer.buffer) 636 tracer_tracing_off(&global_trace);
554 ring_buffer_record_off(global_trace.trace_buffer.buffer);
555 /*
556 * This flag is only looked at when buffers haven't been
557 * allocated yet. We don't really care about the race
558 * between setting this flag and actually turning
559 * on the buffer.
560 */
561 global_trace.buffer_disabled = 1;
562} 637}
563EXPORT_SYMBOL_GPL(tracing_off); 638EXPORT_SYMBOL_GPL(tracing_off);
564 639
640void disable_trace_on_warning(void)
641{
642 if (__disable_trace_on_warning)
643 tracing_off();
644}
645
646/**
647 * tracer_tracing_is_on - show real state of ring buffer enabled
648 * @tr : the trace array to know if ring buffer is enabled
649 *
650 * Shows real state of the ring buffer if it is enabled or not.
651 */
652static int tracer_tracing_is_on(struct trace_array *tr)
653{
654 if (tr->trace_buffer.buffer)
655 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
656 return !tr->buffer_disabled;
657}
658
565/** 659/**
566 * tracing_is_on - show state of ring buffers enabled 660 * tracing_is_on - show state of ring buffers enabled
567 */ 661 */
568int tracing_is_on(void) 662int tracing_is_on(void)
569{ 663{
570 if (global_trace.trace_buffer.buffer) 664 return tracer_tracing_is_on(&global_trace);
571 return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
572 return !global_trace.buffer_disabled;
573} 665}
574EXPORT_SYMBOL_GPL(tracing_is_on); 666EXPORT_SYMBOL_GPL(tracing_is_on);
575 667
@@ -1543,15 +1635,6 @@ trace_function(struct trace_array *tr,
1543 __buffer_unlock_commit(buffer, event); 1635 __buffer_unlock_commit(buffer, event);
1544} 1636}
1545 1637
1546void
1547ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1548 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1549 int pc)
1550{
1551 if (likely(!atomic_read(&data->disabled)))
1552 trace_function(tr, ip, parent_ip, flags, pc);
1553}
1554
1555#ifdef CONFIG_STACKTRACE 1638#ifdef CONFIG_STACKTRACE
1556 1639
1557#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) 1640#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
@@ -2768,10 +2851,9 @@ static const struct seq_operations tracer_seq_ops = {
2768}; 2851};
2769 2852
2770static struct trace_iterator * 2853static struct trace_iterator *
2771__tracing_open(struct inode *inode, struct file *file, bool snapshot) 2854__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
2855 struct inode *inode, struct file *file, bool snapshot)
2772{ 2856{
2773 struct trace_cpu *tc = inode->i_private;
2774 struct trace_array *tr = tc->tr;
2775 struct trace_iterator *iter; 2857 struct trace_iterator *iter;
2776 int cpu; 2858 int cpu;
2777 2859
@@ -2850,8 +2932,6 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2850 tracing_iter_reset(iter, cpu); 2932 tracing_iter_reset(iter, cpu);
2851 } 2933 }
2852 2934
2853 tr->ref++;
2854
2855 mutex_unlock(&trace_types_lock); 2935 mutex_unlock(&trace_types_lock);
2856 2936
2857 return iter; 2937 return iter;
@@ -2874,6 +2954,43 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
2874 return 0; 2954 return 0;
2875} 2955}
2876 2956
2957/*
2958 * Open and update trace_array ref count.
2959 * Must have the current trace_array passed to it.
2960 */
2961static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
2962{
2963 struct trace_array *tr = inode->i_private;
2964
2965 if (tracing_disabled)
2966 return -ENODEV;
2967
2968 if (trace_array_get(tr) < 0)
2969 return -ENODEV;
2970
2971 filp->private_data = inode->i_private;
2972
2973 return 0;
2974
2975}
2976
2977static int tracing_open_generic_tc(struct inode *inode, struct file *filp)
2978{
2979 struct trace_cpu *tc = inode->i_private;
2980 struct trace_array *tr = tc->tr;
2981
2982 if (tracing_disabled)
2983 return -ENODEV;
2984
2985 if (trace_array_get(tr) < 0)
2986 return -ENODEV;
2987
2988 filp->private_data = inode->i_private;
2989
2990 return 0;
2991
2992}
2993
2877static int tracing_release(struct inode *inode, struct file *file) 2994static int tracing_release(struct inode *inode, struct file *file)
2878{ 2995{
2879 struct seq_file *m = file->private_data; 2996 struct seq_file *m = file->private_data;
@@ -2881,17 +2998,20 @@ static int tracing_release(struct inode *inode, struct file *file)
2881 struct trace_array *tr; 2998 struct trace_array *tr;
2882 int cpu; 2999 int cpu;
2883 3000
2884 if (!(file->f_mode & FMODE_READ)) 3001 /* Writes do not use seq_file, need to grab tr from inode */
3002 if (!(file->f_mode & FMODE_READ)) {
3003 struct trace_cpu *tc = inode->i_private;
3004
3005 trace_array_put(tc->tr);
2885 return 0; 3006 return 0;
3007 }
2886 3008
2887 iter = m->private; 3009 iter = m->private;
2888 tr = iter->tr; 3010 tr = iter->tr;
3011 trace_array_put(tr);
2889 3012
2890 mutex_lock(&trace_types_lock); 3013 mutex_lock(&trace_types_lock);
2891 3014
2892 WARN_ON(!tr->ref);
2893 tr->ref--;
2894
2895 for_each_tracing_cpu(cpu) { 3015 for_each_tracing_cpu(cpu) {
2896 if (iter->buffer_iter[cpu]) 3016 if (iter->buffer_iter[cpu])
2897 ring_buffer_read_finish(iter->buffer_iter[cpu]); 3017 ring_buffer_read_finish(iter->buffer_iter[cpu]);
@@ -2910,20 +3030,49 @@ static int tracing_release(struct inode *inode, struct file *file)
2910 kfree(iter->trace); 3030 kfree(iter->trace);
2911 kfree(iter->buffer_iter); 3031 kfree(iter->buffer_iter);
2912 seq_release_private(inode, file); 3032 seq_release_private(inode, file);
3033
3034 return 0;
3035}
3036
3037static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3038{
3039 struct trace_array *tr = inode->i_private;
3040
3041 trace_array_put(tr);
2913 return 0; 3042 return 0;
2914} 3043}
2915 3044
3045static int tracing_release_generic_tc(struct inode *inode, struct file *file)
3046{
3047 struct trace_cpu *tc = inode->i_private;
3048 struct trace_array *tr = tc->tr;
3049
3050 trace_array_put(tr);
3051 return 0;
3052}
3053
3054static int tracing_single_release_tr(struct inode *inode, struct file *file)
3055{
3056 struct trace_array *tr = inode->i_private;
3057
3058 trace_array_put(tr);
3059
3060 return single_release(inode, file);
3061}
3062
2916static int tracing_open(struct inode *inode, struct file *file) 3063static int tracing_open(struct inode *inode, struct file *file)
2917{ 3064{
3065 struct trace_cpu *tc = inode->i_private;
3066 struct trace_array *tr = tc->tr;
2918 struct trace_iterator *iter; 3067 struct trace_iterator *iter;
2919 int ret = 0; 3068 int ret = 0;
2920 3069
3070 if (trace_array_get(tr) < 0)
3071 return -ENODEV;
3072
2921 /* If this file was open for write, then erase contents */ 3073 /* If this file was open for write, then erase contents */
2922 if ((file->f_mode & FMODE_WRITE) && 3074 if ((file->f_mode & FMODE_WRITE) &&
2923 (file->f_flags & O_TRUNC)) { 3075 (file->f_flags & O_TRUNC)) {
2924 struct trace_cpu *tc = inode->i_private;
2925 struct trace_array *tr = tc->tr;
2926
2927 if (tc->cpu == RING_BUFFER_ALL_CPUS) 3076 if (tc->cpu == RING_BUFFER_ALL_CPUS)
2928 tracing_reset_online_cpus(&tr->trace_buffer); 3077 tracing_reset_online_cpus(&tr->trace_buffer);
2929 else 3078 else
@@ -2931,12 +3080,16 @@ static int tracing_open(struct inode *inode, struct file *file)
2931 } 3080 }
2932 3081
2933 if (file->f_mode & FMODE_READ) { 3082 if (file->f_mode & FMODE_READ) {
2934 iter = __tracing_open(inode, file, false); 3083 iter = __tracing_open(tr, tc, inode, file, false);
2935 if (IS_ERR(iter)) 3084 if (IS_ERR(iter))
2936 ret = PTR_ERR(iter); 3085 ret = PTR_ERR(iter);
2937 else if (trace_flags & TRACE_ITER_LATENCY_FMT) 3086 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2938 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3087 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2939 } 3088 }
3089
3090 if (ret < 0)
3091 trace_array_put(tr);
3092
2940 return ret; 3093 return ret;
2941} 3094}
2942 3095
@@ -3293,9 +3446,14 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3293 3446
3294static int tracing_trace_options_open(struct inode *inode, struct file *file) 3447static int tracing_trace_options_open(struct inode *inode, struct file *file)
3295{ 3448{
3449 struct trace_array *tr = inode->i_private;
3450
3296 if (tracing_disabled) 3451 if (tracing_disabled)
3297 return -ENODEV; 3452 return -ENODEV;
3298 3453
3454 if (trace_array_get(tr) < 0)
3455 return -ENODEV;
3456
3299 return single_open(file, tracing_trace_options_show, inode->i_private); 3457 return single_open(file, tracing_trace_options_show, inode->i_private);
3300} 3458}
3301 3459
@@ -3303,7 +3461,7 @@ static const struct file_operations tracing_iter_fops = {
3303 .open = tracing_trace_options_open, 3461 .open = tracing_trace_options_open,
3304 .read = seq_read, 3462 .read = seq_read,
3305 .llseek = seq_lseek, 3463 .llseek = seq_lseek,
3306 .release = single_release, 3464 .release = tracing_single_release_tr,
3307 .write = tracing_trace_options_write, 3465 .write = tracing_trace_options_write,
3308}; 3466};
3309 3467
@@ -3791,6 +3949,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
3791 if (tracing_disabled) 3949 if (tracing_disabled)
3792 return -ENODEV; 3950 return -ENODEV;
3793 3951
3952 if (trace_array_get(tr) < 0)
3953 return -ENODEV;
3954
3794 mutex_lock(&trace_types_lock); 3955 mutex_lock(&trace_types_lock);
3795 3956
3796 /* create a buffer to store the information to pass to userspace */ 3957 /* create a buffer to store the information to pass to userspace */
@@ -3843,6 +4004,7 @@ out:
3843fail: 4004fail:
3844 kfree(iter->trace); 4005 kfree(iter->trace);
3845 kfree(iter); 4006 kfree(iter);
4007 __trace_array_put(tr);
3846 mutex_unlock(&trace_types_lock); 4008 mutex_unlock(&trace_types_lock);
3847 return ret; 4009 return ret;
3848} 4010}
@@ -3850,6 +4012,8 @@ fail:
3850static int tracing_release_pipe(struct inode *inode, struct file *file) 4012static int tracing_release_pipe(struct inode *inode, struct file *file)
3851{ 4013{
3852 struct trace_iterator *iter = file->private_data; 4014 struct trace_iterator *iter = file->private_data;
4015 struct trace_cpu *tc = inode->i_private;
4016 struct trace_array *tr = tc->tr;
3853 4017
3854 mutex_lock(&trace_types_lock); 4018 mutex_lock(&trace_types_lock);
3855 4019
@@ -3863,6 +4027,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
3863 kfree(iter->trace); 4027 kfree(iter->trace);
3864 kfree(iter); 4028 kfree(iter);
3865 4029
4030 trace_array_put(tr);
4031
3866 return 0; 4032 return 0;
3867} 4033}
3868 4034
@@ -3939,7 +4105,7 @@ static int tracing_wait_pipe(struct file *filp)
3939 * 4105 *
3940 * iter->pos will be 0 if we haven't read anything. 4106 * iter->pos will be 0 if we haven't read anything.
3941 */ 4107 */
3942 if (!tracing_is_enabled() && iter->pos) 4108 if (!tracing_is_on() && iter->pos)
3943 break; 4109 break;
3944 } 4110 }
3945 4111
@@ -4320,6 +4486,8 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
4320 /* resize the ring buffer to 0 */ 4486 /* resize the ring buffer to 0 */
4321 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 4487 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4322 4488
4489 trace_array_put(tr);
4490
4323 return 0; 4491 return 0;
4324} 4492}
4325 4493
@@ -4328,6 +4496,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
4328 size_t cnt, loff_t *fpos) 4496 size_t cnt, loff_t *fpos)
4329{ 4497{
4330 unsigned long addr = (unsigned long)ubuf; 4498 unsigned long addr = (unsigned long)ubuf;
4499 struct trace_array *tr = filp->private_data;
4331 struct ring_buffer_event *event; 4500 struct ring_buffer_event *event;
4332 struct ring_buffer *buffer; 4501 struct ring_buffer *buffer;
4333 struct print_entry *entry; 4502 struct print_entry *entry;
@@ -4387,7 +4556,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
4387 4556
4388 local_save_flags(irq_flags); 4557 local_save_flags(irq_flags);
4389 size = sizeof(*entry) + cnt + 2; /* possible \n added */ 4558 size = sizeof(*entry) + cnt + 2; /* possible \n added */
4390 buffer = global_trace.trace_buffer.buffer; 4559 buffer = tr->trace_buffer.buffer;
4391 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 4560 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4392 irq_flags, preempt_count()); 4561 irq_flags, preempt_count());
4393 if (!event) { 4562 if (!event) {
@@ -4495,10 +4664,20 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4495 4664
4496static int tracing_clock_open(struct inode *inode, struct file *file) 4665static int tracing_clock_open(struct inode *inode, struct file *file)
4497{ 4666{
4667 struct trace_array *tr = inode->i_private;
4668 int ret;
4669
4498 if (tracing_disabled) 4670 if (tracing_disabled)
4499 return -ENODEV; 4671 return -ENODEV;
4500 4672
4501 return single_open(file, tracing_clock_show, inode->i_private); 4673 if (trace_array_get(tr))
4674 return -ENODEV;
4675
4676 ret = single_open(file, tracing_clock_show, inode->i_private);
4677 if (ret < 0)
4678 trace_array_put(tr);
4679
4680 return ret;
4502} 4681}
4503 4682
4504struct ftrace_buffer_info { 4683struct ftrace_buffer_info {
@@ -4511,12 +4690,16 @@ struct ftrace_buffer_info {
4511static int tracing_snapshot_open(struct inode *inode, struct file *file) 4690static int tracing_snapshot_open(struct inode *inode, struct file *file)
4512{ 4691{
4513 struct trace_cpu *tc = inode->i_private; 4692 struct trace_cpu *tc = inode->i_private;
4693 struct trace_array *tr = tc->tr;
4514 struct trace_iterator *iter; 4694 struct trace_iterator *iter;
4515 struct seq_file *m; 4695 struct seq_file *m;
4516 int ret = 0; 4696 int ret = 0;
4517 4697
4698 if (trace_array_get(tr) < 0)
4699 return -ENODEV;
4700
4518 if (file->f_mode & FMODE_READ) { 4701 if (file->f_mode & FMODE_READ) {
4519 iter = __tracing_open(inode, file, true); 4702 iter = __tracing_open(tr, tc, inode, file, true);
4520 if (IS_ERR(iter)) 4703 if (IS_ERR(iter))
4521 ret = PTR_ERR(iter); 4704 ret = PTR_ERR(iter);
4522 } else { 4705 } else {
@@ -4529,13 +4712,16 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
4529 kfree(m); 4712 kfree(m);
4530 return -ENOMEM; 4713 return -ENOMEM;
4531 } 4714 }
4532 iter->tr = tc->tr; 4715 iter->tr = tr;
4533 iter->trace_buffer = &tc->tr->max_buffer; 4716 iter->trace_buffer = &tc->tr->max_buffer;
4534 iter->cpu_file = tc->cpu; 4717 iter->cpu_file = tc->cpu;
4535 m->private = iter; 4718 m->private = iter;
4536 file->private_data = m; 4719 file->private_data = m;
4537 } 4720 }
4538 4721
4722 if (ret < 0)
4723 trace_array_put(tr);
4724
4539 return ret; 4725 return ret;
4540} 4726}
4541 4727
@@ -4616,9 +4802,12 @@ out:
4616static int tracing_snapshot_release(struct inode *inode, struct file *file) 4802static int tracing_snapshot_release(struct inode *inode, struct file *file)
4617{ 4803{
4618 struct seq_file *m = file->private_data; 4804 struct seq_file *m = file->private_data;
4805 int ret;
4806
4807 ret = tracing_release(inode, file);
4619 4808
4620 if (file->f_mode & FMODE_READ) 4809 if (file->f_mode & FMODE_READ)
4621 return tracing_release(inode, file); 4810 return ret;
4622 4811
4623 /* If write only, the seq_file is just a stub */ 4812 /* If write only, the seq_file is just a stub */
4624 if (m) 4813 if (m)
@@ -4684,34 +4873,38 @@ static const struct file_operations tracing_pipe_fops = {
4684}; 4873};
4685 4874
4686static const struct file_operations tracing_entries_fops = { 4875static const struct file_operations tracing_entries_fops = {
4687 .open = tracing_open_generic, 4876 .open = tracing_open_generic_tc,
4688 .read = tracing_entries_read, 4877 .read = tracing_entries_read,
4689 .write = tracing_entries_write, 4878 .write = tracing_entries_write,
4690 .llseek = generic_file_llseek, 4879 .llseek = generic_file_llseek,
4880 .release = tracing_release_generic_tc,
4691}; 4881};
4692 4882
4693static const struct file_operations tracing_total_entries_fops = { 4883static const struct file_operations tracing_total_entries_fops = {
4694 .open = tracing_open_generic, 4884 .open = tracing_open_generic_tr,
4695 .read = tracing_total_entries_read, 4885 .read = tracing_total_entries_read,
4696 .llseek = generic_file_llseek, 4886 .llseek = generic_file_llseek,
4887 .release = tracing_release_generic_tr,
4697}; 4888};
4698 4889
4699static const struct file_operations tracing_free_buffer_fops = { 4890static const struct file_operations tracing_free_buffer_fops = {
4891 .open = tracing_open_generic_tr,
4700 .write = tracing_free_buffer_write, 4892 .write = tracing_free_buffer_write,
4701 .release = tracing_free_buffer_release, 4893 .release = tracing_free_buffer_release,
4702}; 4894};
4703 4895
4704static const struct file_operations tracing_mark_fops = { 4896static const struct file_operations tracing_mark_fops = {
4705 .open = tracing_open_generic, 4897 .open = tracing_open_generic_tr,
4706 .write = tracing_mark_write, 4898 .write = tracing_mark_write,
4707 .llseek = generic_file_llseek, 4899 .llseek = generic_file_llseek,
4900 .release = tracing_release_generic_tr,
4708}; 4901};
4709 4902
4710static const struct file_operations trace_clock_fops = { 4903static const struct file_operations trace_clock_fops = {
4711 .open = tracing_clock_open, 4904 .open = tracing_clock_open,
4712 .read = seq_read, 4905 .read = seq_read,
4713 .llseek = seq_lseek, 4906 .llseek = seq_lseek,
4714 .release = single_release, 4907 .release = tracing_single_release_tr,
4715 .write = tracing_clock_write, 4908 .write = tracing_clock_write,
4716}; 4909};
4717 4910
@@ -4739,13 +4932,19 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
4739 struct trace_cpu *tc = inode->i_private; 4932 struct trace_cpu *tc = inode->i_private;
4740 struct trace_array *tr = tc->tr; 4933 struct trace_array *tr = tc->tr;
4741 struct ftrace_buffer_info *info; 4934 struct ftrace_buffer_info *info;
4935 int ret;
4742 4936
4743 if (tracing_disabled) 4937 if (tracing_disabled)
4744 return -ENODEV; 4938 return -ENODEV;
4745 4939
4940 if (trace_array_get(tr) < 0)
4941 return -ENODEV;
4942
4746 info = kzalloc(sizeof(*info), GFP_KERNEL); 4943 info = kzalloc(sizeof(*info), GFP_KERNEL);
4747 if (!info) 4944 if (!info) {
4945 trace_array_put(tr);
4748 return -ENOMEM; 4946 return -ENOMEM;
4947 }
4749 4948
4750 mutex_lock(&trace_types_lock); 4949 mutex_lock(&trace_types_lock);
4751 4950
@@ -4763,7 +4962,11 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
4763 4962
4764 mutex_unlock(&trace_types_lock); 4963 mutex_unlock(&trace_types_lock);
4765 4964
4766 return nonseekable_open(inode, filp); 4965 ret = nonseekable_open(inode, filp);
4966 if (ret < 0)
4967 trace_array_put(tr);
4968
4969 return ret;
4767} 4970}
4768 4971
4769static unsigned int 4972static unsigned int
@@ -4863,8 +5066,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
4863 5066
4864 mutex_lock(&trace_types_lock); 5067 mutex_lock(&trace_types_lock);
4865 5068
4866 WARN_ON(!iter->tr->ref); 5069 __trace_array_put(iter->tr);
4867 iter->tr->ref--;
4868 5070
4869 if (info->spare) 5071 if (info->spare)
4870 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); 5072 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
@@ -5612,15 +5814,10 @@ rb_simple_read(struct file *filp, char __user *ubuf,
5612 size_t cnt, loff_t *ppos) 5814 size_t cnt, loff_t *ppos)
5613{ 5815{
5614 struct trace_array *tr = filp->private_data; 5816 struct trace_array *tr = filp->private_data;
5615 struct ring_buffer *buffer = tr->trace_buffer.buffer;
5616 char buf[64]; 5817 char buf[64];
5617 int r; 5818 int r;
5618 5819
5619 if (buffer) 5820 r = tracer_tracing_is_on(tr);
5620 r = ring_buffer_record_is_on(buffer);
5621 else
5622 r = 0;
5623
5624 r = sprintf(buf, "%d\n", r); 5821 r = sprintf(buf, "%d\n", r);
5625 5822
5626 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5823 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -5642,11 +5839,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
5642 if (buffer) { 5839 if (buffer) {
5643 mutex_lock(&trace_types_lock); 5840 mutex_lock(&trace_types_lock);
5644 if (val) { 5841 if (val) {
5645 ring_buffer_record_on(buffer); 5842 tracer_tracing_on(tr);
5646 if (tr->current_trace->start) 5843 if (tr->current_trace->start)
5647 tr->current_trace->start(tr); 5844 tr->current_trace->start(tr);
5648 } else { 5845 } else {
5649 ring_buffer_record_off(buffer); 5846 tracer_tracing_off(tr);
5650 if (tr->current_trace->stop) 5847 if (tr->current_trace->stop)
5651 tr->current_trace->stop(tr); 5848 tr->current_trace->stop(tr);
5652 } 5849 }
@@ -5659,9 +5856,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
5659} 5856}
5660 5857
5661static const struct file_operations rb_simple_fops = { 5858static const struct file_operations rb_simple_fops = {
5662 .open = tracing_open_generic, 5859 .open = tracing_open_generic_tr,
5663 .read = rb_simple_read, 5860 .read = rb_simple_read,
5664 .write = rb_simple_write, 5861 .write = rb_simple_write,
5862 .release = tracing_release_generic_tr,
5665 .llseek = default_llseek, 5863 .llseek = default_llseek,
5666}; 5864};
5667 5865
@@ -5933,7 +6131,7 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
5933 trace_create_file("buffer_total_size_kb", 0444, d_tracer, 6131 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
5934 tr, &tracing_total_entries_fops); 6132 tr, &tracing_total_entries_fops);
5935 6133
5936 trace_create_file("free_buffer", 0644, d_tracer, 6134 trace_create_file("free_buffer", 0200, d_tracer,
5937 tr, &tracing_free_buffer_fops); 6135 tr, &tracing_free_buffer_fops);
5938 6136
5939 trace_create_file("trace_marker", 0220, d_tracer, 6137 trace_create_file("trace_marker", 0220, d_tracer,