author    | Sage Weil <sage@inktank.com> | 2013-08-15 14:11:45 -0400
committer | Sage Weil <sage@inktank.com> | 2013-08-15 14:11:45 -0400
commit    | ee3e542fec6e69bc9fb668698889a37d93950ddf (patch)
tree      | e74ee766a4764769ef1d3d45d266b4dea64101d3 /kernel/trace/trace.c
parent    | fe2a801b50c0bb8039d627e5ae1fec249d10ff39 (diff)
parent    | f1d6e17f540af37bb1891480143669ba7636c4cf (diff)
Merge remote-tracking branch 'linus/master' into testing
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r-- | kernel/trace/trace.c | 496
1 file changed, 336 insertions(+), 160 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e71a8be4a6ee..496f94d57698 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -115,6 +115,9 @@ cpumask_var_t __read_mostly tracing_buffer_mask;
 
 enum ftrace_dump_mode ftrace_dump_on_oops;
 
+/* When set, tracing will stop when a WARN*() is hit */
+int __disable_trace_on_warning;
+
 static int tracing_set_tracer(const char *buf);
 
 #define MAX_TRACER_SIZE		100
@@ -149,6 +152,13 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+static int __init stop_trace_on_warning(char *str)
+{
+	__disable_trace_on_warning = 1;
+	return 1;
+}
+__setup("traceoff_on_warning=", stop_trace_on_warning);
+
 static int __init boot_alloc_snapshot(char *str)
 {
 	allocate_snapshot = true;
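
Note: the handler above only latches a flag; nothing in this file acts on it at WARN time. A minimal sketch of the consumer side -- the actual call site lives in the WARN*() slow path outside this file, so the surrounding function here is illustrative, not part of this commit:

	/* sketch: WARN*() slow path, outside kernel/trace/trace.c */
	static void example_warn_slowpath(void)
	{
		/* ... report the warning ... */

		/* No-op unless "traceoff_on_warning" was on the command line */
		disable_trace_on_warning();
	}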
@@ -170,6 +180,7 @@ static int __init set_trace_boot_options(char *str)
 }
 __setup("trace_options=", set_trace_boot_options);
 
+
 unsigned long long ns2usecs(cycle_t nsec)
 {
 	nsec += 500;
@@ -193,6 +204,37 @@ static struct trace_array global_trace;
 
 LIST_HEAD(ftrace_trace_arrays);
 
+int trace_array_get(struct trace_array *this_tr)
+{
+	struct trace_array *tr;
+	int ret = -ENODEV;
+
+	mutex_lock(&trace_types_lock);
+	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+		if (tr == this_tr) {
+			tr->ref++;
+			ret = 0;
+			break;
+		}
+	}
+	mutex_unlock(&trace_types_lock);
+
+	return ret;
+}
+
+static void __trace_array_put(struct trace_array *this_tr)
+{
+	WARN_ON(!this_tr->ref);
+	this_tr->ref--;
+}
+
+void trace_array_put(struct trace_array *this_tr)
+{
+	mutex_lock(&trace_types_lock);
+	__trace_array_put(this_tr);
+	mutex_unlock(&trace_types_lock);
+}
+
 int filter_current_check_discard(struct ring_buffer *buffer,
 				 struct ftrace_event_call *call, void *rec,
 				 struct ring_buffer_event *event)
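
Note: trace_array_get() only succeeds while the array is still on ftrace_trace_arrays, so a file that pins its trace_array at open time cannot have it freed underneath. This is the open/release pairing the rest of the patch builds on; a minimal sketch, mirroring tracing_open_generic_tr() and tracing_release_generic_tr() introduced later in this diff:

	static int example_open(struct inode *inode, struct file *filp)
	{
		struct trace_array *tr = inode->i_private;

		if (trace_array_get(tr) < 0)	/* instance already gone */
			return -ENODEV;

		filp->private_data = tr;
		return 0;
	}

	static int example_release(struct inode *inode, struct file *file)
	{
		trace_array_put(inode->i_private);	/* drop the open-time reference */
		return 0;
	}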
@@ -201,23 +243,43 @@ int filter_current_check_discard(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
-cycle_t ftrace_now(int cpu)
+cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
 	u64 ts;
 
 	/* Early boot up does not have a buffer yet */
-	if (!global_trace.trace_buffer.buffer)
+	if (!buf->buffer)
 		return trace_clock_local();
 
-	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
-	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
+	ts = ring_buffer_time_stamp(buf->buffer, cpu);
+	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 
 	return ts;
 }
 
+cycle_t ftrace_now(int cpu)
+{
+	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+}
+
+/**
+ * tracing_is_enabled - Show if global_trace has been disabled
+ *
+ * Shows if the global trace has been enabled or not. It uses the
+ * mirror flag "buffer_disabled" to be used in fast paths such as for
+ * the irqsoff tracer. But it may be inaccurate due to races. If you
+ * need to know the accurate state, use tracing_is_on() which is a little
+ * slower, but accurate.
+ */
 int tracing_is_enabled(void)
 {
-	return tracing_is_on();
+	/*
+	 * For quick access (irqsoff uses this in fast path), just
+	 * return the mirror variable of the state of the ring buffer.
+	 * It's a little racy, but we don't really care.
+	 */
+	smp_rmb();
+	return !global_trace.buffer_disabled;
 }
 
 /*
@@ -240,7 +302,7 @@ static struct tracer *trace_types __read_mostly;
 /*
  * trace_types_lock is used to protect the trace_types list.
  */
-static DEFINE_MUTEX(trace_types_lock);
+DEFINE_MUTEX(trace_types_lock);
 
 /*
  * serialize the access of the ring buffer
@@ -330,6 +392,23 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
 	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
 
+static void tracer_tracing_on(struct trace_array *tr)
+{
+	if (tr->trace_buffer.buffer)
+		ring_buffer_record_on(tr->trace_buffer.buffer);
+	/*
+	 * This flag is looked at when buffers haven't been allocated
+	 * yet, or by some tracers (like irqsoff), that just want to
+	 * know if the ring buffer has been disabled, but it can handle
+	 * races of where it gets disabled but we still do a record.
+	 * As the check is in the fast path of the tracers, it is more
+	 * important to be fast than accurate.
+	 */
+	tr->buffer_disabled = 0;
+	/* Make the flag seen by readers */
+	smp_wmb();
+}
+
 /**
  * tracing_on - enable tracing buffers
 *
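
Note: the smp_wmb() here is meant to pair with the smp_rmb() in tracing_is_enabled() above: the writer publishes the buffer_disabled mirror flag promptly, and the reader orders its flag load before acting on it. Roughly (illustrative only; the flag is allowed to be briefly stale by design):

	/*
	 * writer (tracer_tracing_on/off)     reader (tracing_is_enabled)
	 * ------------------------------     ---------------------------
	 * ring_buffer_record_on(buffer);
	 * tr->buffer_disabled = 0;           smp_rmb();
	 * smp_wmb();                         return !global_trace.buffer_disabled;
	 */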
@@ -338,15 +417,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
  */
 void tracing_on(void)
 {
-	if (global_trace.trace_buffer.buffer)
-		ring_buffer_record_on(global_trace.trace_buffer.buffer);
-	/*
-	 * This flag is only looked at when buffers haven't been
-	 * allocated yet. We don't really care about the race
-	 * between setting this flag and actually turning
-	 * on the buffer.
-	 */
-	global_trace.buffer_disabled = 0;
+	tracer_tracing_on(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_on);
 
@@ -540,6 +611,23 @@ void tracing_snapshot_alloc(void)
 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
+static void tracer_tracing_off(struct trace_array *tr)
+{
+	if (tr->trace_buffer.buffer)
+		ring_buffer_record_off(tr->trace_buffer.buffer);
+	/*
+	 * This flag is looked at when buffers haven't been allocated
+	 * yet, or by some tracers (like irqsoff), that just want to
+	 * know if the ring buffer has been disabled, but it can handle
+	 * races of where it gets disabled but we still do a record.
+	 * As the check is in the fast path of the tracers, it is more
+	 * important to be fast than accurate.
+	 */
+	tr->buffer_disabled = 1;
+	/* Make the flag seen by readers */
+	smp_wmb();
+}
+
 /**
  * tracing_off - turn off tracing buffers
 *
@@ -550,26 +638,35 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
  */
 void tracing_off(void)
 {
-	if (global_trace.trace_buffer.buffer)
-		ring_buffer_record_off(global_trace.trace_buffer.buffer);
-	/*
-	 * This flag is only looked at when buffers haven't been
-	 * allocated yet. We don't really care about the race
-	 * between setting this flag and actually turning
-	 * on the buffer.
-	 */
-	global_trace.buffer_disabled = 1;
+	tracer_tracing_off(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_off);
 
+void disable_trace_on_warning(void)
+{
+	if (__disable_trace_on_warning)
+		tracing_off();
+}
+
+/**
+ * tracer_tracing_is_on - show real state of ring buffer enabled
+ * @tr : the trace array to know if ring buffer is enabled
+ *
+ * Shows real state of the ring buffer if it is enabled or not.
+ */
+static int tracer_tracing_is_on(struct trace_array *tr)
+{
+	if (tr->trace_buffer.buffer)
+		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
+	return !tr->buffer_disabled;
+}
+
 /**
  * tracing_is_on - show state of ring buffers enabled
  */
 int tracing_is_on(void)
 {
-	if (global_trace.trace_buffer.buffer)
-		return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
-	return !global_trace.buffer_disabled;
+	return tracer_tracing_is_on(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
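
Note the split this creates, per the kerneldoc added above: tracing_is_enabled() reads the racy buffer_disabled mirror, while tracing_is_on() now asks the ring buffer itself through tracer_tracing_is_on(). That is also why tracing_wait_pipe(), later in this diff, switches from the former to the latter:

	tracing_is_enabled();	/* mirror flag + smp_rmb(): fast, may lag briefly */
	tracing_is_on();	/* queries the ring buffer: slower, authoritative */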
@@ -1119,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
 	/* Make sure all commits have finished */
 	synchronize_sched();
 
-	buf->time_start = ftrace_now(buf->cpu);
+	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 
 	for_each_online_cpu(cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
@@ -1127,23 +1224,17 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
 	ring_buffer_record_enable(buffer);
 }
 
-void tracing_reset_current(int cpu)
-{
-	tracing_reset(&global_trace.trace_buffer, cpu);
-}
-
+/* Must have trace_types_lock held */
 void tracing_reset_all_online_cpus(void)
 {
 	struct trace_array *tr;
 
-	mutex_lock(&trace_types_lock);
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 		tracing_reset_online_cpus(&tr->trace_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
 		tracing_reset_online_cpus(&tr->max_buffer);
 #endif
 	}
-	mutex_unlock(&trace_types_lock);
 }
 
 #define SAVED_CMDLINES 128
@@ -1543,15 +1634,6 @@ trace_function(struct trace_array *tr,
 		__buffer_unlock_commit(buffer, event);
 }
 
-void
-ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-       unsigned long ip, unsigned long parent_ip, unsigned long flags,
-       int pc)
-{
-	if (likely(!atomic_read(&data->disabled)))
-		trace_function(tr, ip, parent_ip, flags, pc);
-}
-
 #ifdef CONFIG_STACKTRACE
 
 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
@@ -2760,6 +2842,17 @@ static int s_show(struct seq_file *m, void *v)
 	return 0;
 }
 
+/*
+ * Should be used after trace_array_get(), trace_types_lock
+ * ensures that i_cdev was already initialized.
+ */
+static inline int tracing_get_cpu(struct inode *inode)
+{
+	if (inode->i_cdev) /* See trace_create_cpu_file() */
+		return (long)inode->i_cdev - 1;
+	return RING_BUFFER_ALL_CPUS;
+}
+
 static const struct seq_operations tracer_seq_ops = {
 	.start		= s_start,
 	.next		= s_next,
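
Note: tracing_get_cpu() decodes a cpu number that trace_create_cpu_file() (later in this diff) stashes in the otherwise unused inode->i_cdev. The value is stored as cpu + 1 so that cpu 0 does not collide with the NULL that marks non-per-cpu files. The two halves side by side (the surrounding code here is illustrative):

	/* encode, at file-creation time -- see trace_create_cpu_file() */
	inode->i_cdev = (void *)(cpu + 1);	/* cpu 0 becomes 1, never NULL */

	/* decode, at open/read time -- see tracing_get_cpu() */
	if (inode->i_cdev)
		cpu = (long)inode->i_cdev - 1;	/* undo the +1 */
	else
		cpu = RING_BUFFER_ALL_CPUS;	/* top-level, non-per-cpu file */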
@@ -2770,8 +2863,7 @@ static const struct seq_operations tracer_seq_ops = {
 static struct trace_iterator *
 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	int cpu;
 
@@ -2812,8 +2904,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	iter->trace_buffer = &tr->trace_buffer;
 	iter->snapshot = snapshot;
 	iter->pos = -1;
+	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
-	iter->cpu_file = tc->cpu;
 
 	/* Notify the tracer early; before we stop tracing. */
 	if (iter->trace && iter->trace->open)
@@ -2850,8 +2942,6 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 		tracing_iter_reset(iter, cpu);
 	}
 
-	tr->ref++;
-
 	mutex_unlock(&trace_types_lock);
 
 	return iter;
@@ -2874,24 +2964,41 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+/*
+ * Open and update trace_array ref count.
+ * Must have the current trace_array passed to it.
+ */
+static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
+{
+	struct trace_array *tr = inode->i_private;
+
+	if (tracing_disabled)
+		return -ENODEV;
+
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
+	filp->private_data = inode->i_private;
+
+	return 0;
+}
+
 static int tracing_release(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
 	struct seq_file *m = file->private_data;
 	struct trace_iterator *iter;
-	struct trace_array *tr;
 	int cpu;
 
-	if (!(file->f_mode & FMODE_READ))
+	if (!(file->f_mode & FMODE_READ)) {
+		trace_array_put(tr);
 		return 0;
+	}
 
+	/* Writes do not use seq_file */
 	iter = m->private;
-	tr = iter->tr;
-
 	mutex_lock(&trace_types_lock);
 
-	WARN_ON(!tr->ref);
-	tr->ref--;
-
 	for_each_tracing_cpu(cpu) {
 		if (iter->buffer_iter[cpu])
 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
@@ -2903,6 +3010,9 @@ static int tracing_release(struct inode *inode, struct file *file)
 	if (!iter->snapshot)
 		/* reenable tracing if it was previously enabled */
 		tracing_start_tr(tr);
+
+	__trace_array_put(tr);
+
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
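
Note: because tracing_release() already holds trace_types_lock at this point, it drops the reference with the unlocked __trace_array_put(); trace_array_put() is the variant that takes the mutex itself. Roughly:

	/* inside a trace_types_lock section */
	mutex_lock(&trace_types_lock);
	/* ... */
	__trace_array_put(tr);		/* lock already held */
	mutex_unlock(&trace_types_lock);

	/* with no lock held */
	trace_array_put(tr);		/* acquires and releases the mutex itself */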
@@ -2910,24 +3020,44 @@ static int tracing_release(struct inode *inode, struct file *file)
 	kfree(iter->trace);
 	kfree(iter->buffer_iter);
 	seq_release_private(inode, file);
+
+	return 0;
+}
+
+static int tracing_release_generic_tr(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
 	return 0;
 }
 
+static int tracing_single_release_tr(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+
+	return single_release(inode, file);
+}
+
 static int tracing_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	int ret = 0;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	/* If this file was open for write, then erase contents */
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC)) {
-		struct trace_cpu *tc = inode->i_private;
-		struct trace_array *tr = tc->tr;
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		int cpu = tracing_get_cpu(inode);
 
-		if (tc->cpu == RING_BUFFER_ALL_CPUS)
+		if (cpu == RING_BUFFER_ALL_CPUS)
 			tracing_reset_online_cpus(&tr->trace_buffer);
 		else
-			tracing_reset(&tr->trace_buffer, tc->cpu);
+			tracing_reset(&tr->trace_buffer, cpu);
 	}
 
 	if (file->f_mode & FMODE_READ) {
@@ -2937,6 +3067,10 @@ static int tracing_open(struct inode *inode, struct file *file)
 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
 	}
+
+	if (ret < 0)
+		trace_array_put(tr);
+
 	return ret;
 }
 
@@ -3293,17 +3427,27 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_trace_options_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
+	int ret;
+
 	if (tracing_disabled)
 		return -ENODEV;
 
-	return single_open(file, tracing_trace_options_show, inode->i_private);
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
+	ret = single_open(file, tracing_trace_options_show, inode->i_private);
+	if (ret < 0)
+		trace_array_put(tr);
+
+	return ret;
 }
 
 static const struct file_operations tracing_iter_fops = {
 	.open		= tracing_trace_options_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= single_release,
+	.release	= tracing_single_release_tr,
 	.write		= tracing_trace_options_write,
 };
 
@@ -3379,14 +3523,14 @@ static const char readme_msg[] =
 "\n  snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
 "\t\t\t  Read the contents for more information\n"
 #endif
-#ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_STACK_TRACER
 "  stack_trace\t\t- Shows the max stack trace when active\n"
 "  stack_max_size\t- Shows current max stack size that was traced\n"
 "\t\t\t  Write into this file to reset the max size (trigger a new trace)\n"
 #ifdef CONFIG_DYNAMIC_FTRACE
 "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
 #endif
-#endif /* CONFIG_STACKTRACE */
+#endif /* CONFIG_STACK_TRACER */
 ;
 
 static ssize_t
@@ -3783,20 +3927,23 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	int ret = 0;
 
 	if (tracing_disabled)
 		return -ENODEV;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	mutex_lock(&trace_types_lock);
 
 	/* create a buffer to store the information to pass to userspace */
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter) {
 		ret = -ENOMEM;
+		__trace_array_put(tr);
 		goto out;
 	}
 
@@ -3826,9 +3973,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (trace_clocks[tr->clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	iter->cpu_file = tc->cpu;
-	iter->tr = tc->tr;
-	iter->trace_buffer = &tc->tr->trace_buffer;
+	iter->tr = tr;
+	iter->trace_buffer = &tr->trace_buffer;
+	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
 	filp->private_data = iter;
 
@@ -3843,6 +3990,7 @@ out:
 fail:
 	kfree(iter->trace);
 	kfree(iter);
+	__trace_array_put(tr);
 	mutex_unlock(&trace_types_lock);
 	return ret;
 }
@@ -3850,6 +3998,7 @@ fail:
 static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
 	struct trace_iterator *iter = file->private_data;
+	struct trace_array *tr = inode->i_private;
 
 	mutex_lock(&trace_types_lock);
 
@@ -3863,6 +4012,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 	kfree(iter->trace);
 	kfree(iter);
 
+	trace_array_put(tr);
+
 	return 0;
 }
 
@@ -3939,7 +4090,7 @@ static int tracing_wait_pipe(struct file *filp)
 	 *
 	 * iter->pos will be 0 if we haven't read anything.
 	 */
-	if (!tracing_is_enabled() && iter->pos)
+	if (!tracing_is_on() && iter->pos)
 		break;
 	}
 
@@ -4000,6 +4151,7 @@ waitagain:
 	memset(&iter->seq, 0,
 	       sizeof(struct trace_iterator) -
 	       offsetof(struct trace_iterator, seq));
+	cpumask_clear(iter->started);
 	iter->pos = -1;
 
 	trace_event_read_lock();
@@ -4200,15 +4352,16 @@ static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
-	struct trace_array *tr = tc->tr;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
+	int cpu = tracing_get_cpu(inode);
 	char buf[64];
 	int r = 0;
 	ssize_t ret;
 
 	mutex_lock(&trace_types_lock);
 
-	if (tc->cpu == RING_BUFFER_ALL_CPUS) {
+	if (cpu == RING_BUFFER_ALL_CPUS) {
 		int cpu, buf_size_same;
 		unsigned long size;
 
@@ -4235,7 +4388,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		} else
 			r = sprintf(buf, "X\n");
 	} else
-		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
+		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -4247,7 +4400,8 @@ static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
 	unsigned long val;
 	int ret;
 
@@ -4261,8 +4415,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 
 	/* value is in KB */
 	val <<= 10;
-
-	ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
+	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
 	if (ret < 0)
 		return ret;
 
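
Note: with struct trace_cpu gone, the per-cpu file callbacks recover both pieces of state from the inode rather than from filp->private_data: the trace_array from i_private and the cpu from the encoded i_cdev. The pattern, as used by tracing_entries_read()/tracing_entries_write() here and tracing_stats_read() below:

	struct inode *inode = file_inode(filp);		/* the backing debugfs inode */
	struct trace_array *tr = inode->i_private;	/* set at file-creation time */
	int cpu = tracing_get_cpu(inode);		/* RING_BUFFER_ALL_CPUS if not per-cpu */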
@@ -4316,10 +4469,12 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 
 	/* disable tracing ? */
 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
-		tracing_off();
+		tracer_tracing_off(tr);
 	/* resize the ring buffer to 0 */
 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 
+	trace_array_put(tr);
+
 	return 0;
 }
 
@@ -4328,6 +4483,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *fpos)
 {
 	unsigned long addr = (unsigned long)ubuf;
+	struct trace_array *tr = filp->private_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
 	struct print_entry *entry;
@@ -4387,7 +4543,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 
 	local_save_flags(irq_flags);
 	size = sizeof(*entry) + cnt + 2; /* possible \n added */
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = tr->trace_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 					  irq_flags, preempt_count());
 	if (!event) {
@@ -4478,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	 * New clock may not be consistent with the previous clock.
 	 * Reset the buffer so that it doesn't have incomparable timestamps.
 	 */
-	tracing_reset_online_cpus(&global_trace.trace_buffer);
+	tracing_reset_online_cpus(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
-	tracing_reset_online_cpus(&global_trace.max_buffer);
+	tracing_reset_online_cpus(&tr->max_buffer);
 #endif
 
 	mutex_unlock(&trace_types_lock);
@@ -4495,10 +4651,20 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_clock_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
+	int ret;
+
 	if (tracing_disabled)
 		return -ENODEV;
 
-	return single_open(file, tracing_clock_show, inode->i_private);
+	if (trace_array_get(tr))
+		return -ENODEV;
+
+	ret = single_open(file, tracing_clock_show, inode->i_private);
+	if (ret < 0)
+		trace_array_put(tr);
+
+	return ret;
 }
 
 struct ftrace_buffer_info {
@@ -4510,31 +4676,40 @@ struct ftrace_buffer_info {
 #ifdef CONFIG_TRACER_SNAPSHOT
 static int tracing_snapshot_open(struct inode *inode, struct file *file)
 {
-	struct trace_cpu *tc = inode->i_private;
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	struct seq_file *m;
 	int ret = 0;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	if (file->f_mode & FMODE_READ) {
 		iter = __tracing_open(inode, file, true);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
 	} else {
 		/* Writes still need the seq_file to hold the private data */
+		ret = -ENOMEM;
 		m = kzalloc(sizeof(*m), GFP_KERNEL);
 		if (!m)
-			return -ENOMEM;
+			goto out;
 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 		if (!iter) {
 			kfree(m);
-			return -ENOMEM;
+			goto out;
 		}
-		iter->tr = tc->tr;
-		iter->trace_buffer = &tc->tr->max_buffer;
-		iter->cpu_file = tc->cpu;
+		ret = 0;
+
+		iter->tr = tr;
+		iter->trace_buffer = &tr->max_buffer;
+		iter->cpu_file = tracing_get_cpu(inode);
 		m->private = iter;
 		file->private_data = m;
 	}
+out:
+	if (ret < 0)
+		trace_array_put(tr);
 
 	return ret;
 }
@@ -4616,9 +4791,12 @@ out:
 static int tracing_snapshot_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = file->private_data;
+	int ret;
+
+	ret = tracing_release(inode, file);
 
 	if (file->f_mode & FMODE_READ)
-		return tracing_release(inode, file);
+		return ret;
 
 	/* If write only, the seq_file is just a stub */
 	if (m)
@@ -4684,34 +4862,38 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
 	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_total_entries_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_total_entries_read,
 	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_free_buffer_fops = {
+	.open		= tracing_open_generic_tr,
 	.write		= tracing_free_buffer_write,
 	.release	= tracing_free_buffer_release,
 };
 
 static const struct file_operations tracing_mark_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.write		= tracing_mark_write,
 	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
 };
 
 static const struct file_operations trace_clock_fops = {
 	.open		= tracing_clock_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= single_release,
+	.release	= tracing_single_release_tr,
 	.write		= tracing_clock_write,
 };
 
@@ -4736,23 +4918,26 @@ static const struct file_operations snapshot_raw_fops = {
 
 static int tracing_buffers_open(struct inode *inode, struct file *filp)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct ftrace_buffer_info *info;
+	int ret;
 
 	if (tracing_disabled)
 		return -ENODEV;
 
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
+	if (!info) {
+		trace_array_put(tr);
 		return -ENOMEM;
+	}
 
 	mutex_lock(&trace_types_lock);
 
-	tr->ref++;
-
 	info->iter.tr		= tr;
-	info->iter.cpu_file	= tc->cpu;
+	info->iter.cpu_file	= tracing_get_cpu(inode);
 	info->iter.trace	= tr->current_trace;
 	info->iter.trace_buffer = &tr->trace_buffer;
 	info->spare		= NULL;
@@ -4763,7 +4948,11 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 
 	mutex_unlock(&trace_types_lock);
 
-	return nonseekable_open(inode, filp);
+	ret = nonseekable_open(inode, filp);
+	if (ret < 0)
+		trace_array_put(tr);
+
+	return ret;
 }
 
 static unsigned int
@@ -4863,8 +5052,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 
 	mutex_lock(&trace_types_lock);
 
-	WARN_ON(!iter->tr->ref);
-	iter->tr->ref--;
+	__trace_array_put(iter->tr);
 
 	if (info->spare)
 		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
@@ -5066,14 +5254,14 @@ static ssize_t
 tracing_stats_read(struct file *filp, char __user *ubuf,
 		   size_t count, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
-	struct trace_array *tr = tc->tr;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
 	struct trace_buffer *trace_buf = &tr->trace_buffer;
+	int cpu = tracing_get_cpu(inode);
 	struct trace_seq *s;
 	unsigned long cnt;
 	unsigned long long t;
 	unsigned long usec_rem;
-	int cpu = tc->cpu;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -5126,9 +5314,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 }
 
 static const struct file_operations tracing_stats_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_stats_read,
 	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -5317,10 +5506,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 	return tr->percpu_dir;
 }
 
+static struct dentry *
+trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
+		      void *data, long cpu, const struct file_operations *fops)
+{
+	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
+
+	if (ret) /* See tracing_get_cpu() */
+		ret->d_inode->i_cdev = (void *)(cpu + 1);
+	return ret;
+}
+
 static void
 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
-	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 	struct dentry *d_cpu;
 	char cpu_dir[30]; /* 30 characters should be more than enough */
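
Note: trace_create_cpu_file() is a thin wrapper over trace_create_file() that additionally tags the new inode's i_cdev with the cpu, completing the encode half of the tracing_get_cpu() scheme shown earlier. The next hunk converts every per-cpu debugfs file to it, passing the trace_array and cpu directly instead of a per-cpu trace_cpu holder, e.g.:

	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
			      tr, cpu, &tracing_pipe_fops);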
@@ -5336,28 +5535,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 	}
 
 	/* per cpu trace_pipe */
-	trace_create_file("trace_pipe", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_pipe_fops);
+	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+				tr, cpu, &tracing_pipe_fops);
 
 	/* per cpu trace */
-	trace_create_file("trace", 0644, d_cpu,
-			(void *)&data->trace_cpu, &tracing_fops);
+	trace_create_cpu_file("trace", 0644, d_cpu,
+				tr, cpu, &tracing_fops);
 
-	trace_create_file("trace_pipe_raw", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_buffers_fops);
+	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+				tr, cpu, &tracing_buffers_fops);
 
-	trace_create_file("stats", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_stats_fops);
+	trace_create_cpu_file("stats", 0444, d_cpu,
+				tr, cpu, &tracing_stats_fops);
 
-	trace_create_file("buffer_size_kb", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_entries_fops);
+	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+				tr, cpu, &tracing_entries_fops);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-	trace_create_file("snapshot", 0644, d_cpu,
-			(void *)&data->trace_cpu, &snapshot_fops);
+	trace_create_cpu_file("snapshot", 0644, d_cpu,
+				tr, cpu, &snapshot_fops);
 
-	trace_create_file("snapshot_raw", 0444, d_cpu,
-			(void *)&data->trace_cpu, &snapshot_raw_fops);
+	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+				tr, cpu, &snapshot_raw_fops);
 #endif
 }
 
@@ -5612,15 +5811,10 @@ rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
 	struct trace_array *tr = filp->private_data;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	char buf[64];
 	int r;
 
-	if (buffer)
-		r = ring_buffer_record_is_on(buffer);
-	else
-		r = 0;
-
+	r = tracer_tracing_is_on(tr);
 	r = sprintf(buf, "%d\n", r);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -5642,11 +5836,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 	if (buffer) {
 		mutex_lock(&trace_types_lock);
 		if (val) {
-			ring_buffer_record_on(buffer);
+			tracer_tracing_on(tr);
 			if (tr->current_trace->start)
 				tr->current_trace->start(tr);
 		} else {
-			ring_buffer_record_off(buffer);
+			tracer_tracing_off(tr);
 			if (tr->current_trace->stop)
 				tr->current_trace->stop(tr);
 		}
5659 | } | 5853 | } |
5660 | 5854 | ||
5661 | static const struct file_operations rb_simple_fops = { | 5855 | static const struct file_operations rb_simple_fops = { |
5662 | .open = tracing_open_generic, | 5856 | .open = tracing_open_generic_tr, |
5663 | .read = rb_simple_read, | 5857 | .read = rb_simple_read, |
5664 | .write = rb_simple_write, | 5858 | .write = rb_simple_write, |
5859 | .release = tracing_release_generic_tr, | ||
5665 | .llseek = default_llseek, | 5860 | .llseek = default_llseek, |
5666 | }; | 5861 | }; |
5667 | 5862 | ||
@@ -5670,17 +5865,6 @@ struct dentry *trace_instance_dir;
 static void
 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
 
-static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
-{
-	int cpu;
-
-	for_each_tracing_cpu(cpu) {
-		memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
-		per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
-		per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
-	}
-}
-
 static int
 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
 {
@@ -5698,8 +5882,6 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
 		return -ENOMEM;
 	}
 
-	init_trace_buffers(tr, buf);
-
 	/* Allocate the first page for all buffers */
 	set_buffer_entries(&tr->trace_buffer,
 			   ring_buffer_size(tr->trace_buffer.buffer, 0));
@@ -5766,17 +5948,15 @@ static int new_instance_create(const char *name)
 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
 		goto out_free_tr;
 
-	/* Holder for file callbacks */
-	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
-	tr->trace_cpu.tr = tr;
-
 	tr->dir = debugfs_create_dir(name, trace_instance_dir);
 	if (!tr->dir)
 		goto out_free_tr;
 
 	ret = event_trace_add_tracer(tr->dir, tr);
-	if (ret)
+	if (ret) {
+		debugfs_remove_recursive(tr->dir);
 		goto out_free_tr;
+	}
 
 	init_tracer_debugfs(tr, tr->dir);
 
@@ -5922,18 +6102,18 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 			  tr, &tracing_iter_fops);
 
 	trace_create_file("trace", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_fops);
+			  tr, &tracing_fops);
 
 	trace_create_file("trace_pipe", 0444, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_pipe_fops);
+			  tr, &tracing_pipe_fops);
 
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_entries_fops);
+			  tr, &tracing_entries_fops);
 
 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
 			  tr, &tracing_total_entries_fops);
 
-	trace_create_file("free_buffer", 0644, d_tracer,
+	trace_create_file("free_buffer", 0200, d_tracer,
 			  tr, &tracing_free_buffer_fops);
 
 	trace_create_file("trace_marker", 0220, d_tracer,
@@ -5943,11 +6123,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 			  &trace_clock_fops);
 
 	trace_create_file("tracing_on", 0644, d_tracer,
 			  tr, &rb_simple_fops);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 	trace_create_file("snapshot", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &snapshot_fops);
+			  tr, &snapshot_fops);
 #endif
 
 	for_each_tracing_cpu(cpu)
@@ -6241,10 +6421,6 @@ __init static int tracer_alloc_buffers(void)
 
 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
 
-	/* Holder for file callbacks */
-	global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
-	global_trace.trace_cpu.tr = &global_trace;
-
 	INIT_LIST_HEAD(&global_trace.systems);
 	INIT_LIST_HEAD(&global_trace.events);
 	list_add(&global_trace.list, &ftrace_trace_arrays);