Diffstat (limited to 'kernel/trace/trace.c'):
-rw-r--r--  kernel/trace/trace.c | 352
 1 file changed, 269 insertions(+), 83 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e5125677efa0..4f1dade56981 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -39,6 +39,7 @@
 #include <linux/poll.h>
 #include <linux/nmi.h>
 #include <linux/fs.h>
+#include <linux/sched/rt.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -249,7 +250,7 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 static struct tracer *trace_types __read_mostly;
 
 /* current_trace points to the tracer that is currently active */
-static struct tracer *current_trace __read_mostly;
+static struct tracer *current_trace __read_mostly = &nop_trace;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
@@ -703,18 +704,22 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct ring_buffer *buf = tr->buffer;
+	struct ring_buffer *buf;
 
 	if (trace_stop_count)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (!current_trace->use_max_tr) {
-		WARN_ON_ONCE(1);
+
+	if (!current_trace->allocated_snapshot) {
+		/* Only the nop tracer should hit this when disabling */
+		WARN_ON_ONCE(current_trace != &nop_trace);
 		return;
 	}
+
 	arch_spin_lock(&ftrace_max_lock);
 
+	buf = tr->buffer;
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
@@ -739,10 +744,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (!current_trace->use_max_tr) {
-		WARN_ON_ONCE(1);
+	if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
 		return;
-	}
 
 	arch_spin_lock(&ftrace_max_lock);
 
@@ -862,10 +865,13 @@ int register_tracer(struct tracer *type)
 
 	current_trace = type;
 
-	/* If we expanded the buffers, make sure the max is expanded too */
-	if (ring_buffer_expanded && type->use_max_tr)
-		ring_buffer_resize(max_tr.buffer, trace_buf_size,
-				   RING_BUFFER_ALL_CPUS);
+	if (type->use_max_tr) {
+		/* If we expanded the buffers, make sure the max is expanded too */
+		if (ring_buffer_expanded)
+			ring_buffer_resize(max_tr.buffer, trace_buf_size,
+					   RING_BUFFER_ALL_CPUS);
+		type->allocated_snapshot = true;
+	}
 
 	/* the test is responsible for initializing and enabling */
 	pr_info("Testing tracer %s: ", type->name);
@@ -881,10 +887,14 @@ int register_tracer(struct tracer *type)
 		/* Only reset on passing, to avoid touching corrupted buffers */
 		tracing_reset_online_cpus(tr);
 
-		/* Shrink the max buffer again */
-		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, 1,
-					   RING_BUFFER_ALL_CPUS);
+		if (type->use_max_tr) {
+			type->allocated_snapshot = false;
+
+			/* Shrink the max buffer again */
+			if (ring_buffer_expanded)
+				ring_buffer_resize(max_tr.buffer, 1,
+						   RING_BUFFER_ALL_CPUS);
+		}
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -922,6 +932,9 @@ void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
 
+	if (!buffer)
+		return;
+
 	ring_buffer_record_disable(buffer);
 
 	/* Make sure all commits have finished */
@@ -936,6 +949,9 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	struct ring_buffer *buffer = tr->buffer;
 	int cpu;
 
+	if (!buffer)
+		return;
+
 	ring_buffer_record_disable(buffer);
 
 	/* Make sure all commits have finished */
@@ -1167,7 +1183,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
 	entry->preempt_count = pc & 0xff;
 	entry->pid = (tsk) ? tsk->pid : 0;
-	entry->padding = 0;
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1335,7 +1350,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	 */
 	preempt_disable_notrace();
 
-	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
 	/*
 	 * We don't need any atomic variables, just a barrier.
 	 * If an interrupt comes in, we don't care, because it would
@@ -1389,7 +1404,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
  out:
 	/* Again, don't let gcc optimize things here */
 	barrier();
-	__get_cpu_var(ftrace_stack_reserve)--;
+	__this_cpu_dec(ftrace_stack_reserve);
 	preempt_enable_notrace();
 
 }
@@ -1517,7 +1532,6 @@ static struct trace_buffer_struct *trace_percpu_nmi_buffer;
 static char *get_trace_buf(void)
 {
 	struct trace_buffer_struct *percpu_buffer;
-	struct trace_buffer_struct *buffer;
 
 	/*
 	 * If we have allocated per cpu buffers, then we do not
@@ -1535,9 +1549,7 @@ static char *get_trace_buf(void)
 	if (!percpu_buffer)
 		return NULL;
 
-	buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
-
-	return buffer->buffer;
+	return this_cpu_ptr(&percpu_buffer->buffer[0]);
 }
 
 static int alloc_percpu_trace_buffer(void)
@@ -1942,21 +1954,27 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
 	struct trace_iterator *iter = m->private;
-	static struct tracer *old_tracer;
 	int cpu_file = iter->cpu_file;
 	void *p = NULL;
 	loff_t l = 0;
 	int cpu;
 
-	/* copy the tracer to avoid using a global lock all around */
+	/*
+	 * copy the tracer to avoid using a global lock all around.
+	 * iter->trace is a copy of current_trace, the pointer to the
+	 * name may be used instead of a strcmp(), as iter->trace->name
+	 * will point to the same string as current_trace->name.
+	 */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(old_tracer != current_trace && current_trace)) {
-		old_tracer = current_trace;
+	if (unlikely(current_trace && iter->trace->name != current_trace->name))
 		*iter->trace = *current_trace;
-	}
 	mutex_unlock(&trace_types_lock);
 
-	atomic_inc(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return ERR_PTR(-EBUSY);
+
+	if (!iter->snapshot)
+		atomic_inc(&trace_record_cmdline_disabled);
 
 	if (*pos != iter->pos) {
 		iter->ent = NULL;
@@ -1995,7 +2013,11 @@ static void s_stop(struct seq_file *m, void *p)
 {
 	struct trace_iterator *iter = m->private;
 
-	atomic_dec(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return;
+
+	if (!iter->snapshot)
+		atomic_dec(&trace_record_cmdline_disabled);
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 }
@@ -2080,8 +2102,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 	unsigned long total;
 	const char *name = "preemption";
 
-	if (type)
-		name = type->name;
+	name = type->name;
 
 	get_total_entries(tr, &total, &entries);
 
@@ -2380,6 +2401,27 @@ static void test_ftrace_alive(struct seq_file *m)
 	seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
 }
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
+{
+	if (iter->trace->allocated_snapshot)
+		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+	else
+		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+
+	seq_printf(m, "# Snapshot commands:\n");
+	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
+	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
+	seq_printf(m, "#                     Takes a snapshot of the main buffer.\n");
+	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+	seq_printf(m, "#                     (Doesn't have to be '2' works with any number that\n");
+	seq_printf(m, "#                      is not a '0' or '1')\n");
+}
+#else
+/* Should never be called */
+static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
+#endif
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2391,7 +2433,9 @@ static int s_show(struct seq_file *m, void *v)
 		seq_puts(m, "#\n");
 		test_ftrace_alive(m);
 	}
-	if (iter->trace && iter->trace->print_header)
+	if (iter->snapshot && trace_empty(iter))
+		print_snapshot_help(m, iter);
+	else if (iter->trace && iter->trace->print_header)
 		iter->trace->print_header(m);
 	else
 		trace_default_header(m);
@@ -2430,7 +2474,7 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
 	long cpu_file = (long) inode->i_private;
 	struct trace_iterator *iter;
@@ -2457,16 +2501,16 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (!iter->trace)
 		goto fail;
 
-	if (current_trace)
-		*iter->trace = *current_trace;
+	*iter->trace = *current_trace;
 
 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	if (current_trace && current_trace->print_max)
+	if (current_trace->print_max || snapshot)
 		iter->tr = &max_tr;
 	else
 		iter->tr = &global_trace;
+	iter->snapshot = snapshot;
 	iter->pos = -1;
 	mutex_init(&iter->mutex);
 	iter->cpu_file = cpu_file;
@@ -2483,8 +2527,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (trace_clocks[trace_clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	/* stop the trace while dumping */
-	tracing_stop();
+	/* stop the trace while dumping if we are not opening "snapshot" */
+	if (!iter->snapshot)
+		tracing_stop();
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
@@ -2547,8 +2592,9 @@ static int tracing_release(struct inode *inode, struct file *file)
 	if (iter->trace && iter->trace->close)
 		iter->trace->close(iter);
 
-	/* reenable tracing if it was previously enabled */
-	tracing_start();
+	if (!iter->snapshot)
+		/* reenable tracing if it was previously enabled */
+		tracing_start();
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
@@ -2576,7 +2622,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 	}
 
 	if (file->f_mode & FMODE_READ) {
-		iter = __tracing_open(inode, file);
+		iter = __tracing_open(inode, file, false);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -2835,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 	return -EINVAL;
 }
 
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+		return -1;
+
+	return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
 {
 	/* do nothing if flag is already set */
 	if (!!(trace_flags & mask) == !!enabled)
-		return;
+		return 0;
+
+	/* Give the tracer a chance to approve the change */
+	if (current_trace->flag_changed)
+		if (current_trace->flag_changed(current_trace, mask, !!enabled))
+			return -EINVAL;
 
 	if (enabled)
 		trace_flags |= mask;
@@ -2849,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 	if (mask == TRACE_ITER_RECORD_CMD)
 		trace_event_enable_cmd_record(enabled);
 
-	if (mask == TRACE_ITER_OVERWRITE)
+	if (mask == TRACE_ITER_OVERWRITE) {
 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+		ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+	}
 
 	if (mask == TRACE_ITER_PRINTK)
 		trace_printk_start_stop_comm(enabled);
+
+	return 0;
 }
 
 static int trace_set_options(char *option)
 {
 	char *cmp;
 	int neg = 0;
-	int ret = 0;
+	int ret = -ENODEV;
 	int i;
 
 	cmp = strstrip(option);
@@ -2870,19 +2936,20 @@ static int trace_set_options(char *option)
 		cmp += 2;
 	}
 
+	mutex_lock(&trace_types_lock);
+
 	for (i = 0; trace_options[i]; i++) {
 		if (strcmp(cmp, trace_options[i]) == 0) {
-			set_tracer_flags(1 << i, !neg);
+			ret = set_tracer_flag(1 << i, !neg);
 			break;
 		}
 	}
 
 	/* If no option could be set, test the specific tracer options */
-	if (!trace_options[i]) {
-		mutex_lock(&trace_types_lock);
+	if (!trace_options[i])
 		ret = set_tracer_option(current_trace, cmp, neg);
-		mutex_unlock(&trace_types_lock);
-	}
+
+	mutex_unlock(&trace_types_lock);
 
 	return ret;
 }
@@ -2892,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	char buf[64];
+	int ret;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -2899,7 +2967,11 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 	if (copy_from_user(&buf, ubuf, cnt))
 		return -EFAULT;
 
-	trace_set_options(buf);
+	buf[cnt] = 0;
+
+	ret = trace_set_options(buf);
+	if (ret < 0)
+		return ret;
 
 	*ppos += cnt;
 
@@ -3012,10 +3084,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
 	int r;
 
 	mutex_lock(&trace_types_lock);
-	if (current_trace)
-		r = sprintf(buf, "%s\n", current_trace->name);
-	else
-		r = sprintf(buf, "\n");
+	r = sprintf(buf, "%s\n", current_trace->name);
 	mutex_unlock(&trace_types_lock);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3181,6 +3250,7 @@ static int tracing_set_tracer(const char *buf)
 	static struct trace_option_dentry *topts;
 	struct trace_array *tr = &global_trace;
 	struct tracer *t;
+	bool had_max_tr;
 	int ret = 0;
 
 	mutex_lock(&trace_types_lock);
@@ -3205,9 +3275,24 @@ static int tracing_set_tracer(const char *buf)
 		goto out;
 
 	trace_branch_disable();
-	if (current_trace && current_trace->reset)
+
+	current_trace->enabled = false;
+
+	if (current_trace->reset)
 		current_trace->reset(tr);
-	if (current_trace && current_trace->use_max_tr) {
+
+	had_max_tr = current_trace->allocated_snapshot;
+	current_trace = &nop_trace;
+
+	if (had_max_tr && !t->use_max_tr) {
+		/*
+		 * We need to make sure that the update_max_tr sees that
+		 * current_trace changed to nop_trace to keep it from
+		 * swapping the buffers after we resize it.
+		 * The update_max_tr is called from interrupts disabled
+		 * so a synchronized_sched() is sufficient.
+		 */
+		synchronize_sched();
 		/*
 		 * We don't free the ring buffer. instead, resize it because
 		 * The max_tr ring buffer has some state (e.g. ring->clock) and
@@ -3215,18 +3300,19 @@ static int tracing_set_tracer(const char *buf)
 		 */
 		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
 		set_buffer_entries(&max_tr, 1);
+		tracing_reset_online_cpus(&max_tr);
+		current_trace->allocated_snapshot = false;
 	}
 	destroy_trace_option_files(topts);
 
-	current_trace = &nop_trace;
-
 	topts = create_trace_option_files(t);
-	if (t->use_max_tr) {
+	if (t->use_max_tr && !had_max_tr) {
 		/* we need to make per cpu buffer sizes equivalent */
 		ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
 						   RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
+		t->allocated_snapshot = true;
 	}
 
 	if (t->init) {
@@ -3236,6 +3322,7 @@ static int tracing_set_tracer(const char *buf)
 	}
 
 	current_trace = t;
+	current_trace->enabled = true;
 	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
@@ -3334,8 +3421,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		ret = -ENOMEM;
 		goto fail;
 	}
-	if (current_trace)
-		*iter->trace = *current_trace;
+	*iter->trace = *current_trace;
 
 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 		ret = -ENOMEM;
@@ -3452,7 +3538,7 @@ static int tracing_wait_pipe(struct file *filp)
 			return -EINTR;
 
 		/*
-		 * We block until we read something and tracing is enabled.
+		 * We block until we read something and tracing is disabled.
 		 * We still block if tracing is disabled, but we have never
 		 * read anything. This allows a user to cat this file, and
 		 * then enable tracing. But after we have read something,
@@ -3460,7 +3546,7 @@ static int tracing_wait_pipe(struct file *filp)
 		 *
 		 * iter->pos will be 0 if we haven't read anything.
 		 */
-		if (tracing_is_enabled() && iter->pos)
+		if (!tracing_is_enabled() && iter->pos)
 			break;
 	}
 
@@ -3475,7 +3561,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
 	struct trace_iterator *iter = filp->private_data;
-	static struct tracer *old_tracer;
 	ssize_t sret;
 
 	/* return any leftover data */
@@ -3487,10 +3572,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(old_tracer != current_trace && current_trace)) {
-		old_tracer = current_trace;
+	if (unlikely(iter->trace->name != current_trace->name))
 		*iter->trace = *current_trace;
-	}
 	mutex_unlock(&trace_types_lock);
 
 	/*
@@ -3646,7 +3729,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		.ops = &tracing_pipe_buf_ops,
 		.spd_release = tracing_spd_release_pipe,
 	};
-	static struct tracer *old_tracer;
 	ssize_t ret;
 	size_t rem;
 	unsigned int i;
@@ -3656,10 +3738,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(old_tracer != current_trace && current_trace)) {
-		old_tracer = current_trace;
+	if (unlikely(iter->trace->name != current_trace->name))
 		*iter->trace = *current_trace;
-	}
 	mutex_unlock(&trace_types_lock);
 
 	mutex_lock(&iter->mutex);
@@ -4035,8 +4115,7 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	 * Reset the buffer so that it doesn't have incomparable timestamps.
 	 */
 	tracing_reset_online_cpus(&global_trace);
-	if (max_tr.buffer)
-		tracing_reset_online_cpus(&max_tr);
+	tracing_reset_online_cpus(&max_tr);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -4052,6 +4131,85 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
 	return single_open(file, tracing_clock_show, NULL);
 }
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static int tracing_snapshot_open(struct inode *inode, struct file *file)
+{
+	struct trace_iterator *iter;
+	int ret = 0;
+
+	if (file->f_mode & FMODE_READ) {
+		iter = __tracing_open(inode, file, true);
+		if (IS_ERR(iter))
+			ret = PTR_ERR(iter);
+	}
+	return ret;
+}
+
+static ssize_t
+tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+		       loff_t *ppos)
+{
+	unsigned long val;
+	int ret;
+
+	ret = tracing_update_buffers();
+	if (ret < 0)
+		return ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&trace_types_lock);
+
+	if (current_trace->use_max_tr) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	switch (val) {
+	case 0:
+		if (current_trace->allocated_snapshot) {
+			/* free spare buffer */
+			ring_buffer_resize(max_tr.buffer, 1,
+					   RING_BUFFER_ALL_CPUS);
+			set_buffer_entries(&max_tr, 1);
+			tracing_reset_online_cpus(&max_tr);
+			current_trace->allocated_snapshot = false;
+		}
+		break;
+	case 1:
+		if (!current_trace->allocated_snapshot) {
+			/* allocate spare buffer */
+			ret = resize_buffer_duplicate_size(&max_tr,
+					&global_trace, RING_BUFFER_ALL_CPUS);
+			if (ret < 0)
+				break;
+			current_trace->allocated_snapshot = true;
+		}
+
+		local_irq_disable();
+		/* Now, we're going to swap */
+		update_max_tr(&global_trace, current, smp_processor_id());
+		local_irq_enable();
+		break;
+	default:
+		if (current_trace->allocated_snapshot)
+			tracing_reset_online_cpus(&max_tr);
+		break;
+	}
+
+	if (ret >= 0) {
+		*ppos += cnt;
+		ret = cnt;
+	}
+out:
+	mutex_unlock(&trace_types_lock);
+	return ret;
+}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_max_lat_read,
@@ -4108,6 +4266,16 @@ static const struct file_operations trace_clock_fops = {
 	.write = tracing_clock_write,
 };
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static const struct file_operations snapshot_fops = {
+	.open = tracing_snapshot_open,
+	.read = seq_read,
+	.write = tracing_snapshot_write,
+	.llseek = tracing_seek,
+	.release = tracing_release,
+};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 struct ftrace_buffer_info {
 	struct trace_array *tr;
 	void *spare;
@@ -4412,6 +4580,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
 
+	cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "read events: %ld\n", cnt);
+
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -4488,7 +4659,7 @@ struct dentry *tracing_init_dentry(void)
 
 static struct dentry *d_percpu;
 
-struct dentry *tracing_dentry_percpu(void)
+static struct dentry *tracing_dentry_percpu(void)
 {
 	static int once;
 	struct dentry *d_tracer;
@@ -4638,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	if (val != 0 && val != 1)
 		return -EINVAL;
-	set_tracer_flags(1 << index, val);
+
+	mutex_lock(&trace_types_lock);
+	ret = set_tracer_flag(1 << index, val);
+	mutex_unlock(&trace_types_lock);
+
+	if (ret < 0)
+		return ret;
 
 	*ppos += cnt;
 
@@ -4815,10 +4992,17 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	if (buffer) {
-		if (val)
+		mutex_lock(&trace_types_lock);
+		if (val) {
 			ring_buffer_record_on(buffer);
-		else
+			if (current_trace->start)
+				current_trace->start(tr);
+		} else {
 			ring_buffer_record_off(buffer);
+			if (current_trace->stop)
+				current_trace->stop(tr);
+		}
+		mutex_unlock(&trace_types_lock);
 	}
 
 	(*ppos)++;
@@ -4897,6 +5081,11 @@ static __init int tracer_init_debugfs(void)
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+	trace_create_file("snapshot", 0644, d_tracer,
+			  (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+#endif
+
 	create_trace_options_dir();
 
 	for_each_tracing_cpu(cpu)
@@ -5005,6 +5194,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	if (disable_tracing)
 		ftrace_kill();
 
+	/* Simulate the iterator */
 	trace_init_global_iter(&iter);
 
 	for_each_tracing_cpu(cpu) {
@@ -5016,10 +5206,6 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-	/* Simulate the iterator */
-	iter.tr = &global_trace;
-	iter.trace = current_trace;
-
 	switch (oops_dump_mode) {
 	case DUMP_ALL:
 		iter.cpu_file = TRACE_PIPE_ALL_CPU;
@@ -5164,7 +5350,7 @@ __init static int tracer_alloc_buffers(void)
 	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
 	register_tracer(&nop_trace);
-	current_trace = &nop_trace;
+
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 
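
Usage sketch (not part of the diff): based on the help text this patch adds in print_snapshot_help() and the "snapshot" file it creates under the tracing debugfs directory, the new interface would be exercised roughly as follows; the mount point /sys/kernel/debug is an assumption about where debugfs is mounted.

    cd /sys/kernel/debug/tracing
    echo 1 > snapshot    # allocate the spare buffer (if needed) and snapshot the main buffer
    cat snapshot         # read the snapshot; per __tracing_open(..., true), this does not stop tracing
    echo 2 > snapshot    # clear the snapshot buffer without freeing it (any value other than 0 or 1)
    echo 0 > snapshot    # clear and free the snapshot buffer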