 include/linux/ftrace_event.h |   3 +
 kernel/trace/Kconfig         |  10 ++
 kernel/trace/trace.c         | 166 ++++++++++++++++++++++++++++++++---------
 kernel/trace/trace.h         |   1 +
 4 files changed, 154 insertions(+), 26 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 6f8d0b77006b..13a54d0bdfa8 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -83,6 +83,9 @@ struct trace_iterator {
 	long idx;
 
 	cpumask_var_t started;
+
+	/* it's true when current open file is snapshot */
+	bool snapshot;
 };
 
 enum trace_iter_flags {
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index cdc9d284d24e..36567564e221 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -253,6 +253,16 @@ config FTRACE_SYSCALLS
 	help
 	  Basic tracer to catch the syscall entry and exit events.
 
+config TRACER_SNAPSHOT
+	bool "Create a snapshot trace buffer"
+	select TRACER_MAX_TRACE
+	help
+	  Allow tracing users to take snapshot of the current buffer using the
+	  ftrace interface, e.g.:
+
+	      echo 1 > /sys/kernel/debug/tracing/snapshot
+	      cat snapshot
+
 config TRACE_BRANCH_PROFILING
 	bool
 	select GENERIC_TRACER
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2c724662a3e8..70dce64b9ecf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -710,12 +710,11 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	WARN_ON_ONCE(!irqs_disabled());
 
-	/* If we disabled the tracer, stop now */
-	if (current_trace == &nop_trace)
-		return;
-
-	if (WARN_ON_ONCE(!current_trace->use_max_tr))
-		return;
+	if (!current_trace->allocated_snapshot) {
+		/* Only the nop tracer should hit this when disabling */
+		WARN_ON_ONCE(current_trace != &nop_trace);
+		return;
+	}
 
 	arch_spin_lock(&ftrace_max_lock);
 
@@ -743,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (!current_trace->use_max_tr) {
-		WARN_ON_ONCE(1);
+	if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
 		return;
-	}
 
 	arch_spin_lock(&ftrace_max_lock);
 
@@ -866,10 +863,13 @@ int register_tracer(struct tracer *type)
 
 	current_trace = type;
 
-	/* If we expanded the buffers, make sure the max is expanded too */
-	if (ring_buffer_expanded && type->use_max_tr)
-		ring_buffer_resize(max_tr.buffer, trace_buf_size,
-						RING_BUFFER_ALL_CPUS);
+	if (type->use_max_tr) {
+		/* If we expanded the buffers, make sure the max is expanded too */
+		if (ring_buffer_expanded)
+			ring_buffer_resize(max_tr.buffer, trace_buf_size,
+					   RING_BUFFER_ALL_CPUS);
+		type->allocated_snapshot = true;
+	}
 
 	/* the test is responsible for initializing and enabling */
 	pr_info("Testing tracer %s: ", type->name);
@@ -885,10 +885,14 @@ int register_tracer(struct tracer *type)
 		/* Only reset on passing, to avoid touching corrupted buffers */
 		tracing_reset_online_cpus(tr);
 
-		/* Shrink the max buffer again */
-		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, 1,
-						RING_BUFFER_ALL_CPUS);
+		if (type->use_max_tr) {
+			type->allocated_snapshot = false;
+
+			/* Shrink the max buffer again */
+			if (ring_buffer_expanded)
+				ring_buffer_resize(max_tr.buffer, 1,
+						   RING_BUFFER_ALL_CPUS);
+		}
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -1964,7 +1968,11 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	*iter->trace = *current_trace;
 	mutex_unlock(&trace_types_lock);
 
-	atomic_inc(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return ERR_PTR(-EBUSY);
+
+	if (!iter->snapshot)
+		atomic_inc(&trace_record_cmdline_disabled);
 
 	if (*pos != iter->pos) {
 		iter->ent = NULL;
@@ -2003,7 +2011,11 @@ static void s_stop(struct seq_file *m, void *p)
 {
 	struct trace_iterator *iter = m->private;
 
-	atomic_dec(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return;
+
+	if (!iter->snapshot)
+		atomic_dec(&trace_record_cmdline_disabled);
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 }
@@ -2438,7 +2450,7 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
 	long cpu_file = (long) inode->i_private;
 	struct trace_iterator *iter;
@@ -2471,10 +2483,11 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	if (current_trace && current_trace->print_max)
+	if ((current_trace && current_trace->print_max) || snapshot)
 		iter->tr = &max_tr;
 	else
 		iter->tr = &global_trace;
+	iter->snapshot = snapshot;
 	iter->pos = -1;
 	mutex_init(&iter->mutex);
 	iter->cpu_file = cpu_file;
@@ -2491,8 +2504,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (trace_clocks[trace_clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	/* stop the trace while dumping */
-	tracing_stop();
+	/* stop the trace while dumping if we are not opening "snapshot" */
+	if (!iter->snapshot)
+		tracing_stop();
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
@@ -2555,8 +2569,9 @@ static int tracing_release(struct inode *inode, struct file *file)
 	if (iter->trace && iter->trace->close)
 		iter->trace->close(iter);
 
-	/* reenable tracing if it was previously enabled */
-	tracing_start();
+	if (!iter->snapshot)
+		/* reenable tracing if it was previously enabled */
+		tracing_start();
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
@@ -2584,7 +2599,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 	}
 
 	if (file->f_mode & FMODE_READ) {
-		iter = __tracing_open(inode, file);
+		iter = __tracing_open(inode, file, false);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3219,7 +3234,7 @@ static int tracing_set_tracer(const char *buf)
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
 
-	had_max_tr = current_trace && current_trace->use_max_tr;
+	had_max_tr = current_trace && current_trace->allocated_snapshot;
 	current_trace = &nop_trace;
 
 	if (had_max_tr && !t->use_max_tr) {
@@ -3238,6 +3253,8 @@ static int tracing_set_tracer(const char *buf)
 		 */
 		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
 		set_buffer_entries(&max_tr, 1);
+		tracing_reset_online_cpus(&max_tr);
+		current_trace->allocated_snapshot = false;
 	}
 	destroy_trace_option_files(topts);
 
@@ -3248,6 +3265,7 @@ static int tracing_set_tracer(const char *buf)
 						   RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
+		t->allocated_snapshot = true;
 	}
 
 	if (t->init) {
@@ -4066,6 +4084,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
 	return single_open(file, tracing_clock_show, NULL);
 }
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static int tracing_snapshot_open(struct inode *inode, struct file *file)
+{
+	struct trace_iterator *iter;
+	int ret = 0;
+
+	if (file->f_mode & FMODE_READ) {
+		iter = __tracing_open(inode, file, true);
+		if (IS_ERR(iter))
+			ret = PTR_ERR(iter);
+	}
+	return ret;
+}
+
+static ssize_t
+tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+		       loff_t *ppos)
+{
+	unsigned long val;
+	int ret;
+
+	ret = tracing_update_buffers();
+	if (ret < 0)
+		return ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&trace_types_lock);
+
+	if (current_trace && current_trace->use_max_tr) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	switch (val) {
+	case 0:
+		if (current_trace->allocated_snapshot) {
+			/* free spare buffer */
+			ring_buffer_resize(max_tr.buffer, 1,
+					   RING_BUFFER_ALL_CPUS);
+			set_buffer_entries(&max_tr, 1);
+			tracing_reset_online_cpus(&max_tr);
+			current_trace->allocated_snapshot = false;
+		}
+		break;
+	case 1:
+		if (!current_trace->allocated_snapshot) {
+			/* allocate spare buffer */
+			ret = resize_buffer_duplicate_size(&max_tr,
+					&global_trace, RING_BUFFER_ALL_CPUS);
+			if (ret < 0)
+				break;
+			current_trace->allocated_snapshot = true;
+		}
+
+		local_irq_disable();
+		/* Now, we're going to swap */
+		update_max_tr(&global_trace, current, smp_processor_id());
+		local_irq_enable();
+		break;
+	default:
+		if (current_trace->allocated_snapshot)
+			tracing_reset_online_cpus(&max_tr);
+		else
+			ret = -EINVAL;
+		break;
+	}
+
+	if (ret >= 0) {
+		*ppos += cnt;
+		ret = cnt;
+	}
+out:
+	mutex_unlock(&trace_types_lock);
+	return ret;
+}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -4122,6 +4221,16 @@ static const struct file_operations trace_clock_fops = {
 	.write		= tracing_clock_write,
 };
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static const struct file_operations snapshot_fops = {
+	.open		= tracing_snapshot_open,
+	.read		= seq_read,
+	.write		= tracing_snapshot_write,
+	.llseek		= tracing_seek,
+	.release	= tracing_release,
+};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 struct ftrace_buffer_info {
 	struct trace_array *tr;
 	void *spare;
@@ -4921,6 +5030,11 @@ static __init int tracer_init_debugfs(void)
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+	trace_create_file("snapshot", 0644, d_tracer,
+			  (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+#endif
+
 	create_trace_options_dir();
 
 	for_each_tracing_cpu(cpu)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 04a2c7ab1735..57d7e5397d56 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -287,6 +287,7 @@ struct tracer {
 	struct tracer_flags	*flags;
 	bool			print_max;
 	bool			use_max_tr;
+	bool			allocated_snapshot;
 };
 
 
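
Usage note: a minimal sketch of how the new snapshot file behaves, inferred
from the switch statement in tracing_snapshot_write() above. It assumes
debugfs is mounted at /sys/kernel/debug and the kernel was built with
CONFIG_TRACER_SNAPSHOT=y:

    cd /sys/kernel/debug/tracing
    echo 1 > snapshot    # allocate the spare buffer if needed, then swap
                         # buffers, i.e. take a snapshot; tracing continues
    cat snapshot         # read the snapshot without stopping the tracer
    echo 2 > snapshot    # any value other than 0 or 1 clears the snapshot
                         # contents (-EINVAL if no snapshot is allocated)
    echo 0 > snapshot    # shrink the spare buffer back to one entry,
                         # freeing its memory

Writes return -EBUSY while a tracer that itself uses the max_tr buffer
(use_max_tr, e.g. the latency tracers) is active, since those tracers swap
the buffers themselves; reads of the snapshot file fail the same way in
s_start().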