path: root/kernel/trace/trace.c
author    Steven Rostedt <srostedt@redhat.com>   2012-08-03 16:10:49 -0400
committer Steven Rostedt <rostedt@goodmis.org>   2013-03-15 00:34:44 -0400
commit    277ba04461c2746cf935353474c0961161951b68 (patch)
tree      df2b8eb157c6725e606605f6acf40acbd4f13dd6 /kernel/trace/trace.c
parent    12ab74ee00d154bc05ea2fc659b7ce6519e5d5a6 (diff)
tracing: Add interface to allow multiple trace buffers
Add the interface ("instances" directory) to add multiple buffers to
ftrace. To create a new instance, simply do a mkdir in the instances
directory:

 # cd instances
 # mkdir foo

This will create a directory with the following contents:

 # ls foo
 buffer_size_kb        free_buffer  trace_clock    trace_pipe
 buffer_total_size_kb  set_event    trace_marker   tracing_enabled
 events/               trace        trace_options  tracing_on

Currently only events are able to be set, and there is not yet a way
to delete a buffer once one has been created.

Note, the i_mutex lock is dropped from the parent "instances" directory
during the mkdir operation. As the "instances" directory cannot be
renamed or deleted (it is created on boot), I do not see any harm in
dropping the lock. The creation of the sub directories is protected by
the trace_types_lock mutex, which only lets one instance get into the
code path at a time. If two tasks try to create or delete directories
of the same name, only one will succeed and the other will fail with
-EEXIST.

Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
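A usage sketch following the description above (the mount point
/sys/kernel/debug and the event name "sched:sched_switch" are assumptions
for illustration, not part of this commit):

 # cd /sys/kernel/debug/tracing/instances
 # mkdir foo
 # echo sched:sched_switch > foo/set_event     (enable an example event in this buffer only)
 # cat foo/trace                               (read this instance's own ring buffer)
 # mkdir foo                                   (a second mkdir of the same name fails with -EEXIST)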
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--   kernel/trace/trace.c   129
1 file changed, 129 insertions, 0 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 406adbc277a0..07a63114d938 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5107,6 +5107,133 @@ static const struct file_operations rb_simple_fops = {
         .llseek         = default_llseek,
 };
 
+struct dentry *trace_instance_dir;
+
+static void
+init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
+
+static int new_instance_create(const char *name)
+{
+        enum ring_buffer_flags rb_flags;
+        struct trace_array *tr;
+        int ret;
+        int i;
+
+        mutex_lock(&trace_types_lock);
+
+        ret = -EEXIST;
+        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+                if (tr->name && strcmp(tr->name, name) == 0)
+                        goto out_unlock;
+        }
+
+        ret = -ENOMEM;
+        tr = kzalloc(sizeof(*tr), GFP_KERNEL);
+        if (!tr)
+                goto out_unlock;
+
+        tr->name = kstrdup(name, GFP_KERNEL);
+        if (!tr->name)
+                goto out_free_tr;
+
+        raw_spin_lock_init(&tr->start_lock);
+
+        tr->current_trace = &nop_trace;
+
+        INIT_LIST_HEAD(&tr->systems);
+        INIT_LIST_HEAD(&tr->events);
+
+        rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
+
+        tr->buffer = ring_buffer_alloc(trace_buf_size, rb_flags);
+        if (!tr->buffer)
+                goto out_free_tr;
+
+        tr->data = alloc_percpu(struct trace_array_cpu);
+        if (!tr->data)
+                goto out_free_tr;
+
+        for_each_tracing_cpu(i) {
+                memset(per_cpu_ptr(tr->data, i), 0, sizeof(struct trace_array_cpu));
+                per_cpu_ptr(tr->data, i)->trace_cpu.cpu = i;
+                per_cpu_ptr(tr->data, i)->trace_cpu.tr = tr;
+        }
+
+        /* Holder for file callbacks */
+        tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
+        tr->trace_cpu.tr = tr;
+
+        tr->dir = debugfs_create_dir(name, trace_instance_dir);
+        if (!tr->dir)
+                goto out_free_tr;
+
+        ret = event_trace_add_tracer(tr->dir, tr);
+        if (ret)
+                goto out_free_tr;
+
+        init_tracer_debugfs(tr, tr->dir);
+
+        list_add(&tr->list, &ftrace_trace_arrays);
+
+        mutex_unlock(&trace_types_lock);
+
+        return 0;
+
+ out_free_tr:
+        if (tr->buffer)
+                ring_buffer_free(tr->buffer);
+        kfree(tr->name);
+        kfree(tr);
+
+ out_unlock:
+        mutex_unlock(&trace_types_lock);
+
+        return ret;
+
+}
+
+static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
+{
+        struct dentry *parent;
+        int ret;
+
+        /* Paranoid: Make sure the parent is the "instances" directory */
+        parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+        if (WARN_ON_ONCE(parent != trace_instance_dir))
+                return -ENOENT;
+
+        /*
+         * The inode mutex is locked, but debugfs_create_dir() will also
+         * take the mutex. As the instances directory can not be destroyed
+         * or changed in any other way, it is safe to unlock it, and
+         * let the dentry try. If two users try to make the same dir at
+         * the same time, then the new_instance_create() will determine the
+         * winner.
+         */
+        mutex_unlock(&inode->i_mutex);
+
+        ret = new_instance_create(dentry->d_iname);
+
+        mutex_lock(&inode->i_mutex);
+
+        return ret;
+}
+
+static const struct inode_operations instance_dir_inode_operations = {
+        .lookup         = simple_lookup,
+        .mkdir          = instance_mkdir,
+};
+
+static __init void create_trace_instances(struct dentry *d_tracer)
+{
+        trace_instance_dir = debugfs_create_dir("instances", d_tracer);
+        if (WARN_ON(!trace_instance_dir))
+                return;
+
+        /* Hijack the dir inode operations, to allow mkdir */
+        trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
+}
+
 static void
 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 {
@@ -5183,6 +5310,8 @@ static __init int tracer_init_debugfs(void)
                         (void *)&global_trace.trace_cpu, &snapshot_fops);
 #endif
 
+        create_trace_instances(d_tracer);
+
         create_trace_options_dir(&global_trace);
 
         for_each_tracing_cpu(cpu)