author	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-28 21:10:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-28 21:10:39 -0400
commit	6803f37e0955c7cc7ebe4363cf2bab5552e4716d (patch)
tree	ff4dc956f564b3b94fce96ed08609f561ca1c646 /kernel
parent	561d9e8185883a8d0788cb15090873d4f13323f5 (diff)
parent	09d8091c024ec88d1541d93eb8ddb2bd5cf10c39 (diff)
Merge tag 'trace-fixes-3.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
 "Oleg is working on fixing a very tight race between opening an event
  file and deleting that event at the same time (both must be done as
  root).

  I also found a bug while testing Oleg's patches which has to do with
  a race with kprobes using the function tracer.

  There's also a deadlock fix that was introduced with the previous
  fixes"

* tag 'trace-fixes-3.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Remove locking trace_types_lock from tracing_reset_all_online_cpus()
  ftrace: Add check for NULL regs if ops has SAVE_REGS set
  tracing: Kill trace_cpu struct/members
  tracing: Change tracing_fops/snapshot_fops to rely on tracing_get_cpu()
  tracing: Change tracing_entries_fops to rely on tracing_get_cpu()
  tracing: Change tracing_stats_fops to rely on tracing_get_cpu()
  tracing: Change tracing_buffers_fops to rely on tracing_get_cpu()
  tracing: Change tracing_pipe_fops() to rely on tracing_get_cpu()
  tracing: Introduce trace_create_cpu_file() and tracing_get_cpu()
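The recurring change in this series is the new trace_create_cpu_file()/tracing_get_cpu() pair: instead of handing each per-cpu debugfs file a struct trace_cpu, the CPU number is stashed in the file's inode (i_cdev) with a +1 offset, so a NULL i_cdev still means "all CPUs". The following is a minimal stand-alone sketch of that encoding, not the kernel code itself; cpu_file_inode, set_cpu_file and get_cpu_file are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the cpu-in-inode encoding used by trace_create_cpu_file()
 * and tracing_get_cpu().  User-space illustration only: "cpu_file_inode"
 * stands in for struct inode, and i_cdev is just a pointer-sized slot.
 */
#include <stdio.h>

#define RING_BUFFER_ALL_CPUS	-1

struct cpu_file_inode {
	void *i_cdev;		/* NULL unless a CPU number was recorded */
};

/* Store cpu + 1 so that CPU 0 is distinguishable from "not set" (NULL). */
static void set_cpu_file(struct cpu_file_inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);
}

/* A NULL i_cdev means the file is not per-cpu, i.e. it covers all CPUs. */
static int get_cpu_file(const struct cpu_file_inode *inode)
{
	if (inode->i_cdev)
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

int main(void)
{
	struct cpu_file_inode top = { NULL };		/* e.g. the top-level "trace" file */
	struct cpu_file_inode percpu = { NULL };	/* e.g. per_cpu/cpu2/trace */

	set_cpu_file(&percpu, 2);

	printf("top-level file -> %d\n", get_cpu_file(&top));		/* -1: all CPUs */
	printf("per-cpu file   -> %d\n", get_cpu_file(&percpu));	/* 2 */
	return 0;
}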
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/ftrace.c	 18
-rw-r--r--	kernel/trace/trace.c	197
-rw-r--r--	kernel/trace/trace.h	  8
3 files changed, 95 insertions, 128 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 67708f46baae..8ce9eefc5bb4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1441,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
  * the hashes are freed with call_rcu_sched().
  */
 static int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
 	struct ftrace_hash *filter_hash;
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	/*
+	 * There's a small race when adding ops that the ftrace handler
+	 * that wants regs, may be called without them. We can not
+	 * allow that handler to be called if regs is NULL.
+	 */
+	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
+		return 0;
+#endif
+
 	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
 	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
@@ -4218,7 +4228,7 @@ static inline void ftrace_startup_enable(int command) { }
 # define ftrace_shutdown_sysctl()	do { } while (0)
 
 static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
 	return 1;
 }
@@ -4241,7 +4251,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 	do_for_each_ftrace_op(op, ftrace_control_list) {
 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
 		    !ftrace_function_local_disabled(op) &&
-		    ftrace_ops_test(op, ip))
+		    ftrace_ops_test(op, ip, regs))
 			op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
 	trace_recursion_clear(TRACE_CONTROL_BIT);
@@ -4274,7 +4284,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	 */
 	preempt_disable_notrace();
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (ftrace_ops_test(op, ip))
+		if (ftrace_ops_test(op, ip, regs))
 			op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
 	preempt_enable_notrace();
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3f2477713aca..882ec1dd1515 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1224,18 +1224,17 @@ void tracing_reset_current(int cpu)
 	tracing_reset(&global_trace.trace_buffer, cpu);
 }
 
+/* Must have trace_types_lock held */
 void tracing_reset_all_online_cpus(void)
 {
 	struct trace_array *tr;
 
-	mutex_lock(&trace_types_lock);
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 		tracing_reset_online_cpus(&tr->trace_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
 		tracing_reset_online_cpus(&tr->max_buffer);
 #endif
 	}
-	mutex_unlock(&trace_types_lock);
 }
 
 #define SAVED_CMDLINES 128
@@ -2843,6 +2842,17 @@ static int s_show(struct seq_file *m, void *v)
 	return 0;
 }
 
+/*
+ * Should be used after trace_array_get(), trace_types_lock
+ * ensures that i_cdev was already initialized.
+ */
+static inline int tracing_get_cpu(struct inode *inode)
+{
+	if (inode->i_cdev) /* See trace_create_cpu_file() */
+		return (long)inode->i_cdev - 1;
+	return RING_BUFFER_ALL_CPUS;
+}
+
 static const struct seq_operations tracer_seq_ops = {
 	.start		= s_start,
 	.next		= s_next,
@@ -2851,9 +2861,9 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
-	       struct inode *inode, struct file *file, bool snapshot)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	int cpu;
 
@@ -2894,8 +2904,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
 	iter->trace_buffer = &tr->trace_buffer;
 	iter->snapshot = snapshot;
 	iter->pos = -1;
+	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
-	iter->cpu_file = tc->cpu;
 
 	/* Notify the tracer early; before we stop tracing. */
 	if (iter->trace && iter->trace->open)
@@ -2971,44 +2981,22 @@ static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
 	filp->private_data = inode->i_private;
 
 	return 0;
-
-}
-
-static int tracing_open_generic_tc(struct inode *inode, struct file *filp)
-{
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
-
-	if (tracing_disabled)
-		return -ENODEV;
-
-	if (trace_array_get(tr) < 0)
-		return -ENODEV;
-
-	filp->private_data = inode->i_private;
-
-	return 0;
-
 }
 
 static int tracing_release(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
 	struct seq_file *m = file->private_data;
 	struct trace_iterator *iter;
-	struct trace_array *tr;
 	int cpu;
 
-	/* Writes do not use seq_file, need to grab tr from inode */
 	if (!(file->f_mode & FMODE_READ)) {
-		struct trace_cpu *tc = inode->i_private;
-
-		trace_array_put(tc->tr);
+		trace_array_put(tr);
 		return 0;
 	}
 
+	/* Writes do not use seq_file */
 	iter = m->private;
-	tr = iter->tr;
-
 	mutex_lock(&trace_types_lock);
 
 	for_each_tracing_cpu(cpu) {
@@ -3044,15 +3032,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static int tracing_release_generic_tc(struct inode *inode, struct file *file)
-{
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
-
-	trace_array_put(tr);
-	return 0;
-}
-
 static int tracing_single_release_tr(struct inode *inode, struct file *file)
 {
 	struct trace_array *tr = inode->i_private;
@@ -3064,8 +3043,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file)
 
 static int tracing_open(struct inode *inode, struct file *file)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	int ret = 0;
 
@@ -3073,16 +3051,17 @@ static int tracing_open(struct inode *inode, struct file *file)
 		return -ENODEV;
 
 	/* If this file was open for write, then erase contents */
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC)) {
-		if (tc->cpu == RING_BUFFER_ALL_CPUS)
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		int cpu = tracing_get_cpu(inode);
+
+		if (cpu == RING_BUFFER_ALL_CPUS)
 			tracing_reset_online_cpus(&tr->trace_buffer);
 		else
-			tracing_reset(&tr->trace_buffer, tc->cpu);
+			tracing_reset(&tr->trace_buffer, cpu);
 	}
 
 	if (file->f_mode & FMODE_READ) {
-		iter = __tracing_open(tr, tc, inode, file, false);
+		iter = __tracing_open(inode, file, false);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3948,8 +3927,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	int ret = 0;
 
@@ -3995,9 +3973,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (trace_clocks[tr->clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	iter->cpu_file = tc->cpu;
-	iter->tr = tc->tr;
-	iter->trace_buffer = &tc->tr->trace_buffer;
+	iter->tr = tr;
+	iter->trace_buffer = &tr->trace_buffer;
+	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
 	filp->private_data = iter;
 
@@ -4020,8 +3998,7 @@ fail:
 static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
 	struct trace_iterator *iter = file->private_data;
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 
 	mutex_lock(&trace_types_lock);
 
@@ -4374,15 +4351,16 @@ static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
-	struct trace_array *tr = tc->tr;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
+	int cpu = tracing_get_cpu(inode);
 	char buf[64];
 	int r = 0;
 	ssize_t ret;
 
 	mutex_lock(&trace_types_lock);
 
-	if (tc->cpu == RING_BUFFER_ALL_CPUS) {
+	if (cpu == RING_BUFFER_ALL_CPUS) {
 		int cpu, buf_size_same;
 		unsigned long size;
 
@@ -4409,7 +4387,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		} else
 			r = sprintf(buf, "X\n");
 	} else
-		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
+		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -4421,7 +4399,8 @@ static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
 		      size_t cnt, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
 	unsigned long val;
 	int ret;
 
@@ -4435,8 +4414,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 
 	/* value is in KB */
 	val <<= 10;
-
-	ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
+	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
 	if (ret < 0)
 		return ret;
 
@@ -4697,8 +4675,7 @@ struct ftrace_buffer_info {
 #ifdef CONFIG_TRACER_SNAPSHOT
 static int tracing_snapshot_open(struct inode *inode, struct file *file)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct trace_iterator *iter;
 	struct seq_file *m;
 	int ret = 0;
@@ -4707,7 +4684,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
 		return -ENODEV;
 
 	if (file->f_mode & FMODE_READ) {
-		iter = __tracing_open(tr, tc, inode, file, true);
+		iter = __tracing_open(inode, file, true);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
 	} else {
@@ -4724,8 +4701,8 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
 		ret = 0;
 
 		iter->tr = tr;
-		iter->trace_buffer = &tc->tr->max_buffer;
-		iter->cpu_file = tc->cpu;
+		iter->trace_buffer = &tr->max_buffer;
+		iter->cpu_file = tracing_get_cpu(inode);
 		m->private = iter;
 		file->private_data = m;
 	}
@@ -4884,11 +4861,11 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-	.open		= tracing_open_generic_tc,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
 	.llseek		= generic_file_llseek,
-	.release	= tracing_release_generic_tc,
+	.release	= tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_total_entries_fops = {
@@ -4940,8 +4917,7 @@ static const struct file_operations snapshot_raw_fops = {
 
 static int tracing_buffers_open(struct inode *inode, struct file *filp)
 {
-	struct trace_cpu *tc = inode->i_private;
-	struct trace_array *tr = tc->tr;
+	struct trace_array *tr = inode->i_private;
 	struct ftrace_buffer_info *info;
 	int ret;
 
@@ -4960,7 +4936,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 	mutex_lock(&trace_types_lock);
 
 	info->iter.tr		= tr;
-	info->iter.cpu_file	= tc->cpu;
+	info->iter.cpu_file	= tracing_get_cpu(inode);
 	info->iter.trace	= tr->current_trace;
 	info->iter.trace_buffer = &tr->trace_buffer;
 	info->spare		= NULL;
@@ -5277,14 +5253,14 @@ static ssize_t
 tracing_stats_read(struct file *filp, char __user *ubuf,
 		   size_t count, loff_t *ppos)
 {
-	struct trace_cpu *tc = filp->private_data;
-	struct trace_array *tr = tc->tr;
+	struct inode *inode = file_inode(filp);
+	struct trace_array *tr = inode->i_private;
 	struct trace_buffer *trace_buf = &tr->trace_buffer;
+	int cpu = tracing_get_cpu(inode);
 	struct trace_seq *s;
 	unsigned long cnt;
 	unsigned long long t;
 	unsigned long usec_rem;
-	int cpu = tc->cpu;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -5337,10 +5313,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 }
 
 static const struct file_operations tracing_stats_fops = {
-	.open		= tracing_open_generic_tc,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_stats_read,
 	.llseek		= generic_file_llseek,
-	.release	= tracing_release_generic_tc,
+	.release	= tracing_release_generic_tr,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -5529,10 +5505,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 	return tr->percpu_dir;
 }
 
+static struct dentry *
+trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
+		      void *data, long cpu, const struct file_operations *fops)
+{
+	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
+
+	if (ret) /* See tracing_get_cpu() */
+		ret->d_inode->i_cdev = (void *)(cpu + 1);
+	return ret;
+}
+
 static void
 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
-	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 	struct dentry *d_cpu;
 	char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5548,28 +5534,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 	}
 
 	/* per cpu trace_pipe */
-	trace_create_file("trace_pipe", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_pipe_fops);
+	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+				tr, cpu, &tracing_pipe_fops);
 
 	/* per cpu trace */
-	trace_create_file("trace", 0644, d_cpu,
-			(void *)&data->trace_cpu, &tracing_fops);
+	trace_create_cpu_file("trace", 0644, d_cpu,
+				tr, cpu, &tracing_fops);
 
-	trace_create_file("trace_pipe_raw", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_buffers_fops);
+	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+				tr, cpu, &tracing_buffers_fops);
 
-	trace_create_file("stats", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_stats_fops);
+	trace_create_cpu_file("stats", 0444, d_cpu,
+				tr, cpu, &tracing_stats_fops);
 
-	trace_create_file("buffer_size_kb", 0444, d_cpu,
-			(void *)&data->trace_cpu, &tracing_entries_fops);
+	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+				tr, cpu, &tracing_entries_fops);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-	trace_create_file("snapshot", 0644, d_cpu,
-			(void *)&data->trace_cpu, &snapshot_fops);
+	trace_create_cpu_file("snapshot", 0644, d_cpu,
+				tr, cpu, &snapshot_fops);
 
-	trace_create_file("snapshot_raw", 0444, d_cpu,
-			(void *)&data->trace_cpu, &snapshot_raw_fops);
+	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+				tr, cpu, &snapshot_raw_fops);
 #endif
 }
 
@@ -5878,17 +5864,6 @@ struct dentry *trace_instance_dir;
 static void
 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
 
-static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
-{
-	int cpu;
-
-	for_each_tracing_cpu(cpu) {
-		memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
-		per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
-		per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
-	}
-}
-
 static int
 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
 {
@@ -5906,8 +5881,6 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
 		return -ENOMEM;
 	}
 
-	init_trace_buffers(tr, buf);
-
 	/* Allocate the first page for all buffers */
 	set_buffer_entries(&tr->trace_buffer,
 			   ring_buffer_size(tr->trace_buffer.buffer, 0));
@@ -5974,10 +5947,6 @@ static int new_instance_create(const char *name)
 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
 		goto out_free_tr;
 
-	/* Holder for file callbacks */
-	tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
-	tr->trace_cpu.tr = tr;
-
 	tr->dir = debugfs_create_dir(name, trace_instance_dir);
 	if (!tr->dir)
 		goto out_free_tr;
@@ -6132,13 +6101,13 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 			  tr, &tracing_iter_fops);
 
 	trace_create_file("trace", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_fops);
+			  tr, &tracing_fops);
 
 	trace_create_file("trace_pipe", 0444, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_pipe_fops);
+			  tr, &tracing_pipe_fops);
 
 	trace_create_file("buffer_size_kb", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &tracing_entries_fops);
+			  tr, &tracing_entries_fops);
 
 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
 			  tr, &tracing_total_entries_fops);
@@ -6153,11 +6122,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 			  &trace_clock_fops);
 
 	trace_create_file("tracing_on", 0644, d_tracer,
 			  tr, &rb_simple_fops);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 	trace_create_file("snapshot", 0644, d_tracer,
-			(void *)&tr->trace_cpu, &snapshot_fops);
+			  tr, &snapshot_fops);
 #endif
 
 	for_each_tracing_cpu(cpu)
@@ -6451,10 +6420,6 @@ __init static int tracer_alloc_buffers(void)
 
 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
 
-	/* Holder for file callbacks */
-	global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
-	global_trace.trace_cpu.tr = &global_trace;
-
 	INIT_LIST_HEAD(&global_trace.systems);
 	INIT_LIST_HEAD(&global_trace.events);
 	list_add(&global_trace.list, &ftrace_trace_arrays);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index e7d643b8a907..afaae41b0a02 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -130,19 +130,12 @@ enum trace_flag_type {
 
 struct trace_array;
 
-struct trace_cpu {
-	struct trace_array	*tr;
-	struct dentry		*dir;
-	int			cpu;
-};
-
 /*
  * The CPU trace array - it consists of thousands of trace entries
  * plus some other descriptor data: (for example which task started
  * the trace, etc.)
  */
 struct trace_array_cpu {
-	struct trace_cpu	trace_cpu;
 	atomic_t		disabled;
 	void			*buffer_page;	/* ring buffer spare */
 
@@ -196,7 +189,6 @@ struct trace_array {
 	bool			allocated_snapshot;
 #endif
 	int			buffer_disabled;
-	struct trace_cpu	trace_cpu;	/* place holder */
 #ifdef CONFIG_FTRACE_SYSCALLS
 	int			sys_refcount_enter;
 	int			sys_refcount_exit;