author     Linus Torvalds <torvalds@linux-foundation.org>  2013-08-07 16:01:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-08-07 16:01:30 -0400
commit     b7bc9e7d808ba55729bd263b0210cda36965be32 (patch)
tree       3854bc709c38f75fbc4b9e90bdedba5202482dc1 /kernel
parent     8ef9c292ac7bf91fc0367746cf5db264adddced9 (diff)
parent     9457158bbc0ee04ecef76862d73eecd8076e9c7b (diff)
Merge tag 'trace-fixes-3.11-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
 "Oleg Nesterov has been working hard in closing all the holes that can
  lead to race conditions between deleting an event and accessing an
  event debugfs file.  This included a fix to the debugfs system (acked
  by Greg Kroah-Hartman).  We think that all the holes have been patched
  and hopefully we don't find more.  I haven't marked all of them for
  stable because I need to examine them more to figure out how far back
  some of the changes need to go.

  Along the way, some other fixes have been made.  Alexander Z Lam fixed
  some logic where the wrong buffer was being modified.  Andrew Vagin
  found a possible corruption for machines that actually allocate
  cpumask, as a reference to one was being zeroed out by mistake.
  Dhaval Giani found a bad prototype when tracing is not configured.
  And I not only had some changes to help Oleg, but also finally fixed a
  long-standing bug that Dave Jones and others have been hitting, where
  a module unload and reload can cause the function tracing accounting
  to get screwed up"

* tag 'trace-fixes-3.11-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Fix reset of time stamps during trace_clock changes
  tracing: Make TRACE_ITER_STOP_ON_FREE stop the correct buffer
  tracing: Fix trace_dump_stack() proto when CONFIG_TRACING is not set
  tracing: Fix fields of struct trace_iterator that are zeroed by mistake
  tracing/uprobes: Fail to unregister if probe event files are in use
  tracing/kprobes: Fail to unregister if probe event files are in use
  tracing: Add comment to describe special break case in probe_remove_event_call()
  tracing: trace_remove_event_call() should fail if call/file is in use
  debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs)
  ftrace: Check module functions being traced on reload
  ftrace: Consolidate some duplicate code for updating ftrace ops
  tracing: Change remove_event_file_dir() to clear "d_subdirs"->i_private
  tracing: Introduce remove_event_file_dir()
  tracing: Change f_start() to take event_mutex and verify i_private != NULL
  tracing: Change event_filter_read/write to verify i_private != NULL
  tracing: Change event_enable/disable_read() to verify i_private != NULL
  tracing: Turn event/id->i_private into call->event.type
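[Editor's note] The recurring pattern in Oleg's fixes shows up throughout the trace_events.c hunks below: the event pointer stashed in the debugfs inode's i_private is cleared when the event's directory is torn down, and every file operation re-reads that pointer under event_mutex, bailing out with -ENODEV once it is NULL. What follows is a minimal user-space model of that handshake, not the kernel code itself; the names event_id_read() and remove_event() are illustrative, and a pthread mutex stands in for event_mutex.

/*
 * User-space sketch of the i_private revalidation handshake.
 * Build with: cc -o i_private_model i_private_model.c -lpthread
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event { int id; };

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct event *i_private;	/* set at file creation, cleared on removal */

/* Reader side: every file op revalidates the pointer under the lock. */
static int event_id_read(void)
{
	int ret = -ENODEV;

	pthread_mutex_lock(&event_mutex);
	if (i_private)		/* NULL means the event was deleted */
		ret = i_private->id;
	pthread_mutex_unlock(&event_mutex);
	return ret;
}

/* Removal side: clear the pointer under the same lock, then free. */
static void remove_event(void)
{
	struct event *ev;

	pthread_mutex_lock(&event_mutex);
	ev = i_private;
	i_private = NULL;	/* readers now see -ENODEV */
	pthread_mutex_unlock(&event_mutex);
	free(ev);
}

int main(void)
{
	i_private = malloc(sizeof(*i_private));
	i_private->id = 42;
	printf("before removal: %d\n", event_id_read());	/* 42 */
	remove_event();
	printf("after removal:  %d\n", event_id_read());	/* -19 (-ENODEV) */
	return 0;
}

In the kernel, the reader half is event_file_data() (an ACCESS_ONCE load of file_inode(filp)->i_private) plus a check under event_mutex, and the removal half is remove_event_file_dir(), both in the trace_events.c diff below.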
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/ftrace.c                87
-rw-r--r--  kernel/trace/trace.c                 27
-rw-r--r--  kernel/trace/trace_events.c         200
-rw-r--r--  kernel/trace/trace_events_filter.c   17
-rw-r--r--  kernel/trace/trace_kprobe.c          21
-rw-r--r--  kernel/trace/trace_uprobe.c          51
6 files changed, 272 insertions(+), 131 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8ce9eefc5bb4..a6d098c6df3f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2169,12 +2169,57 @@ static cycle_t ftrace_update_time;
 static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
-	struct ftrace_hash *hash;
+	/*
+	 * Filter_hash being empty will default to trace module.
+	 * But notrace hash requires a test of individual module functions.
+	 */
+	return ftrace_hash_empty(ops->filter_hash) &&
+		ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+	/* If ops isn't enabled, ignore it */
+	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return 0;
+
+	/* If ops traces all mods, we already accounted for it */
+	if (ops_traces_mod(ops))
+		return 0;
+
+	/* The function must be in the filter */
+	if (!ftrace_hash_empty(ops->filter_hash) &&
+	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+		return 0;
+
+	/* If in notrace hash, we ignore it too */
+	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+		return 0;
+
+	return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *ops;
+	int cnt = 0;
 
-	hash = ops->filter_hash;
-	return ftrace_hash_empty(hash);
+	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+		if (ops_references_rec(ops, rec))
+			cnt++;
+	}
+
+	return cnt;
 }
 
 static int ftrace_update_code(struct module *mod)
@@ -2183,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
 	struct dyn_ftrace *p;
 	cycle_t start, stop;
 	unsigned long ref = 0;
+	bool test = false;
 	int i;
 
 	/*
@@ -2196,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
 
 		for (ops = ftrace_ops_list;
 		     ops != &ftrace_list_end; ops = ops->next) {
-			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
-			    ops_traces_mod(ops))
-				ref++;
+			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+				if (ops_traces_mod(ops))
+					ref++;
+				else
+					test = true;
+			}
 		}
 	}
 
@@ -2208,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
 	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
 
 		for (i = 0; i < pg->index; i++) {
+			int cnt = ref;
+
 			/* If something went wrong, bail without enabling anything */
 			if (unlikely(ftrace_disabled))
 				return -1;
 
 			p = &pg->records[i];
-			p->flags = ref;
+			if (test)
+				cnt += referenced_filters(p);
+			p->flags = cnt;
 
 			/*
 			 * Do the initial record conversion from mcount jump
@@ -2233,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
 			 * conversion puts the module to the correct state, thus
 			 * passing the ftrace_make_call check.
 			 */
-			if (ftrace_start_up && ref) {
+			if (ftrace_start_up && cnt) {
 				int failed = __ftrace_replace_code(p, 1);
 				if (failed)
 					ftrace_bug(failed, p->ip);
@@ -3384,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 	return add_hash_entry(hash, ip);
 }
 
+static void ftrace_ops_update_code(struct ftrace_ops *ops)
+{
+	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+}
+
 static int
 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 		unsigned long ip, int remove, int reset, int enable)
@@ -3426,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 
 	mutex_lock(&ftrace_lock);
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
-	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
-	    && ftrace_enabled)
-		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+	if (!ret)
+		ftrace_ops_update_code(ops);
 
 	mutex_unlock(&ftrace_lock);
 
@@ -3655,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		mutex_lock(&ftrace_lock);
 		ret = ftrace_hash_move(iter->ops, filter_hash,
 				       orig_hash, iter->hash);
-		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
-		    && ftrace_enabled)
-			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+		if (!ret)
+			ftrace_ops_update_code(iter->ops);
 
 		mutex_unlock(&ftrace_lock);
 	}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 882ec1dd1515..496f94d57698 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(filter_current_check_discard);
 
-cycle_t ftrace_now(int cpu)
+cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
 	u64 ts;
 
 	/* Early boot up does not have a buffer yet */
-	if (!global_trace.trace_buffer.buffer)
+	if (!buf->buffer)
 		return trace_clock_local();
 
-	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
-	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
+	ts = ring_buffer_time_stamp(buf->buffer, cpu);
+	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 
 	return ts;
 }
 
+cycle_t ftrace_now(int cpu)
+{
+	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+}
+
 /**
  * tracing_is_enabled - Show if global_trace has been disabled
  *
@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
 	/* Make sure all commits have finished */
 	synchronize_sched();
 
-	buf->time_start = ftrace_now(buf->cpu);
+	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 
 	for_each_online_cpu(cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
@@ -1219,11 +1224,6 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
 	ring_buffer_record_enable(buffer);
 }
 
-void tracing_reset_current(int cpu)
-{
-	tracing_reset(&global_trace.trace_buffer, cpu);
-}
-
 /* Must have trace_types_lock held */
 void tracing_reset_all_online_cpus(void)
 {
@@ -4151,6 +4151,7 @@ waitagain:
 		memset(&iter->seq, 0,
 		       sizeof(struct trace_iterator) -
 		       offsetof(struct trace_iterator, seq));
+		cpumask_clear(iter->started);
 		iter->pos = -1;
 
 		trace_event_read_lock();
@@ -4468,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
 
 	/* disable tracing ? */
 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
-		tracing_off();
+		tracer_tracing_off(tr);
 	/* resize the ring buffer to 0 */
 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 
@@ -4633,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	 * New clock may not be consistent with the previous clock.
 	 * Reset the buffer so that it doesn't have incomparable timestamps.
 	 */
-	tracing_reset_online_cpus(&global_trace.trace_buffer);
+	tracing_reset_online_cpus(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
-	tracing_reset_online_cpus(&global_trace.max_buffer);
+	tracing_reset_online_cpus(&tr->max_buffer);
 #endif
 
 	mutex_unlock(&trace_types_lock);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 898f868833f2..29a7ebcfb426 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
 	mutex_unlock(&event_mutex);
 }
 
-/*
- * Open and update trace_array ref count.
- * Must have the current trace_array passed to it.
- */
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
 {
-	struct ftrace_event_file *file = inode->i_private;
-	struct trace_array *tr = file->tr;
-	int ret;
+	if (!dir)
+		return;
 
-	if (trace_array_get(tr) < 0)
-		return -ENODEV;
+	if (!--dir->nr_events) {
+		debugfs_remove_recursive(dir->entry);
+		list_del(&dir->list);
+		__put_system_dir(dir);
+	}
+}
 
-	ret = tracing_open_generic(inode, filp);
-	if (ret < 0)
-		trace_array_put(tr);
-	return ret;
+static void *event_file_data(struct file *filp)
+{
+	return ACCESS_ONCE(file_inode(filp)->i_private);
 }
 
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
+static void remove_event_file_dir(struct ftrace_event_file *file)
 {
-	struct ftrace_event_file *file = inode->i_private;
-	struct trace_array *tr = file->tr;
+	struct dentry *dir = file->dir;
+	struct dentry *child;
 
-	trace_array_put(tr);
+	if (dir) {
+		spin_lock(&dir->d_lock);	/* probably unneeded */
+		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+			if (child->d_inode)	/* probably unneeded */
+				child->d_inode->i_private = NULL;
+		}
+		spin_unlock(&dir->d_lock);
 
-	return 0;
+		debugfs_remove_recursive(dir);
+	}
+
+	list_del(&file->list);
+	remove_subsystem(file->system);
+	kmem_cache_free(file_cachep, file);
 }
 
 /*
@@ -679,15 +688,25 @@ static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_file *file = filp->private_data;
+	struct ftrace_event_file *file;
+	unsigned long flags;
 	char buf[4] = "0";
 
-	if (file->flags & FTRACE_EVENT_FL_ENABLED &&
-	    !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+	mutex_lock(&event_mutex);
+	file = event_file_data(filp);
+	if (likely(file))
+		flags = file->flags;
+	mutex_unlock(&event_mutex);
+
+	if (!file)
+		return -ENODEV;
+
+	if (flags & FTRACE_EVENT_FL_ENABLED &&
+	    !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
 		strcpy(buf, "1");
 
-	if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
-	    file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+	if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
+	    flags & FTRACE_EVENT_FL_SOFT_MODE)
 		strcat(buf, "*");
 
 	strcat(buf, "\n");
@@ -699,13 +718,10 @@ static ssize_t
 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_file *file = filp->private_data;
+	struct ftrace_event_file *file;
 	unsigned long val;
 	int ret;
 
-	if (!file)
-		return -EINVAL;
-
 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 	if (ret)
 		return ret;
@@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	switch (val) {
 	case 0:
 	case 1:
+		ret = -ENODEV;
 		mutex_lock(&event_mutex);
-		ret = ftrace_event_enable_disable(file, val);
+		file = event_file_data(filp);
+		if (likely(file))
+			ret = ftrace_event_enable_disable(file, val);
 		mutex_unlock(&event_mutex);
 		break;
 
@@ -825,7 +844,7 @@ enum {
 
 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_call *call = event_file_data(m->private);
 	struct list_head *common_head = &ftrace_common_fields;
 	struct list_head *head = trace_get_fields(call);
 	struct list_head *node = v;
@@ -857,7 +876,7 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 
 static int f_show(struct seq_file *m, void *v)
 {
-	struct ftrace_event_call *call = m->private;
+	struct ftrace_event_call *call = event_file_data(m->private);
 	struct ftrace_event_field *field;
 	const char *array_descriptor;
 
@@ -910,6 +929,11 @@ static void *f_start(struct seq_file *m, loff_t *pos)
 	void *p = (void *)FORMAT_HEADER;
 	loff_t l = 0;
 
+	/* ->stop() is called even if ->start() fails */
+	mutex_lock(&event_mutex);
+	if (!event_file_data(m->private))
+		return ERR_PTR(-ENODEV);
+
 	while (l < *pos && p)
 		p = f_next(m, p, &l);
 
@@ -918,6 +942,7 @@ static void *f_start(struct seq_file *m, loff_t *pos)
 
 static void f_stop(struct seq_file *m, void *p)
 {
+	mutex_unlock(&event_mutex);
 }
 
 static const struct seq_operations trace_format_seq_ops = {
@@ -929,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = {
 
 static int trace_format_open(struct inode *inode, struct file *file)
 {
-	struct ftrace_event_call *call = inode->i_private;
 	struct seq_file *m;
 	int ret;
 
@@ -938,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
 		return ret;
 
 	m = file->private_data;
-	m->private = call;
+	m->private = file;
 
 	return 0;
 }
@@ -946,14 +970,18 @@ static int trace_format_open(struct inode *inode, struct file *file)
 static ssize_t
 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	int id = (long)event_file_data(filp);
 	char buf[32];
 	int len;
 
 	if (*ppos)
 		return 0;
 
-	len = sprintf(buf, "%d\n", call->event.type);
+	if (unlikely(!id))
+		return -ENODEV;
+
+	len = sprintf(buf, "%d\n", id);
+
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 }
 
@@ -961,21 +989,28 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 		  loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	struct trace_seq *s;
-	int r;
+	int r = -ENODEV;
 
 	if (*ppos)
 		return 0;
 
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
+
 	if (!s)
 		return -ENOMEM;
 
 	trace_seq_init(s);
 
-	print_event_filter(call, s);
-	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		print_event_filter(call, s);
+	mutex_unlock(&event_mutex);
+
+	if (call)
+		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
 	kfree(s);
 
@@ -986,9 +1021,9 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		   loff_t *ppos)
 {
-	struct ftrace_event_call *call = filp->private_data;
+	struct ftrace_event_call *call;
 	char *buf;
-	int err;
+	int err = -ENODEV;
 
 	if (cnt >= PAGE_SIZE)
 		return -EINVAL;
@@ -1003,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	}
 	buf[cnt] = '\0';
 
-	err = apply_event_filter(call, buf);
+	mutex_lock(&event_mutex);
+	call = event_file_data(filp);
+	if (call)
+		err = apply_event_filter(call, buf);
+	mutex_unlock(&event_mutex);
+
 	free_page((unsigned long) buf);
 	if (err < 0)
 		return err;
@@ -1225,10 +1265,9 @@ static const struct file_operations ftrace_set_event_fops = {
 };
 
 static const struct file_operations ftrace_enable_fops = {
-	.open = tracing_open_generic_file,
+	.open = tracing_open_generic,
 	.read = event_enable_read,
 	.write = event_enable_write,
-	.release = tracing_release_generic_file,
 	.llseek = default_llseek,
 };
 
@@ -1240,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = {
1240}; 1279};
1241 1280
1242static const struct file_operations ftrace_event_id_fops = { 1281static const struct file_operations ftrace_event_id_fops = {
1243 .open = tracing_open_generic,
1244 .read = event_id_read, 1282 .read = event_id_read,
1245 .llseek = default_llseek, 1283 .llseek = default_llseek,
1246}; 1284};
@@ -1488,8 +1526,8 @@ event_create_dir(struct dentry *parent,
 
 #ifdef CONFIG_PERF_EVENTS
 	if (call->event.type && call->class->reg)
-		trace_create_file("id", 0444, file->dir, call,
-				  id);
+		trace_create_file("id", 0444, file->dir,
+				  (void *)(long)call->event.type, id);
 #endif
 
 	/*
@@ -1514,33 +1552,16 @@ event_create_dir(struct dentry *parent,
 	return 0;
 }
 
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
-{
-	if (!dir)
-		return;
-
-	if (!--dir->nr_events) {
-		debugfs_remove_recursive(dir->entry);
-		list_del(&dir->list);
-		__put_system_dir(dir);
-	}
-}
-
 static void remove_event_from_tracers(struct ftrace_event_call *call)
 {
 	struct ftrace_event_file *file;
 	struct trace_array *tr;
 
 	do_for_each_event_file_safe(tr, file) {
-
 		if (file->event_call != call)
 			continue;
 
-		list_del(&file->list);
-		debugfs_remove_recursive(file->dir);
-		remove_subsystem(file->system);
-		kmem_cache_free(file_cachep, file);
-
+		remove_event_file_dir(file);
 		/*
 		 * The do_for_each_event_file_safe() is
 		 * a double loop. After finding the call for this
@@ -1692,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 	destroy_preds(call);
 }
 
+static int probe_remove_event_call(struct ftrace_event_call *call)
+{
+	struct trace_array *tr;
+	struct ftrace_event_file *file;
+
+#ifdef CONFIG_PERF_EVENTS
+	if (call->perf_refcount)
+		return -EBUSY;
+#endif
+	do_for_each_event_file(tr, file) {
+		if (file->event_call != call)
+			continue;
+		/*
+		 * We can't rely on ftrace_event_enable_disable(enable => 0)
+		 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
+		 * TRACE_REG_UNREGISTER.
+		 */
+		if (file->flags & FTRACE_EVENT_FL_ENABLED)
+			return -EBUSY;
+		/*
+		 * The do_for_each_event_file_safe() is
+		 * a double loop. After finding the call for this
+		 * trace_array, we use break to jump to the next
+		 * trace_array.
+		 */
+		break;
+	} while_for_each_event_file();
+
+	__trace_remove_event_call(call);
+
+	return 0;
+}
+
 /* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct ftrace_event_call *call)
 {
+	int ret;
+
 	mutex_lock(&trace_types_lock);
 	mutex_lock(&event_mutex);
 	down_write(&trace_event_sem);
-	__trace_remove_event_call(call);
+	ret = probe_remove_event_call(call);
 	up_write(&trace_event_sem);
 	mutex_unlock(&event_mutex);
 	mutex_unlock(&trace_types_lock);
+
+	return ret;
 }
 
 #define for_each_event(event, start, end)		\
@@ -2270,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
 {
 	struct ftrace_event_file *file, *next;
 
-	list_for_each_entry_safe(file, next, &tr->events, list) {
-		list_del(&file->list);
-		debugfs_remove_recursive(file->dir);
-		remove_subsystem(file->system);
-		kmem_cache_free(file_cachep, file);
-	}
+	list_for_each_entry_safe(file, next, &tr->events, list)
+		remove_event_file_dir(file);
 }
 
 static void
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0c7b75a8acc8..97daa8cf958d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps,
 	free_page((unsigned long) buf);
 }
 
+/* caller must hold event_mutex */
 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
 {
-	struct event_filter *filter;
+	struct event_filter *filter = call->filter;
 
-	mutex_lock(&event_mutex);
-	filter = call->filter;
 	if (filter && filter->filter_string)
 		trace_seq_printf(s, "%s\n", filter->filter_string);
 	else
 		trace_seq_puts(s, "none\n");
-	mutex_unlock(&event_mutex);
 }
 
 void print_subsystem_event_filter(struct event_subsystem *system,
@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system,
 	return err;
 }
 
+/* caller must hold event_mutex */
 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 {
 	struct event_filter *filter;
-	int err = 0;
-
-	mutex_lock(&event_mutex);
+	int err;
 
 	if (!strcmp(strstrip(filter_string), "0")) {
 		filter_disable(call);
 		filter = call->filter;
 		if (!filter)
-			goto out_unlock;
+			return 0;
 		RCU_INIT_POINTER(call->filter, NULL);
 		/* Make sure the filter is not being used */
 		synchronize_sched();
 		__free_filter(filter);
-		goto out_unlock;
+		return 0;
 	}
 
 	err = create_filter(call, filter_string, true, &filter);
@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 			__free_filter(tmp);
 		}
 	}
-out_unlock:
-	mutex_unlock(&event_mutex);
 
 	return err;
 }
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3811487e7a7a..243f6834d026 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
 }
 
 static int register_probe_event(struct trace_probe *tp);
-static void unregister_probe_event(struct trace_probe *tp);
+static int unregister_probe_event(struct trace_probe *tp);
 
 static DEFINE_MUTEX(probe_lock);
 static LIST_HEAD(probe_list);
@@ -351,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
 	if (trace_probe_is_enabled(tp))
 		return -EBUSY;
 
+	/* Will fail if probe is being used by ftrace or perf */
+	if (unregister_probe_event(tp))
+		return -EBUSY;
+
 	__unregister_trace_probe(tp);
 	list_del(&tp->list);
-	unregister_probe_event(tp);
 
 	return 0;
 }
@@ -632,7 +635,9 @@ static int release_all_trace_probes(void)
 	/* TODO: Use batch unregistration */
 	while (!list_empty(&probe_list)) {
 		tp = list_entry(probe_list.next, struct trace_probe, list);
-		unregister_trace_probe(tp);
+		ret = unregister_trace_probe(tp);
+		if (ret)
+			goto end;
 		free_trace_probe(tp);
 	}
 
@@ -1247,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp)
 	return ret;
 }
 
-static void unregister_probe_event(struct trace_probe *tp)
+static int unregister_probe_event(struct trace_probe *tp)
 {
+	int ret;
+
 	/* tp->event is unregistered in trace_remove_event_call() */
-	trace_remove_event_call(&tp->call);
-	kfree(tp->call.print_fmt);
+	ret = trace_remove_event_call(&tp->call);
+	if (!ret)
+		kfree(tp->call.print_fmt);
+	return ret;
 }
 
 /* Make a debugfs interface for controlling probe points */
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index a23d2d71188e..272261b5f94f 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -70,7 +70,7 @@ struct trace_uprobe {
 	(sizeof(struct probe_arg) * (n)))
 
 static int register_uprobe_event(struct trace_uprobe *tu);
-static void unregister_uprobe_event(struct trace_uprobe *tu);
+static int unregister_uprobe_event(struct trace_uprobe *tu);
 
 static DEFINE_MUTEX(uprobe_lock);
 static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
 }
 
 /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
-static void unregister_trace_uprobe(struct trace_uprobe *tu)
+static int unregister_trace_uprobe(struct trace_uprobe *tu)
 {
+	int ret;
+
+	ret = unregister_uprobe_event(tu);
+	if (ret)
+		return ret;
+
 	list_del(&tu->list);
-	unregister_uprobe_event(tu);
 	free_trace_uprobe(tu);
+	return 0;
 }
 
 /* Register a trace_uprobe and probe_event */
173 179
174/* Register a trace_uprobe and probe_event */ 180/* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
181 187
182 /* register as an event */ 188 /* register as an event */
183 old_tp = find_probe_event(tu->call.name, tu->call.class->system); 189 old_tp = find_probe_event(tu->call.name, tu->call.class->system);
184 if (old_tp) 190 if (old_tp) {
185 /* delete old event */ 191 /* delete old event */
186 unregister_trace_uprobe(old_tp); 192 ret = unregister_trace_uprobe(old_tp);
193 if (ret)
194 goto end;
195 }
187 196
188 ret = register_uprobe_event(tu); 197 ret = register_uprobe_event(tu);
189 if (ret) { 198 if (ret) {
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
 		group = UPROBE_EVENT_SYSTEM;
 
 	if (is_delete) {
+		int ret;
+
 		if (!event) {
 			pr_info("Delete command needs an event name.\n");
 			return -EINVAL;
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
 			return -ENOENT;
 		}
 		/* delete an event */
-		unregister_trace_uprobe(tu);
+		ret = unregister_trace_uprobe(tu);
 		mutex_unlock(&uprobe_lock);
-		return 0;
+		return ret;
 	}
 
 	if (argc < 2) {
@@ -408,16 +419,20 @@ fail_address_parse:
 	return ret;
 }
 
-static void cleanup_all_probes(void)
+static int cleanup_all_probes(void)
 {
 	struct trace_uprobe *tu;
+	int ret = 0;
 
 	mutex_lock(&uprobe_lock);
 	while (!list_empty(&uprobe_list)) {
 		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
-		unregister_trace_uprobe(tu);
+		ret = unregister_trace_uprobe(tu);
+		if (ret)
+			break;
 	}
 	mutex_unlock(&uprobe_lock);
+	return ret;
 }
 
 /* Probes listing interfaces */
@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
 
 static int probes_open(struct inode *inode, struct file *file)
 {
-	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
-		cleanup_all_probes();
+	int ret;
+
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		ret = cleanup_all_probes();
+		if (ret)
+			return ret;
+	}
 
 	return seq_open(file, &probes_seq_op);
 }
@@ -968,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
 	return ret;
 }
 
-static void unregister_uprobe_event(struct trace_uprobe *tu)
+static int unregister_uprobe_event(struct trace_uprobe *tu)
 {
+	int ret;
+
 	/* tu->event is unregistered in trace_remove_event_call() */
-	trace_remove_event_call(&tu->call);
+	ret = trace_remove_event_call(&tu->call);
+	if (ret)
+		return ret;
 	kfree(tu->call.print_fmt);
 	tu->call.print_fmt = NULL;
+	return 0;
 }
 
 /* Make a trace interface for controling probe points */