 kernel/trace/trace_events_filter.c |  4 ++++
 kernel/trace/trace_kprobe.c        | 53 ++++++++++++++++++++++++++++++++++++++++++++-------------
 2 files changed, 44 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index a6361178de5a..e1b653f7e1ca 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -750,7 +750,11 @@ static int filter_set_pred(struct event_filter *filter,
 
 static void __free_preds(struct event_filter *filter)
 {
+	int i;
+
 	if (filter->preds) {
+		for (i = 0; i < filter->n_preds; i++)
+			kfree(filter->preds[i].ops);
 		kfree(filter->preds);
 		filter->preds = NULL;
 	}
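The hunk above closes a leak: each predicate may own a separately allocated ->ops table, which was previously dropped when the preds array itself was freed. A minimal kernel-style sketch of the same pattern, using hypothetical my_* names rather than the real trace filter structures:

/*
 * Hypothetical sketch (my_* names are not from this patch): per-element
 * allocations must be released before the array that owns them.
 */
struct my_pred {
	void		*ops;		/* separately allocated, may be NULL */
};

struct my_filter {
	struct my_pred	*preds;
	int		n_preds;
};

static void my_free_preds(struct my_filter *filter)
{
	int i;

	if (filter->preds) {
		for (i = 0; i < filter->n_preds; i++)
			kfree(filter->preds[i].ops);	/* kfree(NULL) is a no-op */
		kfree(filter->preds);
		filter->preds = NULL;
	}
	filter->n_preds = 0;
}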
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 636d45fe69b3..9f46e98ba8f2 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,7 +35,7 @@ struct trace_probe {
 	const char		*symbol;	/* symbol name */
 	struct ftrace_event_class	class;
 	struct ftrace_event_call	call;
-	struct ftrace_event_file	**files;
+	struct ftrace_event_file * __rcu *files;
 	ssize_t			size;		/* trace entry size */
 	unsigned int		nr_args;
 	struct probe_arg	args[];
@@ -185,9 +185,14 @@ static struct trace_probe *find_trace_probe(const char *event,
 
 static int trace_probe_nr_files(struct trace_probe *tp)
 {
-	struct ftrace_event_file **file = tp->files;
+	struct ftrace_event_file **file;
 	int ret = 0;
 
+	/*
+	 * Since all tp->files updater is protected by probe_enable_lock,
+	 * we don't need to lock an rcu_read_lock.
+	 */
+	file = rcu_dereference_raw(tp->files);
 	if (file)
 		while (*(file++))
 			ret++;
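This hunk shows the update-side access rule the rest of the patch leans on: all writers of tp->files hold probe_enable_lock, so functions called with that lock held can read the __rcu pointer without rcu_read_lock(). A hedged sketch of that pattern with hypothetical my_* names; rcu_dereference_protected() with a lockdep expression is the more self-documenting accessor when the protecting lock is known, whereas the patch itself uses rcu_dereference_raw():

/* Hypothetical sketch: reading an __rcu pointer on the update side. */
struct my_probe {
	struct mutex			lock;	/* serializes all updates of files */
	struct ftrace_event_file * __rcu *files;	/* NULL-terminated array */
};

static int my_probe_nr_files(struct my_probe *p)
{
	/* Caller holds p->lock, so no RCU read-side critical section is needed. */
	struct ftrace_event_file **f = rcu_dereference_protected(p->files,
						lockdep_is_held(&p->lock));
	int n = 0;

	if (f)
		while (*f++)
			n++;
	return n;
}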
@@ -209,9 +214,10 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	mutex_lock(&probe_enable_lock);
 
 	if (file) {
-		struct ftrace_event_file **new, **old = tp->files;
+		struct ftrace_event_file **new, **old;
 		int n = trace_probe_nr_files(tp);
 
+		old = rcu_dereference_raw(tp->files);
 		/* 1 is for new one and 1 is for stopper */
 		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
 			      GFP_KERNEL);
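Only the allocation is visible in this hunk; the rest of enable_trace_probe() (outside the hunk) fills the new array and publishes it. A hypothetical sketch of that copy-and-publish step under the same assumptions as the my_* sketch above, not the patch's actual code:

/* Hypothetical sketch: append an entry by copy, update, publish (RCU style). */
static int my_probe_add_file(struct my_probe *p, struct ftrace_event_file *file)
{
	struct ftrace_event_file **new, **old;
	int n, i;

	mutex_lock(&p->lock);
	old = rcu_dereference_protected(p->files, lockdep_is_held(&p->lock));
	n = my_probe_nr_files(p);

	/* One slot for the new entry, one for the NULL "stopper". */
	new = kzalloc((n + 2) * sizeof(*new), GFP_KERNEL);
	if (!new) {
		mutex_unlock(&p->lock);
		return -ENOMEM;
	}
	for (i = 0; i < n; i++)
		new[i] = old[i];
	new[n] = file;				/* new[n + 1] stays NULL */

	rcu_assign_pointer(p->files, new);	/* pairs with the readers' barrier */
	mutex_unlock(&p->lock);

	/* Handlers run with preemption disabled, so one common ordering is to
	 * wait for them (e.g. synchronize_sched() in kernels of this vintage)
	 * before the old array may be freed. */
	synchronize_sched();
	kfree(old);
	return 0;
}

Where exactly the real patch waits and frees is outside these hunks; the sketch only shows the ordering constraint that the __rcu annotation documents.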
@@ -251,11 +257,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 static int
 trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
 {
+	struct ftrace_event_file **files;
 	int i;
 
-	if (tp->files) {
-		for (i = 0; tp->files[i]; i++)
-			if (tp->files[i] == file)
+	/*
+	 * Since all tp->files updater is protected by probe_enable_lock,
+	 * we don't need to lock an rcu_read_lock.
+	 */
+	files = rcu_dereference_raw(tp->files);
+	if (files) {
+		for (i = 0; files[i]; i++)
+			if (files[i] == file)
 				return i;
 	}
 
@@ -274,10 +286,11 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	mutex_lock(&probe_enable_lock);
 
 	if (file) {
-		struct ftrace_event_file **new, **old = tp->files;
+		struct ftrace_event_file **new, **old;
 		int n = trace_probe_nr_files(tp);
 		int i, j;
 
+		old = rcu_dereference_raw(tp->files);
 		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
 			ret = -EINVAL;
 			goto out_unlock;
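The disable path follows the same copy-update-publish shape, but compacts the array instead of growing it. A hypothetical sketch under the same my_* assumptions, again not the patch's code:

/* Hypothetical sketch: remove one entry, keeping the NULL terminator intact. */
static int my_probe_del_file(struct my_probe *p, struct ftrace_event_file *file)
{
	struct ftrace_event_file **new = NULL, **old;
	int n, i, j, found = 0;

	mutex_lock(&p->lock);
	old = rcu_dereference_protected(p->files, lockdep_is_held(&p->lock));
	n = my_probe_nr_files(p);

	for (i = 0; i < n; i++)
		found |= (old[i] == file);
	if (!found) {
		mutex_unlock(&p->lock);
		return -EINVAL;
	}

	if (n > 1) {
		/* n - 1 surviving entries plus the NULL "stopper". */
		new = kzalloc(n * sizeof(*new), GFP_KERNEL);
		if (!new) {
			mutex_unlock(&p->lock);
			return -ENOMEM;
		}
		for (i = 0, j = 0; i < n; i++)
			if (old[i] != file)
				new[j++] = old[i];
	}

	rcu_assign_pointer(p->files, new);	/* NULL once the last file is gone */
	mutex_unlock(&p->lock);

	synchronize_sched();			/* let in-flight handlers finish */
	kfree(old);
	return 0;
}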
@@ -872,9 +885,16 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
 static __kprobes void
 kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-	struct ftrace_event_file **file = tp->files;
+	/*
+	 * Note: preempt is already disabled around the kprobe handler.
+	 * However, we still need an smp_read_barrier_depends() corresponding
+	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+	 */
+	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+	if (unlikely(!file))
+		return;
 
-	/* Note: preempt is already disabled around the kprobe handler */
 	while (*file) {
 		__kprobe_trace_func(tp, regs, *file);
 		file++;
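On the handler side there is no lock: preemption being disabled around kprobe handlers stands in for the read-side critical section, and rcu_dereference_raw() supplies the dependency barrier that pairs with rcu_assign_pointer() on the update side. A hypothetical reader-side sketch, same my_* assumptions as above:

/* Hypothetical sketch: lockless traversal from a preempt-disabled handler. */
static void my_probe_dispatch(struct my_probe *p, struct pt_regs *regs)
{
	struct ftrace_event_file **f = rcu_dereference_raw(p->files);

	if (unlikely(!f))
		return;			/* probe fired with no consumers attached */

	/* The NULL "stopper" ends the walk, so readers never depend on a
	 * separate length field that could go stale relative to the pointer. */
	while (*f) {
		/* ... emit the event to *f ... */
		f++;
	}
}

Keeping the array NULL-terminated is what lets the array pointer be the single RCU-protected object: readers pick up either the old or the new array atomically and walk it to its own terminator.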
@@ -925,9 +945,16 @@ static __kprobes void
 kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 		     struct pt_regs *regs)
 {
-	struct ftrace_event_file **file = tp->files;
+	/*
+	 * Note: preempt is already disabled around the kprobe handler.
+	 * However, we still need an smp_read_barrier_depends() corresponding
+	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+	 */
+	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+	if (unlikely(!file))
+		return;
 
-	/* Note: preempt is already disabled around the kprobe handler */
 	while (*file) {
 		__kretprobe_trace_func(tp, ri, regs, *file);
 		file++;
@@ -935,7 +962,7 @@ kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 }
 
 /* Event entry printers */
-enum print_line_t
+static enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
 		   struct trace_event *event)
 {
@@ -971,7 +998,7 @@ partial:
 	return TRACE_TYPE_PARTIAL_LINE;
 }
 
-enum print_line_t
+static enum print_line_t
 print_kretprobe_event(struct trace_iterator *iter, int flags,
 		      struct trace_event *event)
 {
