| author | Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> | 2013-05-13 07:58:34 -0400 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2013-05-15 13:50:22 -0400 |
| commit | c02c7e65d9b13670e34bc523744cf4f6e99c198a (patch) | |
| tree | 186f0a0e169e83b9c7d899ace2e028b7293459c5 | |
| parent | 60705c89460fdc7227f2d153b68b3f34814738a4 (diff) | |
tracing/kprobes: Use rcu_dereference_raw for tp->files
Use rcu_dereference_raw() for accessing tp->files. Since the
write side uses rcu_assign_pointer(), which provides the write
memory barrier, the read side also has to use rcu_dereference_raw(),
which provides the corresponding read-side memory barrier.
Link: http://lkml.kernel.org/r/20130513115834.6545.17022.stgit@mhiramat-M0-7522
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tom Zanussi <tom.zanussi@intel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
| -rw-r--r-- | kernel/trace/trace_kprobe.c | 47 |
1 file changed, 37 insertions(+), 10 deletions(-)
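For context before the diff: a minimal, hypothetical sketch of the publish/read pattern this patch relies on. All names here (my_probe, my_file, my_update_lock, my_probe_add_file, my_probe_for_each_file) are made up for illustration and are not taken from the kernel source; the real code keeps its updaters under probe_enable_lock and reads tp->files from preempt-disabled kprobe handlers, as the diff below shows.

```c
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_file;				/* hypothetical element type */

struct my_probe {
	struct my_file **files;		/* NULL-terminated, RCU-published array */
};

static DEFINE_MUTEX(my_update_lock);	/* serializes every writer of ->files */

/*
 * Writer side: build a new array and publish it. rcu_assign_pointer()
 * issues the write barrier, so a reader that picks up the new pointer is
 * guaranteed to see the initialized array contents behind it.
 */
static int my_probe_add_file(struct my_probe *p, struct my_file *file)
{
	struct my_file **new, **old;
	int n = 0;

	mutex_lock(&my_update_lock);

	old = rcu_dereference_raw(p->files);	/* ok: writers serialized by the mutex */
	if (old)
		while (old[n])
			n++;

	new = kcalloc(n + 2, sizeof(*new), GFP_KERNEL);	/* +1 new entry, +1 NULL */
	if (!new) {
		mutex_unlock(&my_update_lock);
		return -ENOMEM;
	}
	if (old)
		memcpy(new, old, n * sizeof(*new));
	new[n] = file;

	rcu_assign_pointer(p->files, new);	/* publish with write barrier */
	mutex_unlock(&my_update_lock);
	/* freeing 'old' safely (after readers are done) is a separate concern */
	return 0;
}

/*
 * Reader side: may run with only preemption disabled (as in a kprobe
 * handler), so there is no rcu_read_lock() section here; the pointer is
 * still fetched through rcu_dereference_raw(), which supplies the
 * read-side dependency barrier pairing with rcu_assign_pointer() above.
 */
static void my_probe_for_each_file(struct my_probe *p,
				   void (*fn)(struct my_file *))
{
	struct my_file **file = rcu_dereference_raw(p->files);

	if (unlikely(!file))
		return;
	while (*file)
		fn(*file++);
}
```

The point of the change: even where no rcu_read_lock() is needed (the updaters are serialized by probe_enable_lock, and the handlers run with preemption disabled), a plain tp->files load lacks the read-side dependency barrier, so the accesses go through rcu_dereference_raw() to pair with rcu_assign_pointer()'s write barrier.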
```diff
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 636d45fe69b3..0a3d8d5c483d 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -185,9 +185,14 @@ static struct trace_probe *find_trace_probe(const char *event,
 
 static int trace_probe_nr_files(struct trace_probe *tp)
 {
-	struct ftrace_event_file **file = tp->files;
+	struct ftrace_event_file **file;
 	int ret = 0;
 
+	/*
+	 * Since all tp->files updater is protected by probe_enable_lock,
+	 * we don't need to lock an rcu_read_lock.
+	 */
+	file = rcu_dereference_raw(tp->files);
 	if (file)
 		while (*(file++))
 			ret++;
@@ -209,9 +214,10 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	mutex_lock(&probe_enable_lock);
 
 	if (file) {
-		struct ftrace_event_file **new, **old = tp->files;
+		struct ftrace_event_file **new, **old;
 		int n = trace_probe_nr_files(tp);
 
+		old = rcu_dereference_raw(tp->files);
 		/* 1 is for new one and 1 is for stopper */
 		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
 			      GFP_KERNEL);
@@ -251,11 +257,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 static int
 trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
 {
+	struct ftrace_event_file **files;
 	int i;
 
-	if (tp->files) {
-		for (i = 0; tp->files[i]; i++)
-			if (tp->files[i] == file)
+	/*
+	 * Since all tp->files updater is protected by probe_enable_lock,
+	 * we don't need to lock an rcu_read_lock.
+	 */
+	files = rcu_dereference_raw(tp->files);
+	if (files) {
+		for (i = 0; files[i]; i++)
+			if (files[i] == file)
 				return i;
 	}
 
@@ -274,10 +286,11 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	mutex_lock(&probe_enable_lock);
 
 	if (file) {
-		struct ftrace_event_file **new, **old = tp->files;
+		struct ftrace_event_file **new, **old;
 		int n = trace_probe_nr_files(tp);
 		int i, j;
 
+		old = rcu_dereference_raw(tp->files);
 		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
 			ret = -EINVAL;
 			goto out_unlock;
@@ -872,9 +885,16 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
 static __kprobes void
 kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-	struct ftrace_event_file **file = tp->files;
+	/*
+	 * Note: preempt is already disabled around the kprobe handler.
+	 * However, we still need an smp_read_barrier_depends() corresponding
+	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+	 */
+	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+	if (unlikely(!file))
+		return;
 
-	/* Note: preempt is already disabled around the kprobe handler */
 	while (*file) {
 		__kprobe_trace_func(tp, regs, *file);
 		file++;
@@ -925,9 +945,16 @@ static __kprobes void
 kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 		     struct pt_regs *regs)
 {
-	struct ftrace_event_file **file = tp->files;
+	/*
+	 * Note: preempt is already disabled around the kprobe handler.
+	 * However, we still need an smp_read_barrier_depends() corresponding
+	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+	 */
+	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+	if (unlikely(!file))
+		return;
 
-	/* Note: preempt is already disabled around the kprobe handler */
 	while (*file) {
 		__kretprobe_trace_func(tp, ri, regs, *file);
 		file++;
```
