author     Oleg Nesterov <oleg@redhat.com>       2013-06-20 13:38:14 -0400
committer  Steven Rostedt <rostedt@goodmis.org>  2013-07-01 20:34:27 -0400
commit     b04d52e368e2cf526abb2bab61f304eaea126af2 (patch)
tree       b133eb6dcc68e0b4e39c477aeddad326c0797959 /kernel
parent     3baa5e4cf224b8a55220cc841bb475e164b84ceb (diff)
tracing/kprobes: Turn trace_probe->files into list_head
I think that "ftrace_event_file *trace_probe[]" complicates the code
for no reason; turn it into a list_head to simplify the code.
enable_trace_probe() no longer needs synchronize_sched().
This costs an extra sizeof(list_head) of memory for every attached
ftrace_event_file, which is hopefully not a problem in this case.
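
To make the shape of the change concrete, here is a minimal, hypothetical
userspace mock of the new design: one heap-allocated link per attached
file, chained into a list that the probe handler walks. The
mock_event_file name and the single-threaded setting are inventions of
the sketch; in the kernel the link is published with list_add_tail_rcu()
and walked with list_for_each_entry_rcu():

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for struct ftrace_event_file; the name is invented. */
    struct mock_event_file {
            const char *name;
    };

    /* Mirrors struct event_file_link from the patch: one node per file. */
    struct event_file_link {
            struct mock_event_file *file;
            struct event_file_link *next;  /* the kernel embeds a list_head */
    };

    /* enable_trace_probe() path: allocate one link and chain it in. */
    static int attach_file(struct event_file_link **head,
                           struct mock_event_file *file)
    {
            struct event_file_link *link = malloc(sizeof(*link));

            if (!link)
                    return -1;          /* kernel: ret = -ENOMEM */
            link->file = file;
            link->next = *head;         /* kernel: list_add_tail_rcu() */
            *head = link;
            return 0;
    }

    /* kprobe_trace_func() path: deliver the event to every attached file. */
    static void fire_probe(struct event_file_link *head)
    {
            /* kernel: list_for_each_entry_rcu(link, &tp->files, list) */
            for (struct event_file_link *link = head; link; link = link->next)
                    printf("event -> %s\n", link->file->name);
    }

    int main(void)
    {
            struct mock_event_file f1 = { "instance1" }, f2 = { "instance2" };
            struct event_file_link *head = NULL;

            attach_file(&head, &f1);
            attach_file(&head, &f2);
            fire_probe(head);

            while (head) {              /* teardown; single-threaded here */
                    struct event_file_link *next = head->next;
                    free(head);
                    head = next;
            }
            return 0;
    }

The old code had to reallocate and copy the whole NULL-terminated array
on every attach and detach; with the list, attach is one allocation and
detach frees exactly one node.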
Link: http://lkml.kernel.org/r/20130620173814.GA13165@redhat.com
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/trace/trace_kprobe.c   138
1 file changed, 37 insertions(+), 101 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 282f86cfd304..405b5b0f903e 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,12 +35,17 @@ struct trace_probe {
 	const char		*symbol;	/* symbol name */
 	struct ftrace_event_class	class;
 	struct ftrace_event_call	call;
-	struct ftrace_event_file * __rcu *files;
+	struct list_head	files;
 	ssize_t			size;		/* trace entry size */
 	unsigned int		nr_args;
 	struct probe_arg	args[];
 };
 
+struct event_file_link {
+	struct ftrace_event_file	*file;
+	struct list_head		list;
+};
+
 #define SIZEOF_TRACE_PROBE(n)			\
 	(offsetof(struct trace_probe, args) +	\
 	(sizeof(struct probe_arg) * (n)))
@@ -150,6 +155,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 		goto error;
 
 	INIT_LIST_HEAD(&tp->list);
+	INIT_LIST_HEAD(&tp->files);
 	return tp;
 error:
 	kfree(tp->call.name);
@@ -184,22 +190,6 @@ static struct trace_probe *find_trace_probe(const char *event,
 }
 
 /*
- * This and enable_trace_probe/disable_trace_probe rely on event_mutex
- * held by the caller, __ftrace_set_clr_event().
- */
-static int trace_probe_nr_files(struct trace_probe *tp)
-{
-	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-	int ret = 0;
-
-	if (file)
-		while (*(file++))
-			ret++;
-
-	return ret;
-}
-
-/*
  * Enable trace_probe
  * if the file is NULL, enable "perf" handler, or enable "trace" handler.
  */
@@ -209,29 +199,18 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	int ret = 0;
 
 	if (file) {
-		struct ftrace_event_file **new, **old;
-		int n = trace_probe_nr_files(tp);
-
-		old = rcu_dereference_raw(tp->files);
-		/* 1 is for new one and 1 is for stopper */
-		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
-			      GFP_KERNEL);
-		if (!new) {
+		struct event_file_link *link;
+
+		link = kmalloc(sizeof(*link), GFP_KERNEL);
+		if (!link) {
 			ret = -ENOMEM;
 			goto out;
 		}
-		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
-		new[n] = file;
-		/* The last one keeps a NULL */
 
-		rcu_assign_pointer(tp->files, new);
-		tp->flags |= TP_FLAG_TRACE;
+		link->file = file;
+		list_add_tail_rcu(&link->list, &tp->files);
 
-		if (old) {
-			/* Make sure the probe is done with old files */
-			synchronize_sched();
-			kfree(old);
-		}
+		tp->flags |= TP_FLAG_TRACE;
 	} else
 		tp->flags |= TP_FLAG_PROFILE;
 
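The hunk above is where enable_trace_probe() sheds its
synchronize_sched() call: insertion only ever publishes a fully
initialized node, so a concurrent handler walking the list sees either
the old list or the new one, never a half-built link. Schematically,
with the kernel primitives named for orientation (not a drop-in):

    link->file = file;                          /* initialize first...   */
    list_add_tail_rcu(&link->list, &tp->files); /* ...then publish; the
                                                   insertion orders the
                                                   ->file store before the
                                                   node becomes visible  */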
@@ -245,24 +224,16 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	return ret;
 }
 
-static int
-trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
+static struct event_file_link *
+find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-	struct ftrace_event_file **files;
-	int i;
+	struct event_file_link *link;
 
-	/*
-	 * Since all tp->files updater is protected by probe_enable_lock,
-	 * we don't need to lock an rcu_read_lock.
-	 */
-	files = rcu_dereference_raw(tp->files);
-	if (files) {
-		for (i = 0; files[i]; i++)
-			if (files[i] == file)
-				return i;
-	}
+	list_for_each_entry(link, &tp->files, list)
+		if (link->file == file)
+			return link;
 
-	return -1;
+	return NULL;
 }
 
 /*
@@ -275,38 +246,23 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 	int ret = 0;
 
 	if (file) {
-		struct ftrace_event_file **new, **old;
-		int n = trace_probe_nr_files(tp);
-		int i, j;
+		struct event_file_link *link;
 
-		old = rcu_dereference_raw(tp->files);
-		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+		link = find_event_file_link(tp, file);
+		if (!link) {
 			ret = -EINVAL;
 			goto out;
 		}
 
-		if (n == 1) {	/* Remove the last file */
-			tp->flags &= ~TP_FLAG_TRACE;
-			new = NULL;
-		} else {
-			new = kzalloc(n * sizeof(struct ftrace_event_file *),
-				      GFP_KERNEL);
-			if (!new) {
-				ret = -ENOMEM;
-				goto out;
-			}
-
-			/* This copy & check loop copies the NULL stopper too */
-			for (i = 0, j = 0; j < n && i < n + 1; i++)
-				if (old[i] != file)
-					new[j++] = old[i];
-		}
+		list_del_rcu(&link->list);
+		/* synchronize with kprobe_trace_func/kretprobe_trace_func */
+		synchronize_sched();
+		kfree(link);
 
-		rcu_assign_pointer(tp->files, new);
+		if (!list_empty(&tp->files))
+			goto out;
 
-		/* Make sure the probe is done with old files */
-		synchronize_sched();
-		kfree(old);
+		tp->flags &= ~TP_FLAG_TRACE;
 	} else
 		tp->flags &= ~TP_FLAG_PROFILE;
 
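Removal is the side that still needs a grace period, and the three steps
above must keep exactly this order for the lifetime to be safe:
unpublish first, wait for every in-flight handler, free last. Because
the kprobe handlers run with preemption disabled, synchronize_sched()
is a sufficient wait. Annotated (schematic, not a drop-in):

    list_del_rcu(&link->list);  /* 1. unpublish: new walkers cannot
                                      find the link anymore           */
    synchronize_sched();        /* 2. wait: any handler still walking
                                      the old list has returned       */
    kfree(link);                /* 3. reclaim: no reader can reach it */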
@@ -871,20 +827,10 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
 static __kprobes void
 kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-	/*
-	 * Note: preempt is already disabled around the kprobe handler.
-	 * However, we still need an smp_read_barrier_depends() corresponding
-	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
-	 */
-	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-
-	if (unlikely(!file))
-		return;
+	struct event_file_link *link;
 
-	while (*file) {
-		__kprobe_trace_func(tp, regs, *file);
-		file++;
-	}
+	list_for_each_entry_rcu(link, &tp->files, list)
+		__kprobe_trace_func(tp, regs, link->file);
 }
 
 /* Kretprobe handler */
@@ -931,20 +877,10 @@ static __kprobes void
 kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 		     struct pt_regs *regs)
 {
-	/*
-	 * Note: preempt is already disabled around the kprobe handler.
-	 * However, we still need an smp_read_barrier_depends() corresponding
-	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
-	 */
-	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-
-	if (unlikely(!file))
-		return;
+	struct event_file_link *link;
 
-	while (*file) {
-		__kretprobe_trace_func(tp, ri, regs, *file);
-		file++;
-	}
+	list_for_each_entry_rcu(link, &tp->files, list)
+		__kretprobe_trace_func(tp, ri, regs, link->file);
 }
 
 /* Event entry printers */
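Note that neither handler above takes rcu_read_lock(): kprobe handlers
run with preemption disabled, and that preempt-disabled region is
exactly the critical section synchronize_sched() waits out, which is
the pairing the deleted comment blocks used to spell out. The contract,
schematically:

    /* reader (kprobe handler, preemption already disabled): */
    list_for_each_entry_rcu(link, &tp->files, list)
            __kprobe_trace_func(tp, regs, link->file); /* link->file stays
                                                          valid all walk  */

    /* writer (disable path) blocks in synchronize_sched() until every
       such preempt-disabled walk has finished, then kfree()s the link. */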