diff options
| author | Namhyung Kim <namhyung.kim@lge.com> | 2013-07-03 00:50:51 -0400 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2014-01-02 16:17:29 -0500 |
| commit | c31ffb3ff633109e8b7b438a9e1815b919f5e32d (patch) | |
| tree | 77536f8c866a626b4e7992151058068dd8bda5d8 /kernel/trace | |
| parent | 50eb2672ce13d73e96f6cee84e78cfb52513ff48 (diff) | |
tracing/kprobes: Factor out struct trace_probe
There are functions that can be shared between kprobes and uprobes.
Separate the common data structure out into struct trace_probe and use
it from the shared functions.
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: zhangwei(Jovi) <jovi.zhangwei@huawei.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/trace_kprobe.c | 560 | ||||
| -rw-r--r-- | kernel/trace/trace_probe.h | 20 |
2 files changed, 295 insertions, 285 deletions
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index dae9541ada9e..727190698727 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -27,18 +27,12 @@ | |||
| 27 | /** | 27 | /** |
| 28 | * Kprobe event core functions | 28 | * Kprobe event core functions |
| 29 | */ | 29 | */ |
| 30 | struct trace_probe { | 30 | struct trace_kprobe { |
| 31 | struct list_head list; | 31 | struct list_head list; |
| 32 | struct kretprobe rp; /* Use rp.kp for kprobe use */ | 32 | struct kretprobe rp; /* Use rp.kp for kprobe use */ |
| 33 | unsigned long nhit; | 33 | unsigned long nhit; |
| 34 | unsigned int flags; /* For TP_FLAG_* */ | ||
| 35 | const char *symbol; /* symbol name */ | 34 | const char *symbol; /* symbol name */ |
| 36 | struct ftrace_event_class class; | 35 | struct trace_probe tp; |
| 37 | struct ftrace_event_call call; | ||
| 38 | struct list_head files; | ||
| 39 | ssize_t size; /* trace entry size */ | ||
| 40 | unsigned int nr_args; | ||
| 41 | struct probe_arg args[]; | ||
| 42 | }; | 36 | }; |
| 43 | 37 | ||
| 44 | struct event_file_link { | 38 | struct event_file_link { |
| @@ -46,56 +40,46 @@ struct event_file_link { | |||
| 46 | struct list_head list; | 40 | struct list_head list; |
| 47 | }; | 41 | }; |
| 48 | 42 | ||
| 49 | #define SIZEOF_TRACE_PROBE(n) \ | 43 | #define SIZEOF_TRACE_KPROBE(n) \ |
| 50 | (offsetof(struct trace_probe, args) + \ | 44 | (offsetof(struct trace_kprobe, tp.args) + \ |
| 51 | (sizeof(struct probe_arg) * (n))) | 45 | (sizeof(struct probe_arg) * (n))) |
| 52 | 46 | ||
| 53 | 47 | ||
| 54 | static __kprobes bool trace_probe_is_return(struct trace_probe *tp) | 48 | static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk) |
| 55 | { | 49 | { |
| 56 | return tp->rp.handler != NULL; | 50 | return tk->rp.handler != NULL; |
| 57 | } | 51 | } |
| 58 | 52 | ||
| 59 | static __kprobes const char *trace_probe_symbol(struct trace_probe *tp) | 53 | static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk) |
| 60 | { | 54 | { |
| 61 | return tp->symbol ? tp->symbol : "unknown"; | 55 | return tk->symbol ? tk->symbol : "unknown"; |
| 62 | } | 56 | } |
| 63 | 57 | ||
| 64 | static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp) | 58 | static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk) |
| 65 | { | 59 | { |
| 66 | return tp->rp.kp.offset; | 60 | return tk->rp.kp.offset; |
| 67 | } | 61 | } |
| 68 | 62 | ||
| 69 | static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp) | 63 | static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk) |
| 70 | { | 64 | { |
| 71 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); | 65 | return !!(kprobe_gone(&tk->rp.kp)); |
| 72 | } | 66 | } |
| 73 | 67 | ||
| 74 | static __kprobes bool trace_probe_is_registered(struct trace_probe *tp) | 68 | static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk, |
| 75 | { | 69 | struct module *mod) |
| 76 | return !!(tp->flags & TP_FLAG_REGISTERED); | ||
| 77 | } | ||
| 78 | |||
| 79 | static __kprobes bool trace_probe_has_gone(struct trace_probe *tp) | ||
| 80 | { | ||
| 81 | return !!(kprobe_gone(&tp->rp.kp)); | ||
| 82 | } | ||
| 83 | |||
| 84 | static __kprobes bool trace_probe_within_module(struct trace_probe *tp, | ||
| 85 | struct module *mod) | ||
| 86 | { | 70 | { |
| 87 | int len = strlen(mod->name); | 71 | int len = strlen(mod->name); |
| 88 | const char *name = trace_probe_symbol(tp); | 72 | const char *name = trace_kprobe_symbol(tk); |
| 89 | return strncmp(mod->name, name, len) == 0 && name[len] == ':'; | 73 | return strncmp(mod->name, name, len) == 0 && name[len] == ':'; |
| 90 | } | 74 | } |
| 91 | 75 | ||
| 92 | static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp) | 76 | static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk) |
| 93 | { | 77 | { |
| 94 | return !!strchr(trace_probe_symbol(tp), ':'); | 78 | return !!strchr(trace_kprobe_symbol(tk), ':'); |
| 95 | } | 79 | } |
| 96 | 80 | ||
| 97 | static int register_probe_event(struct trace_probe *tp); | 81 | static int register_kprobe_event(struct trace_kprobe *tk); |
| 98 | static int unregister_probe_event(struct trace_probe *tp); | 82 | static int unregister_kprobe_event(struct trace_kprobe *tk); |
| 99 | 83 | ||
| 100 | static DEFINE_MUTEX(probe_lock); | 84 | static DEFINE_MUTEX(probe_lock); |
| 101 | static LIST_HEAD(probe_list); | 85 | static LIST_HEAD(probe_list); |
| @@ -107,42 +91,42 @@ static int kretprobe_dispatcher(struct kretprobe_instance *ri, | |||
| 107 | /* | 91 | /* |
| 108 | * Allocate new trace_probe and initialize it (including kprobes). | 92 | * Allocate new trace_probe and initialize it (including kprobes). |
| 109 | */ | 93 | */ |
| 110 | static struct trace_probe *alloc_trace_probe(const char *group, | 94 | static struct trace_kprobe *alloc_trace_kprobe(const char *group, |
| 111 | const char *event, | 95 | const char *event, |
| 112 | void *addr, | 96 | void *addr, |
| 113 | const char *symbol, | 97 | const char *symbol, |
| 114 | unsigned long offs, | 98 | unsigned long offs, |
| 115 | int nargs, bool is_return) | 99 | int nargs, bool is_return) |
| 116 | { | 100 | { |
| 117 | struct trace_probe *tp; | 101 | struct trace_kprobe *tk; |
| 118 | int ret = -ENOMEM; | 102 | int ret = -ENOMEM; |
| 119 | 103 | ||
| 120 | tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL); | 104 | tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL); |
| 121 | if (!tp) | 105 | if (!tk) |
| 122 | return ERR_PTR(ret); | 106 | return ERR_PTR(ret); |
| 123 | 107 | ||
| 124 | if (symbol) { | 108 | if (symbol) { |
| 125 | tp->symbol = kstrdup(symbol, GFP_KERNEL); | 109 | tk->symbol = kstrdup(symbol, GFP_KERNEL); |
| 126 | if (!tp->symbol) | 110 | if (!tk->symbol) |
| 127 | goto error; | 111 | goto error; |
| 128 | tp->rp.kp.symbol_name = tp->symbol; | 112 | tk->rp.kp.symbol_name = tk->symbol; |
| 129 | tp->rp.kp.offset = offs; | 113 | tk->rp.kp.offset = offs; |
| 130 | } else | 114 | } else |
| 131 | tp->rp.kp.addr = addr; | 115 | tk->rp.kp.addr = addr; |
| 132 | 116 | ||
| 133 | if (is_return) | 117 | if (is_return) |
| 134 | tp->rp.handler = kretprobe_dispatcher; | 118 | tk->rp.handler = kretprobe_dispatcher; |
| 135 | else | 119 | else |
| 136 | tp->rp.kp.pre_handler = kprobe_dispatcher; | 120 | tk->rp.kp.pre_handler = kprobe_dispatcher; |
| 137 | 121 | ||
| 138 | if (!event || !is_good_name(event)) { | 122 | if (!event || !is_good_name(event)) { |
| 139 | ret = -EINVAL; | 123 | ret = -EINVAL; |
| 140 | goto error; | 124 | goto error; |
| 141 | } | 125 | } |
| 142 | 126 | ||
| 143 | tp->call.class = &tp->class; | 127 | tk->tp.call.class = &tk->tp.class; |
| 144 | tp->call.name = kstrdup(event, GFP_KERNEL); | 128 | tk->tp.call.name = kstrdup(event, GFP_KERNEL); |
| 145 | if (!tp->call.name) | 129 | if (!tk->tp.call.name) |
| 146 | goto error; | 130 | goto error; |
| 147 | 131 | ||
| 148 | if (!group || !is_good_name(group)) { | 132 | if (!group || !is_good_name(group)) { |
| @@ -150,42 +134,42 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
| 150 | goto error; | 134 | goto error; |
| 151 | } | 135 | } |
| 152 | 136 | ||
| 153 | tp->class.system = kstrdup(group, GFP_KERNEL); | 137 | tk->tp.class.system = kstrdup(group, GFP_KERNEL); |
| 154 | if (!tp->class.system) | 138 | if (!tk->tp.class.system) |
| 155 | goto error; | 139 | goto error; |
| 156 | 140 | ||
| 157 | INIT_LIST_HEAD(&tp->list); | 141 | INIT_LIST_HEAD(&tk->list); |
| 158 | INIT_LIST_HEAD(&tp->files); | 142 | INIT_LIST_HEAD(&tk->tp.files); |
| 159 | return tp; | 143 | return tk; |
| 160 | error: | 144 | error: |
| 161 | kfree(tp->call.name); | 145 | kfree(tk->tp.call.name); |
| 162 | kfree(tp->symbol); | 146 | kfree(tk->symbol); |
| 163 | kfree(tp); | 147 | kfree(tk); |
| 164 | return ERR_PTR(ret); | 148 | return ERR_PTR(ret); |
| 165 | } | 149 | } |
| 166 | 150 | ||
| 167 | static void free_trace_probe(struct trace_probe *tp) | 151 | static void free_trace_kprobe(struct trace_kprobe *tk) |
| 168 | { | 152 | { |
| 169 | int i; | 153 | int i; |
| 170 | 154 | ||
| 171 | for (i = 0; i < tp->nr_args; i++) | 155 | for (i = 0; i < tk->tp.nr_args; i++) |
| 172 | traceprobe_free_probe_arg(&tp->args[i]); | 156 | traceprobe_free_probe_arg(&tk->tp.args[i]); |
| 173 | 157 | ||
| 174 | kfree(tp->call.class->system); | 158 | kfree(tk->tp.call.class->system); |
| 175 | kfree(tp->call.name); | 159 | kfree(tk->tp.call.name); |
| 176 | kfree(tp->symbol); | 160 | kfree(tk->symbol); |
| 177 | kfree(tp); | 161 | kfree(tk); |
| 178 | } | 162 | } |
| 179 | 163 | ||
| 180 | static struct trace_probe *find_trace_probe(const char *event, | 164 | static struct trace_kprobe *find_trace_kprobe(const char *event, |
| 181 | const char *group) | 165 | const char *group) |
| 182 | { | 166 | { |
| 183 | struct trace_probe *tp; | 167 | struct trace_kprobe *tk; |
| 184 | 168 | ||
| 185 | list_for_each_entry(tp, &probe_list, list) | 169 | list_for_each_entry(tk, &probe_list, list) |
| 186 | if (strcmp(tp->call.name, event) == 0 && | 170 | if (strcmp(tk->tp.call.name, event) == 0 && |
| 187 | strcmp(tp->call.class->system, group) == 0) | 171 | strcmp(tk->tp.call.class->system, group) == 0) |
| 188 | return tp; | 172 | return tk; |
| 189 | return NULL; | 173 | return NULL; |
| 190 | } | 174 | } |
| 191 | 175 | ||
| @@ -194,7 +178,7 @@ static struct trace_probe *find_trace_probe(const char *event, | |||
| 194 | * if the file is NULL, enable "perf" handler, or enable "trace" handler. | 178 | * if the file is NULL, enable "perf" handler, or enable "trace" handler. |
| 195 | */ | 179 | */ |
| 196 | static int | 180 | static int |
| 197 | enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | 181 | enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) |
| 198 | { | 182 | { |
| 199 | int ret = 0; | 183 | int ret = 0; |
| 200 | 184 | ||
| @@ -208,17 +192,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 208 | } | 192 | } |
| 209 | 193 | ||
| 210 | link->file = file; | 194 | link->file = file; |
| 211 | list_add_tail_rcu(&link->list, &tp->files); | 195 | list_add_tail_rcu(&link->list, &tk->tp.files); |
| 212 | 196 | ||
| 213 | tp->flags |= TP_FLAG_TRACE; | 197 | tk->tp.flags |= TP_FLAG_TRACE; |
| 214 | } else | 198 | } else |
| 215 | tp->flags |= TP_FLAG_PROFILE; | 199 | tk->tp.flags |= TP_FLAG_PROFILE; |
| 216 | 200 | ||
| 217 | if (trace_probe_is_registered(tp) && !trace_probe_has_gone(tp)) { | 201 | if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) { |
| 218 | if (trace_probe_is_return(tp)) | 202 | if (trace_kprobe_is_return(tk)) |
| 219 | ret = enable_kretprobe(&tp->rp); | 203 | ret = enable_kretprobe(&tk->rp); |
| 220 | else | 204 | else |
| 221 | ret = enable_kprobe(&tp->rp.kp); | 205 | ret = enable_kprobe(&tk->rp.kp); |
| 222 | } | 206 | } |
| 223 | out: | 207 | out: |
| 224 | return ret; | 208 | return ret; |
| @@ -241,14 +225,14 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 241 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. | 225 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. |
| 242 | */ | 226 | */ |
| 243 | static int | 227 | static int |
| 244 | disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | 228 | disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) |
| 245 | { | 229 | { |
| 246 | struct event_file_link *link = NULL; | 230 | struct event_file_link *link = NULL; |
| 247 | int wait = 0; | 231 | int wait = 0; |
| 248 | int ret = 0; | 232 | int ret = 0; |
| 249 | 233 | ||
| 250 | if (file) { | 234 | if (file) { |
| 251 | link = find_event_file_link(tp, file); | 235 | link = find_event_file_link(&tk->tp, file); |
| 252 | if (!link) { | 236 | if (!link) { |
| 253 | ret = -EINVAL; | 237 | ret = -EINVAL; |
| 254 | goto out; | 238 | goto out; |
| @@ -256,18 +240,18 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 256 | 240 | ||
| 257 | list_del_rcu(&link->list); | 241 | list_del_rcu(&link->list); |
| 258 | wait = 1; | 242 | wait = 1; |
| 259 | if (!list_empty(&tp->files)) | 243 | if (!list_empty(&tk->tp.files)) |
| 260 | goto out; | 244 | goto out; |
| 261 | 245 | ||
| 262 | tp->flags &= ~TP_FLAG_TRACE; | 246 | tk->tp.flags &= ~TP_FLAG_TRACE; |
| 263 | } else | 247 | } else |
| 264 | tp->flags &= ~TP_FLAG_PROFILE; | 248 | tk->tp.flags &= ~TP_FLAG_PROFILE; |
| 265 | 249 | ||
| 266 | if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) { | 250 | if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) { |
| 267 | if (trace_probe_is_return(tp)) | 251 | if (trace_kprobe_is_return(tk)) |
| 268 | disable_kretprobe(&tp->rp); | 252 | disable_kretprobe(&tk->rp); |
| 269 | else | 253 | else |
| 270 | disable_kprobe(&tp->rp.kp); | 254 | disable_kprobe(&tk->rp.kp); |
| 271 | wait = 1; | 255 | wait = 1; |
| 272 | } | 256 | } |
| 273 | out: | 257 | out: |
| @@ -288,40 +272,40 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 288 | } | 272 | } |
| 289 | 273 | ||
| 290 | /* Internal register function - just handle k*probes and flags */ | 274 | /* Internal register function - just handle k*probes and flags */ |
| 291 | static int __register_trace_probe(struct trace_probe *tp) | 275 | static int __register_trace_kprobe(struct trace_kprobe *tk) |
| 292 | { | 276 | { |
| 293 | int i, ret; | 277 | int i, ret; |
| 294 | 278 | ||
| 295 | if (trace_probe_is_registered(tp)) | 279 | if (trace_probe_is_registered(&tk->tp)) |
| 296 | return -EINVAL; | 280 | return -EINVAL; |
| 297 | 281 | ||
| 298 | for (i = 0; i < tp->nr_args; i++) | 282 | for (i = 0; i < tk->tp.nr_args; i++) |
| 299 | traceprobe_update_arg(&tp->args[i]); | 283 | traceprobe_update_arg(&tk->tp.args[i]); |
| 300 | 284 | ||
| 301 | /* Set/clear disabled flag according to tp->flag */ | 285 | /* Set/clear disabled flag according to tp->flag */ |
| 302 | if (trace_probe_is_enabled(tp)) | 286 | if (trace_probe_is_enabled(&tk->tp)) |
| 303 | tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED; | 287 | tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED; |
| 304 | else | 288 | else |
| 305 | tp->rp.kp.flags |= KPROBE_FLAG_DISABLED; | 289 | tk->rp.kp.flags |= KPROBE_FLAG_DISABLED; |
| 306 | 290 | ||
| 307 | if (trace_probe_is_return(tp)) | 291 | if (trace_kprobe_is_return(tk)) |
| 308 | ret = register_kretprobe(&tp->rp); | 292 | ret = register_kretprobe(&tk->rp); |
| 309 | else | 293 | else |
| 310 | ret = register_kprobe(&tp->rp.kp); | 294 | ret = register_kprobe(&tk->rp.kp); |
| 311 | 295 | ||
| 312 | if (ret == 0) | 296 | if (ret == 0) |
| 313 | tp->flags |= TP_FLAG_REGISTERED; | 297 | tk->tp.flags |= TP_FLAG_REGISTERED; |
| 314 | else { | 298 | else { |
| 315 | pr_warning("Could not insert probe at %s+%lu: %d\n", | 299 | pr_warning("Could not insert probe at %s+%lu: %d\n", |
| 316 | trace_probe_symbol(tp), trace_probe_offset(tp), ret); | 300 | trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret); |
| 317 | if (ret == -ENOENT && trace_probe_is_on_module(tp)) { | 301 | if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { |
| 318 | pr_warning("This probe might be able to register after" | 302 | pr_warning("This probe might be able to register after" |
| 319 | "target module is loaded. Continue.\n"); | 303 | "target module is loaded. Continue.\n"); |
| 320 | ret = 0; | 304 | ret = 0; |
| 321 | } else if (ret == -EILSEQ) { | 305 | } else if (ret == -EILSEQ) { |
| 322 | pr_warning("Probing address(0x%p) is not an " | 306 | pr_warning("Probing address(0x%p) is not an " |
| 323 | "instruction boundary.\n", | 307 | "instruction boundary.\n", |
| 324 | tp->rp.kp.addr); | 308 | tk->rp.kp.addr); |
| 325 | ret = -EINVAL; | 309 | ret = -EINVAL; |
| 326 | } | 310 | } |
| 327 | } | 311 | } |
| @@ -330,67 +314,67 @@ static int __register_trace_probe(struct trace_probe *tp) | |||
| 330 | } | 314 | } |
| 331 | 315 | ||
| 332 | /* Internal unregister function - just handle k*probes and flags */ | 316 | /* Internal unregister function - just handle k*probes and flags */ |
| 333 | static void __unregister_trace_probe(struct trace_probe *tp) | 317 | static void __unregister_trace_kprobe(struct trace_kprobe *tk) |
| 334 | { | 318 | { |
| 335 | if (trace_probe_is_registered(tp)) { | 319 | if (trace_probe_is_registered(&tk->tp)) { |
| 336 | if (trace_probe_is_return(tp)) | 320 | if (trace_kprobe_is_return(tk)) |
| 337 | unregister_kretprobe(&tp->rp); | 321 | unregister_kretprobe(&tk->rp); |
| 338 | else | 322 | else |
| 339 | unregister_kprobe(&tp->rp.kp); | 323 | unregister_kprobe(&tk->rp.kp); |
| 340 | tp->flags &= ~TP_FLAG_REGISTERED; | 324 | tk->tp.flags &= ~TP_FLAG_REGISTERED; |
| 341 | /* Cleanup kprobe for reuse */ | 325 | /* Cleanup kprobe for reuse */ |
| 342 | if (tp->rp.kp.symbol_name) | 326 | if (tk->rp.kp.symbol_name) |
| 343 | tp->rp.kp.addr = NULL; | 327 | tk->rp.kp.addr = NULL; |
| 344 | } | 328 | } |
| 345 | } | 329 | } |
| 346 | 330 | ||
| 347 | /* Unregister a trace_probe and probe_event: call with locking probe_lock */ | 331 | /* Unregister a trace_probe and probe_event: call with locking probe_lock */ |
| 348 | static int unregister_trace_probe(struct trace_probe *tp) | 332 | static int unregister_trace_kprobe(struct trace_kprobe *tk) |
| 349 | { | 333 | { |
| 350 | /* Enabled event can not be unregistered */ | 334 | /* Enabled event can not be unregistered */ |
| 351 | if (trace_probe_is_enabled(tp)) | 335 | if (trace_probe_is_enabled(&tk->tp)) |
| 352 | return -EBUSY; | 336 | return -EBUSY; |
| 353 | 337 | ||
| 354 | /* Will fail if probe is being used by ftrace or perf */ | 338 | /* Will fail if probe is being used by ftrace or perf */ |
| 355 | if (unregister_probe_event(tp)) | 339 | if (unregister_kprobe_event(tk)) |
| 356 | return -EBUSY; | 340 | return -EBUSY; |
| 357 | 341 | ||
| 358 | __unregister_trace_probe(tp); | 342 | __unregister_trace_kprobe(tk); |
| 359 | list_del(&tp->list); | 343 | list_del(&tk->list); |
| 360 | 344 | ||
| 361 | return 0; | 345 | return 0; |
| 362 | } | 346 | } |
| 363 | 347 | ||
| 364 | /* Register a trace_probe and probe_event */ | 348 | /* Register a trace_probe and probe_event */ |
| 365 | static int register_trace_probe(struct trace_probe *tp) | 349 | static int register_trace_kprobe(struct trace_kprobe *tk) |
| 366 | { | 350 | { |
| 367 | struct trace_probe *old_tp; | 351 | struct trace_kprobe *old_tk; |
| 368 | int ret; | 352 | int ret; |
| 369 | 353 | ||
| 370 | mutex_lock(&probe_lock); | 354 | mutex_lock(&probe_lock); |
| 371 | 355 | ||
| 372 | /* Delete old (same name) event if exist */ | 356 | /* Delete old (same name) event if exist */ |
| 373 | old_tp = find_trace_probe(tp->call.name, tp->call.class->system); | 357 | old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); |
| 374 | if (old_tp) { | 358 | if (old_tk) { |
| 375 | ret = unregister_trace_probe(old_tp); | 359 | ret = unregister_trace_kprobe(old_tk); |
| 376 | if (ret < 0) | 360 | if (ret < 0) |
| 377 | goto end; | 361 | goto end; |
| 378 | free_trace_probe(old_tp); | 362 | free_trace_kprobe(old_tk); |
| 379 | } | 363 | } |
| 380 | 364 | ||
| 381 | /* Register new event */ | 365 | /* Register new event */ |
| 382 | ret = register_probe_event(tp); | 366 | ret = register_kprobe_event(tk); |
| 383 | if (ret) { | 367 | if (ret) { |
| 384 | pr_warning("Failed to register probe event(%d)\n", ret); | 368 | pr_warning("Failed to register probe event(%d)\n", ret); |
| 385 | goto end; | 369 | goto end; |
| 386 | } | 370 | } |
| 387 | 371 | ||
| 388 | /* Register k*probe */ | 372 | /* Register k*probe */ |
| 389 | ret = __register_trace_probe(tp); | 373 | ret = __register_trace_kprobe(tk); |
| 390 | if (ret < 0) | 374 | if (ret < 0) |
| 391 | unregister_probe_event(tp); | 375 | unregister_kprobe_event(tk); |
| 392 | else | 376 | else |
| 393 | list_add_tail(&tp->list, &probe_list); | 377 | list_add_tail(&tk->list, &probe_list); |
| 394 | 378 | ||
| 395 | end: | 379 | end: |
| 396 | mutex_unlock(&probe_lock); | 380 | mutex_unlock(&probe_lock); |
| @@ -398,11 +382,11 @@ end: | |||
| 398 | } | 382 | } |
| 399 | 383 | ||
| 400 | /* Module notifier call back, checking event on the module */ | 384 | /* Module notifier call back, checking event on the module */ |
| 401 | static int trace_probe_module_callback(struct notifier_block *nb, | 385 | static int trace_kprobe_module_callback(struct notifier_block *nb, |
| 402 | unsigned long val, void *data) | 386 | unsigned long val, void *data) |
| 403 | { | 387 | { |
| 404 | struct module *mod = data; | 388 | struct module *mod = data; |
| 405 | struct trace_probe *tp; | 389 | struct trace_kprobe *tk; |
| 406 | int ret; | 390 | int ret; |
| 407 | 391 | ||
| 408 | if (val != MODULE_STATE_COMING) | 392 | if (val != MODULE_STATE_COMING) |
| @@ -410,15 +394,15 @@ static int trace_probe_module_callback(struct notifier_block *nb, | |||
| 410 | 394 | ||
| 411 | /* Update probes on coming module */ | 395 | /* Update probes on coming module */ |
| 412 | mutex_lock(&probe_lock); | 396 | mutex_lock(&probe_lock); |
| 413 | list_for_each_entry(tp, &probe_list, list) { | 397 | list_for_each_entry(tk, &probe_list, list) { |
| 414 | if (trace_probe_within_module(tp, mod)) { | 398 | if (trace_kprobe_within_module(tk, mod)) { |
| 415 | /* Don't need to check busy - this should have gone. */ | 399 | /* Don't need to check busy - this should have gone. */ |
| 416 | __unregister_trace_probe(tp); | 400 | __unregister_trace_kprobe(tk); |
| 417 | ret = __register_trace_probe(tp); | 401 | ret = __register_trace_kprobe(tk); |
| 418 | if (ret) | 402 | if (ret) |
| 419 | pr_warning("Failed to re-register probe %s on" | 403 | pr_warning("Failed to re-register probe %s on" |
| 420 | "%s: %d\n", | 404 | "%s: %d\n", |
| 421 | tp->call.name, mod->name, ret); | 405 | tk->tp.call.name, mod->name, ret); |
| 422 | } | 406 | } |
| 423 | } | 407 | } |
| 424 | mutex_unlock(&probe_lock); | 408 | mutex_unlock(&probe_lock); |
| @@ -426,12 +410,12 @@ static int trace_probe_module_callback(struct notifier_block *nb, | |||
| 426 | return NOTIFY_DONE; | 410 | return NOTIFY_DONE; |
| 427 | } | 411 | } |
| 428 | 412 | ||
| 429 | static struct notifier_block trace_probe_module_nb = { | 413 | static struct notifier_block trace_kprobe_module_nb = { |
| 430 | .notifier_call = trace_probe_module_callback, | 414 | .notifier_call = trace_kprobe_module_callback, |
| 431 | .priority = 1 /* Invoked after kprobe module callback */ | 415 | .priority = 1 /* Invoked after kprobe module callback */ |
| 432 | }; | 416 | }; |
| 433 | 417 | ||
| 434 | static int create_trace_probe(int argc, char **argv) | 418 | static int create_trace_kprobe(int argc, char **argv) |
| 435 | { | 419 | { |
| 436 | /* | 420 | /* |
| 437 | * Argument syntax: | 421 | * Argument syntax: |
| @@ -451,7 +435,7 @@ static int create_trace_probe(int argc, char **argv) | |||
| 451 | * Type of args: | 435 | * Type of args: |
| 452 | * FETCHARG:TYPE : use TYPE instead of unsigned long. | 436 | * FETCHARG:TYPE : use TYPE instead of unsigned long. |
| 453 | */ | 437 | */ |
| 454 | struct trace_probe *tp; | 438 | struct trace_kprobe *tk; |
| 455 | int i, ret = 0; | 439 | int i, ret = 0; |
| 456 | bool is_return = false, is_delete = false; | 440 | bool is_return = false, is_delete = false; |
| 457 | char *symbol = NULL, *event = NULL, *group = NULL; | 441 | char *symbol = NULL, *event = NULL, *group = NULL; |
| @@ -498,16 +482,16 @@ static int create_trace_probe(int argc, char **argv) | |||
| 498 | return -EINVAL; | 482 | return -EINVAL; |
| 499 | } | 483 | } |
| 500 | mutex_lock(&probe_lock); | 484 | mutex_lock(&probe_lock); |
| 501 | tp = find_trace_probe(event, group); | 485 | tk = find_trace_kprobe(event, group); |
| 502 | if (!tp) { | 486 | if (!tk) { |
| 503 | mutex_unlock(&probe_lock); | 487 | mutex_unlock(&probe_lock); |
| 504 | pr_info("Event %s/%s doesn't exist.\n", group, event); | 488 | pr_info("Event %s/%s doesn't exist.\n", group, event); |
| 505 | return -ENOENT; | 489 | return -ENOENT; |
| 506 | } | 490 | } |
| 507 | /* delete an event */ | 491 | /* delete an event */ |
| 508 | ret = unregister_trace_probe(tp); | 492 | ret = unregister_trace_kprobe(tk); |
| 509 | if (ret == 0) | 493 | if (ret == 0) |
| 510 | free_trace_probe(tp); | 494 | free_trace_kprobe(tk); |
| 511 | mutex_unlock(&probe_lock); | 495 | mutex_unlock(&probe_lock); |
| 512 | return ret; | 496 | return ret; |
| 513 | } | 497 | } |
| @@ -554,47 +538,49 @@ static int create_trace_probe(int argc, char **argv) | |||
| 554 | is_return ? 'r' : 'p', addr); | 538 | is_return ? 'r' : 'p', addr); |
| 555 | event = buf; | 539 | event = buf; |
| 556 | } | 540 | } |
| 557 | tp = alloc_trace_probe(group, event, addr, symbol, offset, argc, | 541 | tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc, |
| 558 | is_return); | 542 | is_return); |
| 559 | if (IS_ERR(tp)) { | 543 | if (IS_ERR(tk)) { |
| 560 | pr_info("Failed to allocate trace_probe.(%d)\n", | 544 | pr_info("Failed to allocate trace_probe.(%d)\n", |
| 561 | (int)PTR_ERR(tp)); | 545 | (int)PTR_ERR(tk)); |
| 562 | return PTR_ERR(tp); | 546 | return PTR_ERR(tk); |
| 563 | } | 547 | } |
| 564 | 548 | ||
| 565 | /* parse arguments */ | 549 | /* parse arguments */ |
| 566 | ret = 0; | 550 | ret = 0; |
| 567 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { | 551 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { |
| 552 | struct probe_arg *parg = &tk->tp.args[i]; | ||
| 553 | |||
| 568 | /* Increment count for freeing args in error case */ | 554 | /* Increment count for freeing args in error case */ |
| 569 | tp->nr_args++; | 555 | tk->tp.nr_args++; |
| 570 | 556 | ||
| 571 | /* Parse argument name */ | 557 | /* Parse argument name */ |
| 572 | arg = strchr(argv[i], '='); | 558 | arg = strchr(argv[i], '='); |
| 573 | if (arg) { | 559 | if (arg) { |
| 574 | *arg++ = '\0'; | 560 | *arg++ = '\0'; |
| 575 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); | 561 | parg->name = kstrdup(argv[i], GFP_KERNEL); |
| 576 | } else { | 562 | } else { |
| 577 | arg = argv[i]; | 563 | arg = argv[i]; |
| 578 | /* If argument name is omitted, set "argN" */ | 564 | /* If argument name is omitted, set "argN" */ |
| 579 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); | 565 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); |
| 580 | tp->args[i].name = kstrdup(buf, GFP_KERNEL); | 566 | parg->name = kstrdup(buf, GFP_KERNEL); |
| 581 | } | 567 | } |
| 582 | 568 | ||
| 583 | if (!tp->args[i].name) { | 569 | if (!parg->name) { |
| 584 | pr_info("Failed to allocate argument[%d] name.\n", i); | 570 | pr_info("Failed to allocate argument[%d] name.\n", i); |
| 585 | ret = -ENOMEM; | 571 | ret = -ENOMEM; |
| 586 | goto error; | 572 | goto error; |
| 587 | } | 573 | } |
| 588 | 574 | ||
| 589 | if (!is_good_name(tp->args[i].name)) { | 575 | if (!is_good_name(parg->name)) { |
| 590 | pr_info("Invalid argument[%d] name: %s\n", | 576 | pr_info("Invalid argument[%d] name: %s\n", |
| 591 | i, tp->args[i].name); | 577 | i, parg->name); |
| 592 | ret = -EINVAL; | 578 | ret = -EINVAL; |
| 593 | goto error; | 579 | goto error; |
| 594 | } | 580 | } |
| 595 | 581 | ||
| 596 | if (traceprobe_conflict_field_name(tp->args[i].name, | 582 | if (traceprobe_conflict_field_name(parg->name, |
| 597 | tp->args, i)) { | 583 | tk->tp.args, i)) { |
| 598 | pr_info("Argument[%d] name '%s' conflicts with " | 584 | pr_info("Argument[%d] name '%s' conflicts with " |
| 599 | "another field.\n", i, argv[i]); | 585 | "another field.\n", i, argv[i]); |
| 600 | ret = -EINVAL; | 586 | ret = -EINVAL; |
| @@ -602,7 +588,7 @@ static int create_trace_probe(int argc, char **argv) | |||
| 602 | } | 588 | } |
| 603 | 589 | ||
| 604 | /* Parse fetch argument */ | 590 | /* Parse fetch argument */ |
| 605 | ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i], | 591 | ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, |
| 606 | is_return, true); | 592 | is_return, true); |
| 607 | if (ret) { | 593 | if (ret) { |
| 608 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); | 594 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
| @@ -610,35 +596,35 @@ static int create_trace_probe(int argc, char **argv) | |||
| 610 | } | 596 | } |
| 611 | } | 597 | } |
| 612 | 598 | ||
| 613 | ret = register_trace_probe(tp); | 599 | ret = register_trace_kprobe(tk); |
| 614 | if (ret) | 600 | if (ret) |
| 615 | goto error; | 601 | goto error; |
| 616 | return 0; | 602 | return 0; |
| 617 | 603 | ||
| 618 | error: | 604 | error: |
| 619 | free_trace_probe(tp); | 605 | free_trace_kprobe(tk); |
| 620 | return ret; | 606 | return ret; |
| 621 | } | 607 | } |
| 622 | 608 | ||
| 623 | static int release_all_trace_probes(void) | 609 | static int release_all_trace_kprobes(void) |
| 624 | { | 610 | { |
| 625 | struct trace_probe *tp; | 611 | struct trace_kprobe *tk; |
| 626 | int ret = 0; | 612 | int ret = 0; |
| 627 | 613 | ||
| 628 | mutex_lock(&probe_lock); | 614 | mutex_lock(&probe_lock); |
| 629 | /* Ensure no probe is in use. */ | 615 | /* Ensure no probe is in use. */ |
| 630 | list_for_each_entry(tp, &probe_list, list) | 616 | list_for_each_entry(tk, &probe_list, list) |
| 631 | if (trace_probe_is_enabled(tp)) { | 617 | if (trace_probe_is_enabled(&tk->tp)) { |
| 632 | ret = -EBUSY; | 618 | ret = -EBUSY; |
| 633 | goto end; | 619 | goto end; |
| 634 | } | 620 | } |
| 635 | /* TODO: Use batch unregistration */ | 621 | /* TODO: Use batch unregistration */ |
| 636 | while (!list_empty(&probe_list)) { | 622 | while (!list_empty(&probe_list)) { |
| 637 | tp = list_entry(probe_list.next, struct trace_probe, list); | 623 | tk = list_entry(probe_list.next, struct trace_kprobe, list); |
| 638 | ret = unregister_trace_probe(tp); | 624 | ret = unregister_trace_kprobe(tk); |
| 639 | if (ret) | 625 | if (ret) |
| 640 | goto end; | 626 | goto end; |
| 641 | free_trace_probe(tp); | 627 | free_trace_kprobe(tk); |
| 642 | } | 628 | } |
| 643 | 629 | ||
| 644 | end: | 630 | end: |
| @@ -666,22 +652,22 @@ static void probes_seq_stop(struct seq_file *m, void *v) | |||
| 666 | 652 | ||
| 667 | static int probes_seq_show(struct seq_file *m, void *v) | 653 | static int probes_seq_show(struct seq_file *m, void *v) |
| 668 | { | 654 | { |
| 669 | struct trace_probe *tp = v; | 655 | struct trace_kprobe *tk = v; |
| 670 | int i; | 656 | int i; |
| 671 | 657 | ||
| 672 | seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p'); | 658 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); |
| 673 | seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name); | 659 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); |
| 674 | 660 | ||
| 675 | if (!tp->symbol) | 661 | if (!tk->symbol) |
| 676 | seq_printf(m, " 0x%p", tp->rp.kp.addr); | 662 | seq_printf(m, " 0x%p", tk->rp.kp.addr); |
| 677 | else if (tp->rp.kp.offset) | 663 | else if (tk->rp.kp.offset) |
| 678 | seq_printf(m, " %s+%u", trace_probe_symbol(tp), | 664 | seq_printf(m, " %s+%u", trace_kprobe_symbol(tk), |
| 679 | tp->rp.kp.offset); | 665 | tk->rp.kp.offset); |
| 680 | else | 666 | else |
| 681 | seq_printf(m, " %s", trace_probe_symbol(tp)); | 667 | seq_printf(m, " %s", trace_kprobe_symbol(tk)); |
| 682 | 668 | ||
| 683 | for (i = 0; i < tp->nr_args; i++) | 669 | for (i = 0; i < tk->tp.nr_args; i++) |
| 684 | seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm); | 670 | seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); |
| 685 | seq_printf(m, "\n"); | 671 | seq_printf(m, "\n"); |
| 686 | 672 | ||
| 687 | return 0; | 673 | return 0; |
| @@ -699,7 +685,7 @@ static int probes_open(struct inode *inode, struct file *file) | |||
| 699 | int ret; | 685 | int ret; |
| 700 | 686 | ||
| 701 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { | 687 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { |
| 702 | ret = release_all_trace_probes(); | 688 | ret = release_all_trace_kprobes(); |
| 703 | if (ret < 0) | 689 | if (ret < 0) |
| 704 | return ret; | 690 | return ret; |
| 705 | } | 691 | } |
| @@ -711,7 +697,7 @@ static ssize_t probes_write(struct file *file, const char __user *buffer, | |||
| 711 | size_t count, loff_t *ppos) | 697 | size_t count, loff_t *ppos) |
| 712 | { | 698 | { |
| 713 | return traceprobe_probes_write(file, buffer, count, ppos, | 699 | return traceprobe_probes_write(file, buffer, count, ppos, |
| 714 | create_trace_probe); | 700 | create_trace_kprobe); |
| 715 | } | 701 | } |
| 716 | 702 | ||
| 717 | static const struct file_operations kprobe_events_ops = { | 703 | static const struct file_operations kprobe_events_ops = { |
| @@ -726,10 +712,10 @@ static const struct file_operations kprobe_events_ops = { | |||
| 726 | /* Probes profiling interfaces */ | 712 | /* Probes profiling interfaces */ |
| 727 | static int probes_profile_seq_show(struct seq_file *m, void *v) | 713 | static int probes_profile_seq_show(struct seq_file *m, void *v) |
| 728 | { | 714 | { |
| 729 | struct trace_probe *tp = v; | 715 | struct trace_kprobe *tk = v; |
| 730 | 716 | ||
| 731 | seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit, | 717 | seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, |
| 732 | tp->rp.kp.nmissed); | 718 | tk->rp.kp.nmissed); |
| 733 | 719 | ||
| 734 | return 0; | 720 | return 0; |
| 735 | } | 721 | } |
| @@ -804,7 +790,7 @@ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp, | |||
| 804 | 790 | ||
| 805 | /* Kprobe handler */ | 791 | /* Kprobe handler */ |
| 806 | static __kprobes void | 792 | static __kprobes void |
| 807 | __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | 793 | __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, |
| 808 | struct ftrace_event_file *ftrace_file) | 794 | struct ftrace_event_file *ftrace_file) |
| 809 | { | 795 | { |
| 810 | struct kprobe_trace_entry_head *entry; | 796 | struct kprobe_trace_entry_head *entry; |
| @@ -812,7 +798,7 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | |||
| 812 | struct ring_buffer *buffer; | 798 | struct ring_buffer *buffer; |
| 813 | int size, dsize, pc; | 799 | int size, dsize, pc; |
| 814 | unsigned long irq_flags; | 800 | unsigned long irq_flags; |
| 815 | struct ftrace_event_call *call = &tp->call; | 801 | struct ftrace_event_call *call = &tk->tp.call; |
| 816 | 802 | ||
| 817 | WARN_ON(call != ftrace_file->event_call); | 803 | WARN_ON(call != ftrace_file->event_call); |
| 818 | 804 | ||
| @@ -822,8 +808,8 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | |||
| 822 | local_save_flags(irq_flags); | 808 | local_save_flags(irq_flags); |
| 823 | pc = preempt_count(); | 809 | pc = preempt_count(); |
| 824 | 810 | ||
| 825 | dsize = __get_data_size(tp, regs); | 811 | dsize = __get_data_size(&tk->tp, regs); |
| 826 | size = sizeof(*entry) + tp->size + dsize; | 812 | size = sizeof(*entry) + tk->tp.size + dsize; |
| 827 | 813 | ||
| 828 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, | 814 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, |
| 829 | call->event.type, | 815 | call->event.type, |
| @@ -832,8 +818,8 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | |||
| 832 | return; | 818 | return; |
| 833 | 819 | ||
| 834 | entry = ring_buffer_event_data(event); | 820 | entry = ring_buffer_event_data(event); |
| 835 | entry->ip = (unsigned long)tp->rp.kp.addr; | 821 | entry->ip = (unsigned long)tk->rp.kp.addr; |
| 836 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 822 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
| 837 | 823 | ||
| 838 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) | 824 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) |
| 839 | trace_buffer_unlock_commit_regs(buffer, event, | 825 | trace_buffer_unlock_commit_regs(buffer, event, |
| @@ -841,17 +827,17 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | |||
| 841 | } | 827 | } |
| 842 | 828 | ||
| 843 | static __kprobes void | 829 | static __kprobes void |
| 844 | kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) | 830 | kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs) |
| 845 | { | 831 | { |
| 846 | struct event_file_link *link; | 832 | struct event_file_link *link; |
| 847 | 833 | ||
| 848 | list_for_each_entry_rcu(link, &tp->files, list) | 834 | list_for_each_entry_rcu(link, &tk->tp.files, list) |
| 849 | __kprobe_trace_func(tp, regs, link->file); | 835 | __kprobe_trace_func(tk, regs, link->file); |
| 850 | } | 836 | } |
| 851 | 837 | ||
| 852 | /* Kretprobe handler */ | 838 | /* Kretprobe handler */ |
| 853 | static __kprobes void | 839 | static __kprobes void |
| 854 | __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | 840 | __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, |
| 855 | struct pt_regs *regs, | 841 | struct pt_regs *regs, |
| 856 | struct ftrace_event_file *ftrace_file) | 842 | struct ftrace_event_file *ftrace_file) |
| 857 | { | 843 | { |
| @@ -860,7 +846,7 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 860 | struct ring_buffer *buffer; | 846 | struct ring_buffer *buffer; |
| 861 | int size, pc, dsize; | 847 | int size, pc, dsize; |
| 862 | unsigned long irq_flags; | 848 | unsigned long irq_flags; |
| 863 | struct ftrace_event_call *call = &tp->call; | 849 | struct ftrace_event_call *call = &tk->tp.call; |
| 864 | 850 | ||
| 865 | WARN_ON(call != ftrace_file->event_call); | 851 | WARN_ON(call != ftrace_file->event_call); |
| 866 | 852 | ||
| @@ -870,8 +856,8 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 870 | local_save_flags(irq_flags); | 856 | local_save_flags(irq_flags); |
| 871 | pc = preempt_count(); | 857 | pc = preempt_count(); |
| 872 | 858 | ||
| 873 | dsize = __get_data_size(tp, regs); | 859 | dsize = __get_data_size(&tk->tp, regs); |
| 874 | size = sizeof(*entry) + tp->size + dsize; | 860 | size = sizeof(*entry) + tk->tp.size + dsize; |
| 875 | 861 | ||
| 876 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, | 862 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, |
| 877 | call->event.type, | 863 | call->event.type, |
| @@ -880,9 +866,9 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 880 | return; | 866 | return; |
| 881 | 867 | ||
| 882 | entry = ring_buffer_event_data(event); | 868 | entry = ring_buffer_event_data(event); |
| 883 | entry->func = (unsigned long)tp->rp.kp.addr; | 869 | entry->func = (unsigned long)tk->rp.kp.addr; |
| 884 | entry->ret_ip = (unsigned long)ri->ret_addr; | 870 | entry->ret_ip = (unsigned long)ri->ret_addr; |
| 885 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 871 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
| 886 | 872 | ||
| 887 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) | 873 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) |
| 888 | trace_buffer_unlock_commit_regs(buffer, event, | 874 | trace_buffer_unlock_commit_regs(buffer, event, |
| @@ -890,13 +876,13 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 890 | } | 876 | } |
| 891 | 877 | ||
| 892 | static __kprobes void | 878 | static __kprobes void |
| 893 | kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | 879 | kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, |
| 894 | struct pt_regs *regs) | 880 | struct pt_regs *regs) |
| 895 | { | 881 | { |
| 896 | struct event_file_link *link; | 882 | struct event_file_link *link; |
| 897 | 883 | ||
| 898 | list_for_each_entry_rcu(link, &tp->files, list) | 884 | list_for_each_entry_rcu(link, &tk->tp.files, list) |
| 899 | __kretprobe_trace_func(tp, ri, regs, link->file); | 885 | __kretprobe_trace_func(tk, ri, regs, link->file); |
| 900 | } | 886 | } |
| 901 | 887 | ||
| 902 | /* Event entry printers */ | 888 | /* Event entry printers */ |
| @@ -983,16 +969,18 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 983 | { | 969 | { |
| 984 | int ret, i; | 970 | int ret, i; |
| 985 | struct kprobe_trace_entry_head field; | 971 | struct kprobe_trace_entry_head field; |
| 986 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 972 | struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; |
| 987 | 973 | ||
| 988 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 974 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
| 989 | /* Set argument names as fields */ | 975 | /* Set argument names as fields */ |
| 990 | for (i = 0; i < tp->nr_args; i++) { | 976 | for (i = 0; i < tk->tp.nr_args; i++) { |
| 991 | ret = trace_define_field(event_call, tp->args[i].type->fmttype, | 977 | struct probe_arg *parg = &tk->tp.args[i]; |
| 992 | tp->args[i].name, | 978 | |
| 993 | sizeof(field) + tp->args[i].offset, | 979 | ret = trace_define_field(event_call, parg->type->fmttype, |
| 994 | tp->args[i].type->size, | 980 | parg->name, |
| 995 | tp->args[i].type->is_signed, | 981 | sizeof(field) + parg->offset, |
| 982 | parg->type->size, | ||
| 983 | parg->type->is_signed, | ||
| 996 | FILTER_OTHER); | 984 | FILTER_OTHER); |
| 997 | if (ret) | 985 | if (ret) |
| 998 | return ret; | 986 | return ret; |
| @@ -1004,17 +992,19 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 1004 | { | 992 | { |
| 1005 | int ret, i; | 993 | int ret, i; |
| 1006 | struct kretprobe_trace_entry_head field; | 994 | struct kretprobe_trace_entry_head field; |
| 1007 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 995 | struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; |
| 1008 | 996 | ||
| 1009 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); | 997 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); |
| 1010 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); | 998 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); |
| 1011 | /* Set argument names as fields */ | 999 | /* Set argument names as fields */ |
| 1012 | for (i = 0; i < tp->nr_args; i++) { | 1000 | for (i = 0; i < tk->tp.nr_args; i++) { |
| 1013 | ret = trace_define_field(event_call, tp->args[i].type->fmttype, | 1001 | struct probe_arg *parg = &tk->tp.args[i]; |
| 1014 | tp->args[i].name, | 1002 | |
| 1015 | sizeof(field) + tp->args[i].offset, | 1003 | ret = trace_define_field(event_call, parg->type->fmttype, |
| 1016 | tp->args[i].type->size, | 1004 | parg->name, |
| 1017 | tp->args[i].type->is_signed, | 1005 | sizeof(field) + parg->offset, |
| 1006 | parg->type->size, | ||
| 1007 | parg->type->is_signed, | ||
| 1018 | FILTER_OTHER); | 1008 | FILTER_OTHER); |
| 1019 | if (ret) | 1009 | if (ret) |
| 1020 | return ret; | 1010 | return ret; |
| @@ -1022,14 +1012,14 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 1022 | return 0; | 1012 | return 0; |
| 1023 | } | 1013 | } |
| 1024 | 1014 | ||
| 1025 | static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) | 1015 | static int __set_print_fmt(struct trace_kprobe *tk, char *buf, int len) |
| 1026 | { | 1016 | { |
| 1027 | int i; | 1017 | int i; |
| 1028 | int pos = 0; | 1018 | int pos = 0; |
| 1029 | 1019 | ||
| 1030 | const char *fmt, *arg; | 1020 | const char *fmt, *arg; |
| 1031 | 1021 | ||
| 1032 | if (!trace_probe_is_return(tp)) { | 1022 | if (!trace_kprobe_is_return(tk)) { |
| 1033 | fmt = "(%lx)"; | 1023 | fmt = "(%lx)"; |
| 1034 | arg = "REC->" FIELD_STRING_IP; | 1024 | arg = "REC->" FIELD_STRING_IP; |
| 1035 | } else { | 1025 | } else { |
| @@ -1042,21 +1032,21 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) | |||
| 1042 | 1032 | ||
| 1043 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); | 1033 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); |
| 1044 | 1034 | ||
| 1045 | for (i = 0; i < tp->nr_args; i++) { | 1035 | for (i = 0; i < tk->tp.nr_args; i++) { |
| 1046 | pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s", | 1036 | pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s", |
| 1047 | tp->args[i].name, tp->args[i].type->fmt); | 1037 | tk->tp.args[i].name, tk->tp.args[i].type->fmt); |
| 1048 | } | 1038 | } |
| 1049 | 1039 | ||
| 1050 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); | 1040 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); |
| 1051 | 1041 | ||
| 1052 | for (i = 0; i < tp->nr_args; i++) { | 1042 | for (i = 0; i < tk->tp.nr_args; i++) { |
| 1053 | if (strcmp(tp->args[i].type->name, "string") == 0) | 1043 | if (strcmp(tk->tp.args[i].type->name, "string") == 0) |
| 1054 | pos += snprintf(buf + pos, LEN_OR_ZERO, | 1044 | pos += snprintf(buf + pos, LEN_OR_ZERO, |
| 1055 | ", __get_str(%s)", | 1045 | ", __get_str(%s)", |
| 1056 | tp->args[i].name); | 1046 | tk->tp.args[i].name); |
| 1057 | else | 1047 | else |
| 1058 | pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", | 1048 | pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", |
| 1059 | tp->args[i].name); | 1049 | tk->tp.args[i].name); |
| 1060 | } | 1050 | } |
| 1061 | 1051 | ||
| 1062 | #undef LEN_OR_ZERO | 1052 | #undef LEN_OR_ZERO |
| @@ -1065,20 +1055,20 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) | |||
| 1065 | return pos; | 1055 | return pos; |
| 1066 | } | 1056 | } |
| 1067 | 1057 | ||
| 1068 | static int set_print_fmt(struct trace_probe *tp) | 1058 | static int set_print_fmt(struct trace_kprobe *tk) |
| 1069 | { | 1059 | { |
| 1070 | int len; | 1060 | int len; |
| 1071 | char *print_fmt; | 1061 | char *print_fmt; |
| 1072 | 1062 | ||
| 1073 | /* First: called with 0 length to calculate the needed length */ | 1063 | /* First: called with 0 length to calculate the needed length */ |
| 1074 | len = __set_print_fmt(tp, NULL, 0); | 1064 | len = __set_print_fmt(tk, NULL, 0); |
| 1075 | print_fmt = kmalloc(len + 1, GFP_KERNEL); | 1065 | print_fmt = kmalloc(len + 1, GFP_KERNEL); |
| 1076 | if (!print_fmt) | 1066 | if (!print_fmt) |
| 1077 | return -ENOMEM; | 1067 | return -ENOMEM; |
| 1078 | 1068 | ||
| 1079 | /* Second: actually write the @print_fmt */ | 1069 | /* Second: actually write the @print_fmt */ |
| 1080 | __set_print_fmt(tp, print_fmt, len + 1); | 1070 | __set_print_fmt(tk, print_fmt, len + 1); |
| 1081 | tp->call.print_fmt = print_fmt; | 1071 | tk->tp.call.print_fmt = print_fmt; |
| 1082 | 1072 | ||
| 1083 | return 0; | 1073 | return 0; |
| 1084 | } | 1074 | } |
| @@ -1087,9 +1077,9 @@ static int set_print_fmt(struct trace_probe *tp) | |||
| 1087 | 1077 | ||
| 1088 | /* Kprobe profile handler */ | 1078 | /* Kprobe profile handler */ |
| 1089 | static __kprobes void | 1079 | static __kprobes void |
| 1090 | kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | 1080 | kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) |
| 1091 | { | 1081 | { |
| 1092 | struct ftrace_event_call *call = &tp->call; | 1082 | struct ftrace_event_call *call = &tk->tp.call; |
| 1093 | struct kprobe_trace_entry_head *entry; | 1083 | struct kprobe_trace_entry_head *entry; |
| 1094 | struct hlist_head *head; | 1084 | struct hlist_head *head; |
| 1095 | int size, __size, dsize; | 1085 | int size, __size, dsize; |
| @@ -1099,8 +1089,8 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | |||
| 1099 | if (hlist_empty(head)) | 1089 | if (hlist_empty(head)) |
| 1100 | return; | 1090 | return; |
| 1101 | 1091 | ||
| 1102 | dsize = __get_data_size(tp, regs); | 1092 | dsize = __get_data_size(&tk->tp, regs); |
| 1103 | __size = sizeof(*entry) + tp->size + dsize; | 1093 | __size = sizeof(*entry) + tk->tp.size + dsize; |
| 1104 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1094 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
| 1105 | size -= sizeof(u32); | 1095 | size -= sizeof(u32); |
| 1106 | 1096 | ||
| @@ -1108,18 +1098,18 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | |||
| 1108 | if (!entry) | 1098 | if (!entry) |
| 1109 | return; | 1099 | return; |
| 1110 | 1100 | ||
| 1111 | entry->ip = (unsigned long)tp->rp.kp.addr; | 1101 | entry->ip = (unsigned long)tk->rp.kp.addr; |
| 1112 | memset(&entry[1], 0, dsize); | 1102 | memset(&entry[1], 0, dsize); |
| 1113 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1103 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
| 1114 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | 1104 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); |
| 1115 | } | 1105 | } |
| 1116 | 1106 | ||
| 1117 | /* Kretprobe profile handler */ | 1107 | /* Kretprobe profile handler */ |
| 1118 | static __kprobes void | 1108 | static __kprobes void |
| 1119 | kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | 1109 | kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, |
| 1120 | struct pt_regs *regs) | 1110 | struct pt_regs *regs) |
| 1121 | { | 1111 | { |
| 1122 | struct ftrace_event_call *call = &tp->call; | 1112 | struct ftrace_event_call *call = &tk->tp.call; |
| 1123 | struct kretprobe_trace_entry_head *entry; | 1113 | struct kretprobe_trace_entry_head *entry; |
| 1124 | struct hlist_head *head; | 1114 | struct hlist_head *head; |
| 1125 | int size, __size, dsize; | 1115 | int size, __size, dsize; |
| @@ -1129,8 +1119,8 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 1129 | if (hlist_empty(head)) | 1119 | if (hlist_empty(head)) |
| 1130 | return; | 1120 | return; |
| 1131 | 1121 | ||
| 1132 | dsize = __get_data_size(tp, regs); | 1122 | dsize = __get_data_size(&tk->tp, regs); |
| 1133 | __size = sizeof(*entry) + tp->size + dsize; | 1123 | __size = sizeof(*entry) + tk->tp.size + dsize; |
| 1134 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1124 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
| 1135 | size -= sizeof(u32); | 1125 | size -= sizeof(u32); |
| 1136 | 1126 | ||
| @@ -1138,9 +1128,9 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 1138 | if (!entry) | 1128 | if (!entry) |
| 1139 | return; | 1129 | return; |
| 1140 | 1130 | ||
| 1141 | entry->func = (unsigned long)tp->rp.kp.addr; | 1131 | entry->func = (unsigned long)tk->rp.kp.addr; |
| 1142 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1132 | entry->ret_ip = (unsigned long)ri->ret_addr; |
| 1143 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1133 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
| 1144 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | 1134 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); |
| 1145 | } | 1135 | } |
| 1146 | #endif /* CONFIG_PERF_EVENTS */ | 1136 | #endif /* CONFIG_PERF_EVENTS */ |
| @@ -1155,20 +1145,20 @@ static __kprobes | |||
| 1155 | int kprobe_register(struct ftrace_event_call *event, | 1145 | int kprobe_register(struct ftrace_event_call *event, |
| 1156 | enum trace_reg type, void *data) | 1146 | enum trace_reg type, void *data) |
| 1157 | { | 1147 | { |
| 1158 | struct trace_probe *tp = (struct trace_probe *)event->data; | 1148 | struct trace_kprobe *tk = (struct trace_kprobe *)event->data; |
| 1159 | struct ftrace_event_file *file = data; | 1149 | struct ftrace_event_file *file = data; |
| 1160 | 1150 | ||
| 1161 | switch (type) { | 1151 | switch (type) { |
| 1162 | case TRACE_REG_REGISTER: | 1152 | case TRACE_REG_REGISTER: |
| 1163 | return enable_trace_probe(tp, file); | 1153 | return enable_trace_kprobe(tk, file); |
| 1164 | case TRACE_REG_UNREGISTER: | 1154 | case TRACE_REG_UNREGISTER: |
| 1165 | return disable_trace_probe(tp, file); | 1155 | return disable_trace_kprobe(tk, file); |
| 1166 | 1156 | ||
| 1167 | #ifdef CONFIG_PERF_EVENTS | 1157 | #ifdef CONFIG_PERF_EVENTS |
| 1168 | case TRACE_REG_PERF_REGISTER: | 1158 | case TRACE_REG_PERF_REGISTER: |
| 1169 | return enable_trace_probe(tp, NULL); | 1159 | return enable_trace_kprobe(tk, NULL); |
| 1170 | case TRACE_REG_PERF_UNREGISTER: | 1160 | case TRACE_REG_PERF_UNREGISTER: |
| 1171 | return disable_trace_probe(tp, NULL); | 1161 | return disable_trace_kprobe(tk, NULL); |
| 1172 | case TRACE_REG_PERF_OPEN: | 1162 | case TRACE_REG_PERF_OPEN: |
| 1173 | case TRACE_REG_PERF_CLOSE: | 1163 | case TRACE_REG_PERF_CLOSE: |
| 1174 | case TRACE_REG_PERF_ADD: | 1164 | case TRACE_REG_PERF_ADD: |
| @@ -1182,15 +1172,15 @@ int kprobe_register(struct ftrace_event_call *event, | |||
| 1182 | static __kprobes | 1172 | static __kprobes |
| 1183 | int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) | 1173 | int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) |
| 1184 | { | 1174 | { |
| 1185 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); | 1175 | struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp); |
| 1186 | 1176 | ||
| 1187 | tp->nhit++; | 1177 | tk->nhit++; |
| 1188 | 1178 | ||
| 1189 | if (tp->flags & TP_FLAG_TRACE) | 1179 | if (tk->tp.flags & TP_FLAG_TRACE) |
| 1190 | kprobe_trace_func(tp, regs); | 1180 | kprobe_trace_func(tk, regs); |
| 1191 | #ifdef CONFIG_PERF_EVENTS | 1181 | #ifdef CONFIG_PERF_EVENTS |
| 1192 | if (tp->flags & TP_FLAG_PROFILE) | 1182 | if (tk->tp.flags & TP_FLAG_PROFILE) |
| 1193 | kprobe_perf_func(tp, regs); | 1183 | kprobe_perf_func(tk, regs); |
| 1194 | #endif | 1184 | #endif |
| 1195 | return 0; /* We don't tweek kernel, so just return 0 */ | 1185 | return 0; /* We don't tweek kernel, so just return 0 */ |
| 1196 | } | 1186 | } |
| @@ -1198,15 +1188,15 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) | |||
| 1198 | static __kprobes | 1188 | static __kprobes |
| 1199 | int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) | 1189 | int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) |
| 1200 | { | 1190 | { |
| 1201 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | 1191 | struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp); |
| 1202 | 1192 | ||
| 1203 | tp->nhit++; | 1193 | tk->nhit++; |
| 1204 | 1194 | ||
| 1205 | if (tp->flags & TP_FLAG_TRACE) | 1195 | if (tk->tp.flags & TP_FLAG_TRACE) |
| 1206 | kretprobe_trace_func(tp, ri, regs); | 1196 | kretprobe_trace_func(tk, ri, regs); |
| 1207 | #ifdef CONFIG_PERF_EVENTS | 1197 | #ifdef CONFIG_PERF_EVENTS |
| 1208 | if (tp->flags & TP_FLAG_PROFILE) | 1198 | if (tk->tp.flags & TP_FLAG_PROFILE) |
| 1209 | kretprobe_perf_func(tp, ri, regs); | 1199 | kretprobe_perf_func(tk, ri, regs); |
| 1210 | #endif | 1200 | #endif |
| 1211 | return 0; /* We don't tweek kernel, so just return 0 */ | 1201 | return 0; /* We don't tweek kernel, so just return 0 */ |
| 1212 | } | 1202 | } |
| @@ -1219,21 +1209,21 @@ static struct trace_event_functions kprobe_funcs = { | |||
| 1219 | .trace = print_kprobe_event | 1209 | .trace = print_kprobe_event |
| 1220 | }; | 1210 | }; |
| 1221 | 1211 | ||
| 1222 | static int register_probe_event(struct trace_probe *tp) | 1212 | static int register_kprobe_event(struct trace_kprobe *tk) |
| 1223 | { | 1213 | { |
| 1224 | struct ftrace_event_call *call = &tp->call; | 1214 | struct ftrace_event_call *call = &tk->tp.call; |
| 1225 | int ret; | 1215 | int ret; |
| 1226 | 1216 | ||
| 1227 | /* Initialize ftrace_event_call */ | 1217 | /* Initialize ftrace_event_call */ |
| 1228 | INIT_LIST_HEAD(&call->class->fields); | 1218 | INIT_LIST_HEAD(&call->class->fields); |
| 1229 | if (trace_probe_is_return(tp)) { | 1219 | if (trace_kprobe_is_return(tk)) { |
| 1230 | call->event.funcs = &kretprobe_funcs; | 1220 | call->event.funcs = &kretprobe_funcs; |
| 1231 | call->class->define_fields = kretprobe_event_define_fields; | 1221 | call->class->define_fields = kretprobe_event_define_fields; |
| 1232 | } else { | 1222 | } else { |
| 1233 | call->event.funcs = &kprobe_funcs; | 1223 | call->event.funcs = &kprobe_funcs; |
| 1234 | call->class->define_fields = kprobe_event_define_fields; | 1224 | call->class->define_fields = kprobe_event_define_fields; |
| 1235 | } | 1225 | } |
| 1236 | if (set_print_fmt(tp) < 0) | 1226 | if (set_print_fmt(tk) < 0) |
| 1237 | return -ENOMEM; | 1227 | return -ENOMEM; |
| 1238 | ret = register_ftrace_event(&call->event); | 1228 | ret = register_ftrace_event(&call->event); |
| 1239 | if (!ret) { | 1229 | if (!ret) { |
| @@ -1242,7 +1232,7 @@ static int register_probe_event(struct trace_probe *tp) | |||
| 1242 | } | 1232 | } |
| 1243 | call->flags = 0; | 1233 | call->flags = 0; |
| 1244 | call->class->reg = kprobe_register; | 1234 | call->class->reg = kprobe_register; |
| 1245 | call->data = tp; | 1235 | call->data = tk; |
| 1246 | ret = trace_add_event_call(call); | 1236 | ret = trace_add_event_call(call); |
| 1247 | if (ret) { | 1237 | if (ret) { |
| 1248 | pr_info("Failed to register kprobe event: %s\n", call->name); | 1238 | pr_info("Failed to register kprobe event: %s\n", call->name); |
| @@ -1252,14 +1242,14 @@ static int register_probe_event(struct trace_probe *tp) | |||
| 1252 | return ret; | 1242 | return ret; |
| 1253 | } | 1243 | } |
| 1254 | 1244 | ||
| 1255 | static int unregister_probe_event(struct trace_probe *tp) | 1245 | static int unregister_kprobe_event(struct trace_kprobe *tk) |
| 1256 | { | 1246 | { |
| 1257 | int ret; | 1247 | int ret; |
| 1258 | 1248 | ||
| 1259 | /* tp->event is unregistered in trace_remove_event_call() */ | 1249 | /* tp->event is unregistered in trace_remove_event_call() */ |
| 1260 | ret = trace_remove_event_call(&tp->call); | 1250 | ret = trace_remove_event_call(&tk->tp.call); |
| 1261 | if (!ret) | 1251 | if (!ret) |
| 1262 | kfree(tp->call.print_fmt); | 1252 | kfree(tk->tp.call.print_fmt); |
| 1263 | return ret; | 1253 | return ret; |
| 1264 | } | 1254 | } |
| 1265 | 1255 | ||
| @@ -1269,7 +1259,7 @@ static __init int init_kprobe_trace(void) | |||
| 1269 | struct dentry *d_tracer; | 1259 | struct dentry *d_tracer; |
| 1270 | struct dentry *entry; | 1260 | struct dentry *entry; |
| 1271 | 1261 | ||
| 1272 | if (register_module_notifier(&trace_probe_module_nb)) | 1262 | if (register_module_notifier(&trace_kprobe_module_nb)) |
| 1273 | return -EINVAL; | 1263 | return -EINVAL; |
| 1274 | 1264 | ||
| 1275 | d_tracer = tracing_init_dentry(); | 1265 | d_tracer = tracing_init_dentry(); |
| @@ -1309,26 +1299,26 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3, | |||
| 1309 | } | 1299 | } |
| 1310 | 1300 | ||
| 1311 | static struct ftrace_event_file * | 1301 | static struct ftrace_event_file * |
| 1312 | find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr) | 1302 | find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) |
| 1313 | { | 1303 | { |
| 1314 | struct ftrace_event_file *file; | 1304 | struct ftrace_event_file *file; |
| 1315 | 1305 | ||
| 1316 | list_for_each_entry(file, &tr->events, list) | 1306 | list_for_each_entry(file, &tr->events, list) |
| 1317 | if (file->event_call == &tp->call) | 1307 | if (file->event_call == &tk->tp.call) |
| 1318 | return file; | 1308 | return file; |
| 1319 | 1309 | ||
| 1320 | return NULL; | 1310 | return NULL; |
| 1321 | } | 1311 | } |
| 1322 | 1312 | ||
| 1323 | /* | 1313 | /* |
| 1324 | * Nobody but us can call enable_trace_probe/disable_trace_probe at this | 1314 | * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this |
| 1325 | * stage, we can do this lockless. | 1315 | * stage, we can do this lockless. |
| 1326 | */ | 1316 | */ |
| 1327 | static __init int kprobe_trace_self_tests_init(void) | 1317 | static __init int kprobe_trace_self_tests_init(void) |
| 1328 | { | 1318 | { |
| 1329 | int ret, warn = 0; | 1319 | int ret, warn = 0; |
| 1330 | int (*target)(int, int, int, int, int, int); | 1320 | int (*target)(int, int, int, int, int, int); |
| 1331 | struct trace_probe *tp; | 1321 | struct trace_kprobe *tk; |
| 1332 | struct ftrace_event_file *file; | 1322 | struct ftrace_event_file *file; |
| 1333 | 1323 | ||
| 1334 | target = kprobe_trace_selftest_target; | 1324 | target = kprobe_trace_selftest_target; |
| @@ -1337,44 +1327,44 @@ static __init int kprobe_trace_self_tests_init(void) | |||
| 1337 | 1327 | ||
| 1338 | ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target " | 1328 | ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target " |
| 1339 | "$stack $stack0 +0($stack)", | 1329 | "$stack $stack0 +0($stack)", |
| 1340 | create_trace_probe); | 1330 | create_trace_kprobe); |
| 1341 | if (WARN_ON_ONCE(ret)) { | 1331 | if (WARN_ON_ONCE(ret)) { |
| 1342 | pr_warn("error on probing function entry.\n"); | 1332 | pr_warn("error on probing function entry.\n"); |
| 1343 | warn++; | 1333 | warn++; |
| 1344 | } else { | 1334 | } else { |
| 1345 | /* Enable trace point */ | 1335 | /* Enable trace point */ |
| 1346 | tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); | 1336 | tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); |
| 1347 | if (WARN_ON_ONCE(tp == NULL)) { | 1337 | if (WARN_ON_ONCE(tk == NULL)) { |
| 1348 | pr_warn("error on getting new probe.\n"); | 1338 | pr_warn("error on getting new probe.\n"); |
| 1349 | warn++; | 1339 | warn++; |
| 1350 | } else { | 1340 | } else { |
| 1351 | file = find_trace_probe_file(tp, top_trace_array()); | 1341 | file = find_trace_probe_file(tk, top_trace_array()); |
| 1352 | if (WARN_ON_ONCE(file == NULL)) { | 1342 | if (WARN_ON_ONCE(file == NULL)) { |
| 1353 | pr_warn("error on getting probe file.\n"); | 1343 | pr_warn("error on getting probe file.\n"); |
| 1354 | warn++; | 1344 | warn++; |
| 1355 | } else | 1345 | } else |
| 1356 | enable_trace_probe(tp, file); | 1346 | enable_trace_kprobe(tk, file); |
| 1357 | } | 1347 | } |
| 1358 | } | 1348 | } |
| 1359 | 1349 | ||
| 1360 | ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target " | 1350 | ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target " |
| 1361 | "$retval", create_trace_probe); | 1351 | "$retval", create_trace_kprobe); |
| 1362 | if (WARN_ON_ONCE(ret)) { | 1352 | if (WARN_ON_ONCE(ret)) { |
| 1363 | pr_warn("error on probing function return.\n"); | 1353 | pr_warn("error on probing function return.\n"); |
| 1364 | warn++; | 1354 | warn++; |
| 1365 | } else { | 1355 | } else { |
| 1366 | /* Enable trace point */ | 1356 | /* Enable trace point */ |
| 1367 | tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); | 1357 | tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); |
| 1368 | if (WARN_ON_ONCE(tp == NULL)) { | 1358 | if (WARN_ON_ONCE(tk == NULL)) { |
| 1369 | pr_warn("error on getting 2nd new probe.\n"); | 1359 | pr_warn("error on getting 2nd new probe.\n"); |
| 1370 | warn++; | 1360 | warn++; |
| 1371 | } else { | 1361 | } else { |
| 1372 | file = find_trace_probe_file(tp, top_trace_array()); | 1362 | file = find_trace_probe_file(tk, top_trace_array()); |
| 1373 | if (WARN_ON_ONCE(file == NULL)) { | 1363 | if (WARN_ON_ONCE(file == NULL)) { |
| 1374 | pr_warn("error on getting probe file.\n"); | 1364 | pr_warn("error on getting probe file.\n"); |
| 1375 | warn++; | 1365 | warn++; |
| 1376 | } else | 1366 | } else |
| 1377 | enable_trace_probe(tp, file); | 1367 | enable_trace_kprobe(tk, file); |
| 1378 | } | 1368 | } |
| 1379 | } | 1369 | } |
| 1380 | 1370 | ||
| @@ -1384,46 +1374,46 @@ static __init int kprobe_trace_self_tests_init(void) | |||
| 1384 | ret = target(1, 2, 3, 4, 5, 6); | 1374 | ret = target(1, 2, 3, 4, 5, 6); |
| 1385 | 1375 | ||
| 1386 | /* Disable trace points before removing it */ | 1376 | /* Disable trace points before removing it */ |
| 1387 | tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); | 1377 | tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); |
| 1388 | if (WARN_ON_ONCE(tp == NULL)) { | 1378 | if (WARN_ON_ONCE(tk == NULL)) { |
| 1389 | pr_warn("error on getting test probe.\n"); | 1379 | pr_warn("error on getting test probe.\n"); |
| 1390 | warn++; | 1380 | warn++; |
| 1391 | } else { | 1381 | } else { |
| 1392 | file = find_trace_probe_file(tp, top_trace_array()); | 1382 | file = find_trace_probe_file(tk, top_trace_array()); |
| 1393 | if (WARN_ON_ONCE(file == NULL)) { | 1383 | if (WARN_ON_ONCE(file == NULL)) { |
| 1394 | pr_warn("error on getting probe file.\n"); | 1384 | pr_warn("error on getting probe file.\n"); |
| 1395 | warn++; | 1385 | warn++; |
| 1396 | } else | 1386 | } else |
| 1397 | disable_trace_probe(tp, file); | 1387 | disable_trace_kprobe(tk, file); |
| 1398 | } | 1388 | } |
| 1399 | 1389 | ||
| 1400 | tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); | 1390 | tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); |
| 1401 | if (WARN_ON_ONCE(tp == NULL)) { | 1391 | if (WARN_ON_ONCE(tk == NULL)) { |
| 1402 | pr_warn("error on getting 2nd test probe.\n"); | 1392 | pr_warn("error on getting 2nd test probe.\n"); |
| 1403 | warn++; | 1393 | warn++; |
| 1404 | } else { | 1394 | } else { |
| 1405 | file = find_trace_probe_file(tp, top_trace_array()); | 1395 | file = find_trace_probe_file(tk, top_trace_array()); |
| 1406 | if (WARN_ON_ONCE(file == NULL)) { | 1396 | if (WARN_ON_ONCE(file == NULL)) { |
| 1407 | pr_warn("error on getting probe file.\n"); | 1397 | pr_warn("error on getting probe file.\n"); |
| 1408 | warn++; | 1398 | warn++; |
| 1409 | } else | 1399 | } else |
| 1410 | disable_trace_probe(tp, file); | 1400 | disable_trace_kprobe(tk, file); |
| 1411 | } | 1401 | } |
| 1412 | 1402 | ||
| 1413 | ret = traceprobe_command("-:testprobe", create_trace_probe); | 1403 | ret = traceprobe_command("-:testprobe", create_trace_kprobe); |
| 1414 | if (WARN_ON_ONCE(ret)) { | 1404 | if (WARN_ON_ONCE(ret)) { |
| 1415 | pr_warn("error on deleting a probe.\n"); | 1405 | pr_warn("error on deleting a probe.\n"); |
| 1416 | warn++; | 1406 | warn++; |
| 1417 | } | 1407 | } |
| 1418 | 1408 | ||
| 1419 | ret = traceprobe_command("-:testprobe2", create_trace_probe); | 1409 | ret = traceprobe_command("-:testprobe2", create_trace_kprobe); |
| 1420 | if (WARN_ON_ONCE(ret)) { | 1410 | if (WARN_ON_ONCE(ret)) { |
| 1421 | pr_warn("error on deleting a probe.\n"); | 1411 | pr_warn("error on deleting a probe.\n"); |
| 1422 | warn++; | 1412 | warn++; |
| 1423 | } | 1413 | } |
| 1424 | 1414 | ||
| 1425 | end: | 1415 | end: |
| 1426 | release_all_trace_probes(); | 1416 | release_all_trace_kprobes(); |
| 1427 | if (warn) | 1417 | if (warn) |
| 1428 | pr_cont("NG: Some tests are failed. Please check them.\n"); | 1418 | pr_cont("NG: Some tests are failed. Please check them.\n"); |
| 1429 | else | 1419 | else |
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 5c7e09d10d74..984e91ed8a44 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h | |||
| @@ -124,6 +124,26 @@ struct probe_arg { | |||
| 124 | const struct fetch_type *type; /* Type of this argument */ | 124 | const struct fetch_type *type; /* Type of this argument */ |
| 125 | }; | 125 | }; |
| 126 | 126 | ||
| 127 | struct trace_probe { | ||
| 128 | unsigned int flags; /* For TP_FLAG_* */ | ||
| 129 | struct ftrace_event_class class; | ||
| 130 | struct ftrace_event_call call; | ||
| 131 | struct list_head files; | ||
| 132 | ssize_t size; /* trace entry size */ | ||
| 133 | unsigned int nr_args; | ||
| 134 | struct probe_arg args[]; | ||
| 135 | }; | ||
| 136 | |||
| 137 | static inline bool trace_probe_is_enabled(struct trace_probe *tp) | ||
| 138 | { | ||
| 139 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); | ||
| 140 | } | ||
| 141 | |||
| 142 | static inline bool trace_probe_is_registered(struct trace_probe *tp) | ||
| 143 | { | ||
| 144 | return !!(tp->flags & TP_FLAG_REGISTERED); | ||
| 145 | } | ||
| 146 | |||
| 127 | static inline __kprobes void call_fetch(struct fetch_param *fprm, | 147 | static inline __kprobes void call_fetch(struct fetch_param *fprm, |
| 128 | struct pt_regs *regs, void *dest) | 148 | struct pt_regs *regs, void *dest) |
| 129 | { | 149 | { |
