Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Makefile               |    1
-rw-r--r--  kernel/trace/ftrace.c               |  215
-rw-r--r--  kernel/trace/ring_buffer.c          |    2
-rw-r--r--  kernel/trace/trace.c                |   57
-rw-r--r--  kernel/trace/trace.h                |  193
-rw-r--r--  kernel/trace/trace_events.c         |   49
-rw-r--r--  kernel/trace/trace_events_filter.c  |   12
-rw-r--r--  kernel/trace/trace_events_trigger.c | 1437
-rw-r--r--  kernel/trace/trace_kprobe.c         |  838
-rw-r--r--  kernel/trace/trace_probe.c          |  440
-rw-r--r--  kernel/trace/trace_probe.h          |  224
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   |   65
-rw-r--r--  kernel/trace/trace_selftest.c       |   33
-rw-r--r--  kernel/trace/trace_stack.c          |    2
-rw-r--r--  kernel/trace/trace_syscalls.c       |   14
-rw-r--r--  kernel/trace/trace_uprobe.c         |  487
16 files changed, 3142 insertions(+), 927 deletions(-)
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d7e2068e4b71..1378e84fbe39 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -50,6 +50,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
+obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_PM_RUNTIME),y)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 72a0f81dc5a8..cd7f76d1eb86 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -85,6 +85,8 @@ int function_trace_stop __read_mostly;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
+/* What to set function_trace_op to */
+static struct ftrace_ops *set_function_trace_op;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -278,6 +280,29 @@ static void update_global_ops(void)
 	global_ops.func = func;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
+static void ftrace_sync_ipi(void *data)
+{
+	/* Probably not needed, but do it anyway */
+	smp_rmb();
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void update_function_graph_func(void);
+#else
+static inline void update_function_graph_func(void) { }
+#endif
+
 static void update_ftrace_function(void)
 {
 	ftrace_func_t func;
@@ -296,16 +321,61 @@ static void update_ftrace_function(void)
 		     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
 		if (ftrace_ops_list == &global_ops)
-			function_trace_op = ftrace_global_list;
+			set_function_trace_op = ftrace_global_list;
 		else
-			function_trace_op = ftrace_ops_list;
+			set_function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
 	} else {
 		/* Just use the default ftrace_ops */
-		function_trace_op = &ftrace_list_end;
+		set_function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
 	}
 
+	/* If there's no change, then do nothing more here */
+	if (ftrace_trace_function == func)
+		return;
+
+	update_function_graph_func();
+
+	/*
+	 * If we are using the list function, it doesn't care
+	 * about the function_trace_ops.
+	 */
+	if (func == ftrace_ops_list_func) {
+		ftrace_trace_function = func;
+		/*
+		 * Don't even bother setting function_trace_ops,
+		 * it would be racy to do so anyway.
+		 */
+		return;
+	}
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+	/*
+	 * For static tracing, we need to be a bit more careful.
+	 * The function change takes effect immediately. Thus,
+	 * we need to coordinate the setting of the function_trace_ops
+	 * with the setting of the ftrace_trace_function.
+	 *
+	 * Set the function to the list ops, which will call the
+	 * function we want, albeit indirectly, but it handles the
+	 * ftrace_ops and doesn't depend on function_trace_op.
+	 */
+	ftrace_trace_function = ftrace_ops_list_func;
+	/*
+	 * Make sure all CPUs see this. Yes this is slow, but static
+	 * tracing is slow and nasty to have enabled.
+	 */
+	schedule_on_each_cpu(ftrace_sync);
+	/* Now all cpus are using the list ops. */
+	function_trace_op = set_function_trace_op;
+	/* Make sure the function_trace_op is visible on all CPUs */
+	smp_wmb();
+	/* Nasty way to force a rmb on all cpus */
+	smp_call_function(ftrace_sync_ipi, NULL, 1);
+	/* OK, we are all set to update the ftrace_trace_function now! */
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
 	ftrace_trace_function = func;
 }
 
@@ -410,17 +480,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-	/*
-	 * This function is just a stub to implement a hard force
-	 * of synchronize_sched(). This requires synchronizing
-	 * tasks even in userspace and idle.
-	 *
-	 * Yes, function tracing is rude.
-	 */
-}
-
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
@@ -439,20 +498,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
 		ret = remove_ftrace_list_ops(&ftrace_control_list,
 					     &control_ops, ops);
-		if (!ret) {
-			/*
-			 * The ftrace_ops is now removed from the list,
-			 * so there'll be no new users. We must ensure
-			 * all current users are done before we free
-			 * the control data.
-			 * Note synchronize_sched() is not enough, as we
-			 * use preempt_disable() to do RCU, but the function
-			 * tracer can be called where RCU is not active
-			 * (before user_exit()).
-			 */
-			schedule_on_each_cpu(ftrace_sync);
-			control_ops_free(ops);
-		}
 	} else
 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -462,17 +507,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
-	/*
-	 * Dynamic ops may be freed, we must make sure that all
-	 * callers are done before leaving this function.
-	 *
-	 * Again, normal synchronize_sched() is not good enough.
-	 * We need to do a hard force of sched synchronization.
-	 */
-	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		schedule_on_each_cpu(ftrace_sync);
-
-
 	return 0;
 }
 
@@ -1082,19 +1116,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
-loff_t
-ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
-{
-	loff_t ret;
-
-	if (file->f_mode & FMODE_READ)
-		ret = seq_lseek(file, offset, whence);
-	else
-		file->f_pos = ret = 1;
-
-	return ret;
-}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -1992,8 +2013,14 @@ void ftrace_modify_all_code(int command)
 	else if (command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
 
-	if (update && ftrace_trace_function != ftrace_ops_list_func)
+	if (update && ftrace_trace_function != ftrace_ops_list_func) {
+		function_trace_op = set_function_trace_op;
+		smp_wmb();
+		/* If irqs are disabled, we are in stop machine */
+		if (!irqs_disabled())
+			smp_call_function(ftrace_sync_ipi, NULL, 1);
 		ftrace_update_ftrace_func(ftrace_trace_function);
+	}
 
 	if (command & FTRACE_START_FUNC_RET)
 		ftrace_enable_ftrace_graph_caller();
@@ -2156,10 +2183,41 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
-	if (!command || !ftrace_enabled)
+	if (!command || !ftrace_enabled) {
+		/*
+		 * If these are control ops, they still need their
+		 * per_cpu field freed. Since function tracing is
+		 * not currently active, we can just free them
+		 * without synchronizing all CPUs.
+		 */
+		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+			control_ops_free(ops);
 		return 0;
+	}
 
 	ftrace_run_update_code(command);
+
+	/*
+	 * Dynamic ops may be freed, we must make sure that all
+	 * callers are done before leaving this function.
+	 * The same goes for freeing the per_cpu data of the control
+	 * ops.
+	 *
+	 * Again, normal synchronize_sched() is not good enough.
+	 * We need to do a hard force of sched synchronization.
+	 * This is because we use preempt_disable() to do RCU, but
+	 * the function tracers can be called where RCU is not watching
+	 * (like before user_exit()). We can not rely on the RCU
+	 * infrastructure to do the synchronization, thus we must do it
+	 * ourselves.
+	 */
+	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+		schedule_on_each_cpu(ftrace_sync);
+
+		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+			control_ops_free(ops);
+	}
+
 	return 0;
 }
 
@@ -2739,7 +2797,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
  * routine, you can use ftrace_filter_write() for the write
  * routine if @flag has FTRACE_ITER_FILTER set, or
  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_filter_lseek() should be used as the lseek routine, and
+ * tracing_lseek() should be used as the lseek routine, and
  * release must call ftrace_regex_release().
  */
 int
@@ -3767,7 +3825,7 @@ static const struct file_operations ftrace_filter_fops = {
 	.open = ftrace_filter_open,
 	.read = seq_read,
 	.write = ftrace_filter_write,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_regex_release,
 };
 
@@ -3775,7 +3833,7 @@ static const struct file_operations ftrace_notrace_fops = {
 	.open = ftrace_notrace_open,
 	.read = seq_read,
 	.write = ftrace_notrace_write,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_regex_release,
 };
 
@@ -4038,7 +4096,7 @@ static const struct file_operations ftrace_graph_fops = {
 	.open = ftrace_graph_open,
 	.read = seq_read,
 	.write = ftrace_graph_write,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_graph_release,
 };
 
@@ -4046,7 +4104,7 @@ static const struct file_operations ftrace_graph_notrace_fops = {
 	.open = ftrace_graph_notrace_open,
 	.read = seq_read,
 	.write = ftrace_graph_write,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_graph_release,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -4719,7 +4777,7 @@ static const struct file_operations ftrace_pid_fops = {
 	.open = ftrace_pid_open,
 	.write = ftrace_pid_write,
 	.read = seq_read,
-	.llseek = ftrace_filter_lseek,
+	.llseek = tracing_lseek,
 	.release = ftrace_pid_release,
 };
 
@@ -4862,6 +4920,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 trace_func_graph_ret_t ftrace_graph_return =
 			(trace_func_graph_ret_t)ftrace_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -5003,6 +5062,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
 				FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
+static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+{
+	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
+		return 0;
+	return __ftrace_graph_entry(trace);
+}
+
+/*
+ * The function graph tracer should only trace the functions defined
+ * by set_ftrace_filter and set_ftrace_notrace. If another function
+ * tracer ops is registered, the graph tracer requires testing the
+ * function against the global ops, and not just trace any function
+ * that any ftrace_ops registered.
+ */
+static void update_function_graph_func(void)
+{
+	if (ftrace_ops_list == &ftrace_list_end ||
+	    (ftrace_ops_list == &global_ops &&
+	     global_ops.next == &ftrace_list_end))
+		ftrace_graph_entry = __ftrace_graph_entry;
+	else
+		ftrace_graph_entry = ftrace_graph_entry_test;
+}
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
@@ -5027,7 +5110,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	}
 
 	ftrace_graph_return = retfunc;
-	ftrace_graph_entry = entryfunc;
+
+	/*
+	 * Update the indirect function to the entryfunc, and the
+	 * function that gets called to the entry_test first. Then
+	 * call the update fgraph entry function to determine if
+	 * the entryfunc should be called directly or not.
+	 */
+	__ftrace_graph_entry = entryfunc;
+	ftrace_graph_entry = ftrace_graph_entry_test;
+	update_function_graph_func();
 
 	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
@@ -5046,6 +5138,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_active--;
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
+	__ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
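Both update_ftrace_function() and ftrace_modify_all_code() above now follow the same publish-then-synchronize pattern: store the new ops into function_trace_op, order that store with smp_wmb(), then force a read barrier on every other CPU (an IPI via ftrace_sync_ipi(), or schedule_on_each_cpu(ftrace_sync) in the static-tracing case) before finally switching ftrace_trace_function, so that no CPU can run the new trace function while still holding a stale op. A minimal user-space sketch of the same ordering idea, using C11 release/acquire atomics in place of the kernel's barrier-plus-IPI primitives (publish_op() and all names below are illustrative, not kernel APIs):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for function_trace_op and ftrace_trace_function. */
static _Atomic(void *) current_op;
static _Atomic(void (*)(void *)) trace_func;

static void tracer(void *op)
{
	printf("traced with op %p\n", op);
}

/*
 * Publish the op first, then switch the function pointer.  The release
 * store plays the role of smp_wmb(); the matching acquire load on the
 * reader side plays the role of the rmb the kernel forces via IPI.
 */
static void publish_op(void *new_op, void (*new_func)(void *))
{
	atomic_store_explicit(&current_op, new_op, memory_order_release);
	atomic_store_explicit(&trace_func, new_func, memory_order_release);
}

static void *reader(void *unused)
{
	void (*func)(void *) =
		atomic_load_explicit(&trace_func, memory_order_acquire);

	/* If we see the new func, we are guaranteed to see the new op. */
	if (func)
		func(atomic_load_explicit(&current_op, memory_order_acquire));
	return NULL;
}

int main(void)
{
	pthread_t t;

	publish_op((void *)0x1, tracer);	/* dummy op value */
	pthread_create(&t, NULL, reader, NULL);
	pthread_join(t, NULL);
	return 0;
}

The kernel cannot put an acquire on the read side, since the readers are arbitrary traced functions; that is why the patch forces the barrier remotely with smp_call_function(ftrace_sync_ipi, NULL, 1) instead.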
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cc2f66f68dc5..294b8a271a04 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2558,7 +2558,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 	if (unlikely(test_time_stamp(delta))) {
 		int local_clock_stable = 1;
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-		local_clock_stable = sched_clock_stable;
+		local_clock_stable = sched_clock_stable();
 #endif
 		WARN_ONCE(delta > (1ULL << 59),
 			  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9d20cd9743ef..20c755e018ca 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -595,6 +595,28 @@ void free_snapshot(struct trace_array *tr)
 }
 
 /**
+ * tracing_alloc_snapshot - allocate snapshot buffer.
+ *
+ * This only allocates the snapshot buffer if it isn't already
+ * allocated - it doesn't also take a snapshot.
+ *
+ * This is meant to be used in cases where the snapshot buffer needs
+ * to be set up for events that can't sleep but need to be able to
+ * trigger a snapshot.
+ */
+int tracing_alloc_snapshot(void)
+{
+	struct trace_array *tr = &global_trace;
+	int ret;
+
+	ret = alloc_snapshot(tr);
+	WARN_ON(ret < 0);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
+
+/**
  * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
  *
  * This is similar to trace_snapshot(), but it will allocate the
@@ -607,11 +629,10 @@ void free_snapshot(struct trace_array *tr)
  */
 void tracing_snapshot_alloc(void)
 {
-	struct trace_array *tr = &global_trace;
 	int ret;
 
-	ret = alloc_snapshot(tr);
-	if (WARN_ON(ret < 0))
+	ret = tracing_alloc_snapshot();
+	if (ret < 0)
 		return;
 
 	tracing_snapshot();
@@ -623,6 +644,12 @@ void tracing_snapshot(void)
 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 }
 EXPORT_SYMBOL_GPL(tracing_snapshot);
+int tracing_alloc_snapshot(void)
+{
+	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 void tracing_snapshot_alloc(void)
 {
 	/* Give warning */
@@ -3156,19 +3183,23 @@ tracing_write_stub(struct file *filp, const char __user *ubuf,
 	return count;
 }
 
-static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
+loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
 {
+	int ret;
+
 	if (file->f_mode & FMODE_READ)
-		return seq_lseek(file, offset, origin);
+		ret = seq_lseek(file, offset, whence);
 	else
-		return 0;
+		file->f_pos = ret = 0;
+
+	return ret;
 }
 
 static const struct file_operations tracing_fops = {
 	.open = tracing_open,
 	.read = seq_read,
 	.write = tracing_write_stub,
-	.llseek = tracing_seek,
+	.llseek = tracing_lseek,
 	.release = tracing_release,
 };
 
@@ -4212,12 +4243,6 @@ out:
 	return sret;
 }
 
-static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
-				     struct pipe_buffer *buf)
-{
-	__free_page(buf->page);
-}
-
 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 				     unsigned int idx)
 {
@@ -4229,7 +4254,7 @@ static const struct pipe_buf_operations tracing_pipe_buf_ops = {
 	.map = generic_pipe_buf_map,
 	.unmap = generic_pipe_buf_unmap,
 	.confirm = generic_pipe_buf_confirm,
-	.release = tracing_pipe_buf_release,
+	.release = generic_pipe_buf_release,
 	.steal = generic_pipe_buf_steal,
 	.get = generic_pipe_buf_get,
 };
@@ -4913,7 +4938,7 @@ static const struct file_operations snapshot_fops = {
 	.open = tracing_snapshot_open,
 	.read = seq_read,
 	.write = tracing_snapshot_write,
-	.llseek = tracing_seek,
+	.llseek = tracing_lseek,
 	.release = tracing_snapshot_release,
 };
 
@@ -5883,6 +5908,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
 
 	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
 
+	buf->tr = tr;
+
 	buf->buffer = ring_buffer_alloc(size, rb_flags);
 	if (!buf->buffer)
 		return -ENOMEM;
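The tracing_alloc_snapshot() split above exists so the snapshot buffer can be allocated once, up front, instead of from a context that cannot sleep; per its kernel-doc it is meant for events that need to trigger a snapshot. From user space, the natural way to exercise that path is the per-event 'trigger' file added later in this series. A hedged sketch; the tracefs path and the choice of the sched_switch event are assumptions of this example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/*
	 * Arm a one-shot snapshot trigger.  Registering the trigger is
	 * the sleepable step where the snapshot buffer gets allocated;
	 * the trigger itself later fires from a non-sleeping context.
	 */
	const char *path =
		"/sys/kernel/debug/tracing/events/sched/sched_switch/trigger";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "snapshot:1", strlen("snapshot:1")) < 0)
		perror("write");
	close(fd);
	return 0;
}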
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index ea189e027b80..02b592f2d4b7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1,3 +1,4 @@
+
 #ifndef _LINUX_KERNEL_TRACE_H
 #define _LINUX_KERNEL_TRACE_H
 
@@ -587,6 +588,8 @@ void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
 int is_tracing_stopped(void);
 
+loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
+
 extern cpumask_var_t __read_mostly tracing_buffer_mask;
 
 #define for_each_tracing_cpu(cpu)	\
@@ -1020,6 +1023,10 @@ extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 extern void print_subsystem_event_filter(struct event_subsystem *system,
 					 struct trace_seq *s);
 extern int filter_assign_type(const char *type);
+extern int create_event_filter(struct ftrace_event_call *call,
+			       char *filter_str, bool set_str,
+			       struct event_filter **filterp);
+extern void free_event_filter(struct event_filter *filter);
 
 struct ftrace_event_field *
 trace_find_event_field(struct ftrace_event_call *call, char *name);
@@ -1028,9 +1035,195 @@ extern void trace_event_enable_cmd_record(bool enable);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
 
+extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
+						 const char *system,
+						 const char *event);
+
+static inline void *event_file_data(struct file *filp)
+{
+	return ACCESS_ONCE(file_inode(filp)->i_private);
+}
+
 extern struct mutex event_mutex;
 extern struct list_head ftrace_events;
 
+extern const struct file_operations event_trigger_fops;
+
+extern int register_trigger_cmds(void);
+extern void clear_event_triggers(struct trace_array *tr);
+
+struct event_trigger_data {
+	unsigned long			count;
+	int				ref;
+	struct event_trigger_ops	*ops;
+	struct event_command		*cmd_ops;
+	struct event_filter __rcu	*filter;
+	char				*filter_str;
+	void				*private_data;
+	struct list_head		list;
+};
+
+/**
+ * struct event_trigger_ops - callbacks for trace event triggers
+ *
+ * The methods in this structure provide per-event trigger hooks for
+ * various trigger operations.
+ *
+ * All the methods below, except for @init() and @free(), must be
+ * implemented.
+ *
+ * @func: The trigger 'probe' function called when the triggering
+ *	event occurs.  The data passed into this callback is the data
+ *	that was supplied to the event_command @reg() function that
+ *	registered the trigger (see struct event_command).
+ *
+ * @init: An optional initialization function called for the trigger
+ *	when the trigger is registered (via the event_command reg()
+ *	function).  This can be used to perform per-trigger
+ *	initialization such as incrementing a per-trigger reference
+ *	count, for instance.  This is usually implemented by the
+ *	generic utility function @event_trigger_init() (see
+ *	trace_event_triggers.c).
+ *
+ * @free: An optional de-initialization function called for the
+ *	trigger when the trigger is unregistered (via the
+ *	event_command @reg() function).  This can be used to perform
+ *	per-trigger de-initialization such as decrementing a
+ *	per-trigger reference count and freeing corresponding trigger
+ *	data, for instance.  This is usually implemented by the
+ *	generic utility function @event_trigger_free() (see
+ *	trace_event_triggers.c).
+ *
+ * @print: The callback function invoked to have the trigger print
+ *	itself.  This is usually implemented by a wrapper function
+ *	that calls the generic utility function @event_trigger_print()
+ *	(see trace_event_triggers.c).
+ */
+struct event_trigger_ops {
+	void			(*func)(struct event_trigger_data *data);
+	int			(*init)(struct event_trigger_ops *ops,
+					struct event_trigger_data *data);
+	void			(*free)(struct event_trigger_ops *ops,
+					struct event_trigger_data *data);
+	int			(*print)(struct seq_file *m,
+					 struct event_trigger_ops *ops,
+					 struct event_trigger_data *data);
+};
+
+/**
+ * struct event_command - callbacks and data members for event commands
+ *
+ * Event commands are invoked by users by writing the command name
+ * into the 'trigger' file associated with a trace event.  The
+ * parameters associated with a specific invocation of an event
+ * command are used to create an event trigger instance, which is
+ * added to the list of trigger instances associated with that trace
+ * event.  When the event is hit, the set of triggers associated with
+ * that event is invoked.
+ *
+ * The data members in this structure provide per-event command data
+ * for various event commands.
+ *
+ * All the data members below, except for @post_trigger, must be set
+ * for each event command.
+ *
+ * @name: The unique name that identifies the event command.  This is
+ *	the name used when setting triggers via trigger files.
+ *
+ * @trigger_type: A unique id that identifies the event command
+ *	'type'.  This value has two purposes, the first to ensure that
+ *	only one trigger of the same type can be set at a given time
+ *	for a particular event e.g. it doesn't make sense to have both
+ *	a traceon and traceoff trigger attached to a single event at
+ *	the same time, so traceon and traceoff have the same type
+ *	though they have different names.  The @trigger_type value is
+ *	also used as a bit value for deferring the actual trigger
+ *	action until after the current event is finished.  Some
+ *	commands need to do this if they themselves log to the trace
+ *	buffer (see the @post_trigger() member below).  @trigger_type
+ *	values are defined by adding new values to the trigger_type
+ *	enum in include/linux/ftrace_event.h.
+ *
+ * @post_trigger: A flag that says whether or not this command needs
+ *	to have its action delayed until after the current event has
+ *	been closed.  Some triggers need to avoid being invoked while
+ *	an event is currently in the process of being logged, since
+ *	the trigger may itself log data into the trace buffer.  Thus
+ *	we make sure the current event is committed before invoking
+ *	those triggers.  To do that, the trigger invocation is split
+ *	in two - the first part checks the filter using the current
+ *	trace record; if a command has the @post_trigger flag set, it
+ *	sets a bit for itself in the return value, otherwise it
+ *	directly invokes the trigger.  Once all commands have been
+ *	either invoked or set their return flag, the current record is
+ *	either committed or discarded.  At that point, if any commands
+ *	have deferred their triggers, those commands are finally
+ *	invoked following the close of the current event.  In other
+ *	words, if the event_trigger_ops @func() probe implementation
+ *	itself logs to the trace buffer, this flag should be set,
+ *	otherwise it can be left unspecified.
+ *
+ * All the methods below, except for @set_filter(), must be
+ * implemented.
+ *
+ * @func: The callback function responsible for parsing and
+ *	registering the trigger written to the 'trigger' file by the
+ *	user.  It allocates the trigger instance and registers it with
+ *	the appropriate trace event.  It makes use of the other
+ *	event_command callback functions to orchestrate this, and is
+ *	usually implemented by the generic utility function
+ *	@event_trigger_callback() (see trace_event_triggers.c).
+ *
+ * @reg: Adds the trigger to the list of triggers associated with the
+ *	event, and enables the event trigger itself, after
+ *	initializing it (via the event_trigger_ops @init() function).
+ *	This is also where commands can use the @trigger_type value to
+ *	make the decision as to whether or not multiple instances of
+ *	the trigger should be allowed.  This is usually implemented by
+ *	the generic utility function @register_trigger() (see
+ *	trace_event_triggers.c).
+ *
+ * @unreg: Removes the trigger from the list of triggers associated
+ *	with the event, and disables the event trigger itself, after
+ *	initializing it (via the event_trigger_ops @free() function).
+ *	This is usually implemented by the generic utility function
+ *	@unregister_trigger() (see trace_event_triggers.c).
+ *
+ * @set_filter: An optional function called to parse and set a filter
+ *	for the trigger.  If no @set_filter() method is set for the
+ *	event command, filters set by the user for the command will be
+ *	ignored.  This is usually implemented by the generic utility
+ *	function @set_trigger_filter() (see trace_event_triggers.c).
+ *
+ * @get_trigger_ops: The callback function invoked to retrieve the
+ *	event_trigger_ops implementation associated with the command.
+ */
+struct event_command {
+	struct list_head	list;
+	char			*name;
+	enum event_trigger_type	trigger_type;
+	bool			post_trigger;
+	int			(*func)(struct event_command *cmd_ops,
+					struct ftrace_event_file *file,
+					char *glob, char *cmd, char *params);
+	int			(*reg)(char *glob,
+				       struct event_trigger_ops *ops,
+				       struct event_trigger_data *data,
+				       struct ftrace_event_file *file);
+	void			(*unreg)(char *glob,
+					 struct event_trigger_ops *ops,
+					 struct event_trigger_data *data,
+					 struct ftrace_event_file *file);
+	int			(*set_filter)(char *filter_str,
+					      struct event_trigger_data *data,
+					      struct ftrace_event_file *file);
+	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
+};
+
+extern int trace_event_enable_disable(struct ftrace_event_file *file,
+				      int enable, int soft_disable);
+extern int tracing_alloc_snapshot(void);
+
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
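Putting the two structures together: a command is a single struct event_command wired to the generic helpers the kernel-doc above names, registered once at boot (register_trigger_cmds() does this for the built-in commands). A hedged sketch of such a definition; the "noop" name, ETT_NOOP, and noop_get_trigger_ops() are hypothetical, only the helper names come from the comments above:

/*
 * Hypothetical command definition.  Every member except .post_trigger
 * is required; .post_trigger is left unset because this command would
 * not log to the trace buffer itself.
 */
static struct event_command trigger_noop_cmd = {
	.name			= "noop",		/* written to 'trigger' file */
	.trigger_type		= ETT_NOOP,		/* hypothetical enum bit */
	.func			= event_trigger_callback, /* generic parse/register */
	.reg			= register_trigger,	/* generic list add + enable */
	.unreg			= unregister_trigger,	/* generic list del + disable */
	.set_filter		= set_trigger_filter,	/* allows 'cmd if <filter>' */
	.get_trigger_ops	= noop_get_trigger_ops,	/* hypothetical */
};

Wiring .set_filter to set_trigger_filter() is what lets a user append a filter to the command when writing it to the trigger file; the create_event_filter()/free_event_filter() exports added above exist to support exactly that.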
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index a11800ae96de..e71ffd4eccb5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -342,6 +342,12 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 	return ret;
 }
 
+int trace_event_enable_disable(struct ftrace_event_file *file,
+			       int enable, int soft_disable)
+{
+	return __ftrace_event_enable_disable(file, enable, soft_disable);
+}
+
 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
 				       int enable)
 {
@@ -421,11 +427,6 @@ static void remove_subsystem(struct ftrace_subsystem_dir *dir)
 	}
 }
 
-static void *event_file_data(struct file *filp)
-{
-	return ACCESS_ONCE(file_inode(filp)->i_private);
-}
-
 static void remove_event_file_dir(struct ftrace_event_file *file)
 {
 	struct dentry *dir = file->dir;
@@ -1549,6 +1550,9 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 	trace_create_file("filter", 0644, file->dir, file,
 			  &ftrace_event_filter_fops);
 
+	trace_create_file("trigger", 0644, file->dir, file,
+			  &event_trigger_fops);
+
 	trace_create_file("format", 0444, file->dir, call,
 			  &ftrace_event_format_fops);
 
@@ -1645,6 +1649,8 @@ trace_create_new_event(struct ftrace_event_call *call,
 	file->event_call = call;
 	file->tr = tr;
 	atomic_set(&file->sm_ref, 0);
+	atomic_set(&file->tm_ref, 0);
+	INIT_LIST_HEAD(&file->triggers);
 	list_add(&file->list, &tr->events);
 
 	return file;
@@ -1849,20 +1855,7 @@ __trace_add_event_dirs(struct trace_array *tr)
 	}
 }
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-/* Avoid typos */
-#define ENABLE_EVENT_STR "enable_event"
-#define DISABLE_EVENT_STR "disable_event"
-
-struct event_probe_data {
-	struct ftrace_event_file *file;
-	unsigned long count;
-	int ref;
-	bool enable;
-};
-
-static struct ftrace_event_file *
+struct ftrace_event_file *
 find_event_file(struct trace_array *tr, const char *system, const char *event)
 {
 	struct ftrace_event_file *file;
@@ -1885,6 +1878,19 @@ find_event_file(struct trace_array *tr, const char *system, const char *event)
 	return NULL;
 }
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Avoid typos */
+#define ENABLE_EVENT_STR "enable_event"
+#define DISABLE_EVENT_STR "disable_event"
+
+struct event_probe_data {
+	struct ftrace_event_file *file;
+	unsigned long count;
+	int ref;
+	bool enable;
+};
+
 static void
 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
 {
@@ -2311,6 +2317,9 @@ int event_trace_del_tracer(struct trace_array *tr)
 {
 	mutex_lock(&event_mutex);
 
+	/* Disable any event triggers and associated soft-disabled events */
+	clear_event_triggers(tr);
+
 	/* Disable any running events */
 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
 
@@ -2377,6 +2386,8 @@ static __init int event_trace_enable(void)
 
 	register_event_cmds();
 
+	register_trigger_cmds();
+
 	return 0;
 }
 
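The per-event 'trigger' file created in event_create_dir() above is both the control and the status interface: writes are parsed against the registered command list, and reads either list the attached triggers or, if none are set, the available command names (the "# Available triggers:" header printed by trigger_show() in the new file below). A minimal reader; the debugfs mount point and the example event are assumptions:

#include <stdio.h>

int main(void)
{
	/* Path layout is an assumption of this example. */
	const char *path =
		"/sys/kernel/debug/tracing/events/sched/sched_switch/trigger";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))	/* e.g. "# Available triggers:" */
		fputs(line, stdout);
	fclose(f);
	return 0;
}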
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 2468f56dc5db..8a8631926a07 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -799,6 +799,11 @@ static void __free_filter(struct event_filter *filter)
 	kfree(filter);
 }
 
+void free_event_filter(struct event_filter *filter)
+{
+	__free_filter(filter);
+}
+
 void destroy_call_preds(struct ftrace_event_call *call)
 {
 	__free_filter(call->filter);
@@ -1938,6 +1943,13 @@ static int create_filter(struct ftrace_event_call *call,
 	return err;
 }
 
+int create_event_filter(struct ftrace_event_call *call,
+			char *filter_str, bool set_str,
+			struct event_filter **filterp)
+{
+	return create_filter(call, filter_str, set_str, filterp);
+}
+
 /**
  * create_system_filter - create a filter for an event_subsystem
  * @system: event_subsystem to create a filter for
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c new file mode 100644 index 000000000000..8efbb69b04f0 --- /dev/null +++ b/kernel/trace/trace_events_trigger.c | |||
| @@ -0,0 +1,1437 @@ | |||
| 1 | /* | ||
| 2 | * trace_events_trigger - trace event triggers | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com> | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/module.h> | ||
| 22 | #include <linux/ctype.h> | ||
| 23 | #include <linux/mutex.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | |||
| 26 | #include "trace.h" | ||
| 27 | |||
| 28 | static LIST_HEAD(trigger_commands); | ||
| 29 | static DEFINE_MUTEX(trigger_cmd_mutex); | ||
| 30 | |||
| 31 | static void | ||
| 32 | trigger_data_free(struct event_trigger_data *data) | ||
| 33 | { | ||
| 34 | if (data->cmd_ops->set_filter) | ||
| 35 | data->cmd_ops->set_filter(NULL, data, NULL); | ||
| 36 | |||
| 37 | synchronize_sched(); /* make sure current triggers exit before free */ | ||
| 38 | kfree(data); | ||
| 39 | } | ||
| 40 | |||
| 41 | /** | ||
| 42 | * event_triggers_call - Call triggers associated with a trace event | ||
| 43 | * @file: The ftrace_event_file associated with the event | ||
| 44 | * @rec: The trace entry for the event, NULL for unconditional invocation | ||
| 45 | * | ||
| 46 | * For each trigger associated with an event, invoke the trigger | ||
| 47 | * function registered with the associated trigger command. If rec is | ||
| 48 | * non-NULL, it means that the trigger requires further processing and | ||
| 49 | * shouldn't be unconditionally invoked. If rec is non-NULL and the | ||
| 50 | * trigger has a filter associated with it, rec will checked against | ||
| 51 | * the filter and if the record matches the trigger will be invoked. | ||
| 52 | * If the trigger is a 'post_trigger', meaning it shouldn't be invoked | ||
| 53 | * in any case until the current event is written, the trigger | ||
| 54 | * function isn't invoked but the bit associated with the deferred | ||
| 55 | * trigger is set in the return value. | ||
| 56 | * | ||
| 57 | * Returns an enum event_trigger_type value containing a set bit for | ||
| 58 | * any trigger that should be deferred, ETT_NONE if nothing to defer. | ||
| 59 | * | ||
| 60 | * Called from tracepoint handlers (with rcu_read_lock_sched() held). | ||
| 61 | * | ||
| 62 | * Return: an enum event_trigger_type value containing a set bit for | ||
| 63 | * any trigger that should be deferred, ETT_NONE if nothing to defer. | ||
| 64 | */ | ||
| 65 | enum event_trigger_type | ||
| 66 | event_triggers_call(struct ftrace_event_file *file, void *rec) | ||
| 67 | { | ||
| 68 | struct event_trigger_data *data; | ||
| 69 | enum event_trigger_type tt = ETT_NONE; | ||
| 70 | struct event_filter *filter; | ||
| 71 | |||
| 72 | if (list_empty(&file->triggers)) | ||
| 73 | return tt; | ||
| 74 | |||
| 75 | list_for_each_entry_rcu(data, &file->triggers, list) { | ||
| 76 | if (!rec) { | ||
| 77 | data->ops->func(data); | ||
| 78 | continue; | ||
| 79 | } | ||
| 80 | filter = rcu_dereference(data->filter); | ||
| 81 | if (filter && !filter_match_preds(filter, rec)) | ||
| 82 | continue; | ||
| 83 | if (data->cmd_ops->post_trigger) { | ||
| 84 | tt |= data->cmd_ops->trigger_type; | ||
| 85 | continue; | ||
| 86 | } | ||
| 87 | data->ops->func(data); | ||
| 88 | } | ||
| 89 | return tt; | ||
| 90 | } | ||
| 91 | EXPORT_SYMBOL_GPL(event_triggers_call); | ||
| 92 | |||
| 93 | /** | ||
| 94 | * event_triggers_post_call - Call 'post_triggers' for a trace event | ||
| 95 | * @file: The ftrace_event_file associated with the event | ||
| 96 | * @tt: enum event_trigger_type containing a set bit for each trigger to invoke | ||
| 97 | * | ||
| 98 | * For each trigger associated with an event, invoke the trigger | ||
| 99 | * function registered with the associated trigger command, if the | ||
| 100 | * corresponding bit is set in the tt enum passed into this function. | ||
| 101 | * See @event_triggers_call for details on how those bits are set. | ||
| 102 | * | ||
| 103 | * Called from tracepoint handlers (with rcu_read_lock_sched() held). | ||
| 104 | */ | ||
| 105 | void | ||
| 106 | event_triggers_post_call(struct ftrace_event_file *file, | ||
| 107 | enum event_trigger_type tt) | ||
| 108 | { | ||
| 109 | struct event_trigger_data *data; | ||
| 110 | |||
| 111 | list_for_each_entry_rcu(data, &file->triggers, list) { | ||
| 112 | if (data->cmd_ops->trigger_type & tt) | ||
| 113 | data->ops->func(data); | ||
| 114 | } | ||
| 115 | } | ||
| 116 | EXPORT_SYMBOL_GPL(event_triggers_post_call); | ||
| 117 | |||
| 118 | #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL) | ||
| 119 | |||
| 120 | static void *trigger_next(struct seq_file *m, void *t, loff_t *pos) | ||
| 121 | { | ||
| 122 | struct ftrace_event_file *event_file = event_file_data(m->private); | ||
| 123 | |||
| 124 | if (t == SHOW_AVAILABLE_TRIGGERS) | ||
| 125 | return NULL; | ||
| 126 | |||
| 127 | return seq_list_next(t, &event_file->triggers, pos); | ||
| 128 | } | ||
| 129 | |||
| 130 | static void *trigger_start(struct seq_file *m, loff_t *pos) | ||
| 131 | { | ||
| 132 | struct ftrace_event_file *event_file; | ||
| 133 | |||
| 134 | /* ->stop() is called even if ->start() fails */ | ||
| 135 | mutex_lock(&event_mutex); | ||
| 136 | event_file = event_file_data(m->private); | ||
| 137 | if (unlikely(!event_file)) | ||
| 138 | return ERR_PTR(-ENODEV); | ||
| 139 | |||
| 140 | if (list_empty(&event_file->triggers)) | ||
| 141 | return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL; | ||
| 142 | |||
| 143 | return seq_list_start(&event_file->triggers, *pos); | ||
| 144 | } | ||
| 145 | |||
| 146 | static void trigger_stop(struct seq_file *m, void *t) | ||
| 147 | { | ||
| 148 | mutex_unlock(&event_mutex); | ||
| 149 | } | ||
| 150 | |||
| 151 | static int trigger_show(struct seq_file *m, void *v) | ||
| 152 | { | ||
| 153 | struct event_trigger_data *data; | ||
| 154 | struct event_command *p; | ||
| 155 | |||
| 156 | if (v == SHOW_AVAILABLE_TRIGGERS) { | ||
| 157 | seq_puts(m, "# Available triggers:\n"); | ||
| 158 | seq_putc(m, '#'); | ||
| 159 | mutex_lock(&trigger_cmd_mutex); | ||
| 160 | list_for_each_entry_reverse(p, &trigger_commands, list) | ||
| 161 | seq_printf(m, " %s", p->name); | ||
| 162 | seq_putc(m, '\n'); | ||
| 163 | mutex_unlock(&trigger_cmd_mutex); | ||
| 164 | return 0; | ||
| 165 | } | ||
| 166 | |||
| 167 | data = list_entry(v, struct event_trigger_data, list); | ||
| 168 | data->ops->print(m, data->ops, data); | ||
| 169 | |||
| 170 | return 0; | ||
| 171 | } | ||
| 172 | |||
| 173 | static const struct seq_operations event_triggers_seq_ops = { | ||
| 174 | .start = trigger_start, | ||
| 175 | .next = trigger_next, | ||
| 176 | .stop = trigger_stop, | ||
| 177 | .show = trigger_show, | ||
| 178 | }; | ||
| 179 | |||
| 180 | static int event_trigger_regex_open(struct inode *inode, struct file *file) | ||
| 181 | { | ||
| 182 | int ret = 0; | ||
| 183 | |||
| 184 | mutex_lock(&event_mutex); | ||
| 185 | |||
| 186 | if (unlikely(!event_file_data(file))) { | ||
| 187 | mutex_unlock(&event_mutex); | ||
| 188 | return -ENODEV; | ||
| 189 | } | ||
| 190 | |||
| 191 | if (file->f_mode & FMODE_READ) { | ||
| 192 | ret = seq_open(file, &event_triggers_seq_ops); | ||
| 193 | if (!ret) { | ||
| 194 | struct seq_file *m = file->private_data; | ||
| 195 | m->private = file; | ||
| 196 | } | ||
| 197 | } | ||
| 198 | |||
| 199 | mutex_unlock(&event_mutex); | ||
| 200 | |||
| 201 | return ret; | ||
| 202 | } | ||
| 203 | |||
| 204 | static int trigger_process_regex(struct ftrace_event_file *file, char *buff) | ||
| 205 | { | ||
| 206 | char *command, *next = buff; | ||
| 207 | struct event_command *p; | ||
| 208 | int ret = -EINVAL; | ||
| 209 | |||
| 210 | command = strsep(&next, ": \t"); | ||
| 211 | command = (command[0] != '!') ? command : command + 1; | ||
| 212 | |||
| 213 | mutex_lock(&trigger_cmd_mutex); | ||
| 214 | list_for_each_entry(p, &trigger_commands, list) { | ||
| 215 | if (strcmp(p->name, command) == 0) { | ||
| 216 | ret = p->func(p, file, buff, command, next); | ||
| 217 | goto out_unlock; | ||
| 218 | } | ||
| 219 | } | ||
| 220 | out_unlock: | ||
| 221 | mutex_unlock(&trigger_cmd_mutex); | ||
| 222 | |||
| 223 | return ret; | ||
| 224 | } | ||
| 225 | |||
| 226 | static ssize_t event_trigger_regex_write(struct file *file, | ||
| 227 | const char __user *ubuf, | ||
| 228 | size_t cnt, loff_t *ppos) | ||
| 229 | { | ||
| 230 | struct ftrace_event_file *event_file; | ||
| 231 | ssize_t ret; | ||
| 232 | char *buf; | ||
| 233 | |||
| 234 | if (!cnt) | ||
| 235 | return 0; | ||
| 236 | |||
| 237 | if (cnt >= PAGE_SIZE) | ||
| 238 | return -EINVAL; | ||
| 239 | |||
| 240 | buf = (char *)__get_free_page(GFP_TEMPORARY); | ||
| 241 | if (!buf) | ||
| 242 | return -ENOMEM; | ||
| 243 | |||
| 244 | if (copy_from_user(buf, ubuf, cnt)) { | ||
| 245 | free_page((unsigned long)buf); | ||
| 246 | return -EFAULT; | ||
| 247 | } | ||
| 248 | buf[cnt] = '\0'; | ||
| 249 | strim(buf); | ||
| 250 | |||
| 251 | mutex_lock(&event_mutex); | ||
| 252 | event_file = event_file_data(file); | ||
| 253 | if (unlikely(!event_file)) { | ||
| 254 | mutex_unlock(&event_mutex); | ||
| 255 | free_page((unsigned long)buf); | ||
| 256 | return -ENODEV; | ||
| 257 | } | ||
| 258 | ret = trigger_process_regex(event_file, buf); | ||
| 259 | mutex_unlock(&event_mutex); | ||
| 260 | |||
| 261 | free_page((unsigned long)buf); | ||
| 262 | if (ret < 0) | ||
| 263 | goto out; | ||
| 264 | |||
| 265 | *ppos += cnt; | ||
| 266 | ret = cnt; | ||
| 267 | out: | ||
| 268 | return ret; | ||
| 269 | } | ||
| 270 | |||
| 271 | static int event_trigger_regex_release(struct inode *inode, struct file *file) | ||
| 272 | { | ||
| 273 | mutex_lock(&event_mutex); | ||
| 274 | |||
| 275 | if (file->f_mode & FMODE_READ) | ||
| 276 | seq_release(inode, file); | ||
| 277 | |||
| 278 | mutex_unlock(&event_mutex); | ||
| 279 | |||
| 280 | return 0; | ||
| 281 | } | ||
| 282 | |||
| 283 | static ssize_t | ||
| 284 | event_trigger_write(struct file *filp, const char __user *ubuf, | ||
| 285 | size_t cnt, loff_t *ppos) | ||
| 286 | { | ||
| 287 | return event_trigger_regex_write(filp, ubuf, cnt, ppos); | ||
| 288 | } | ||
| 289 | |||
| 290 | static int | ||
| 291 | event_trigger_open(struct inode *inode, struct file *filp) | ||
| 292 | { | ||
| 293 | return event_trigger_regex_open(inode, filp); | ||
| 294 | } | ||
| 295 | |||
| 296 | static int | ||
| 297 | event_trigger_release(struct inode *inode, struct file *file) | ||
| 298 | { | ||
| 299 | return event_trigger_regex_release(inode, file); | ||
| 300 | } | ||
| 301 | |||
| 302 | const struct file_operations event_trigger_fops = { | ||
| 303 | .open = event_trigger_open, | ||
| 304 | .read = seq_read, | ||
| 305 | .write = event_trigger_write, | ||
| 306 | .llseek = tracing_lseek, | ||
| 307 | .release = event_trigger_release, | ||
| 308 | }; | ||
| 309 | |||
| 310 | /* | ||
| 311 | * Currently we only register event commands from __init, so mark this | ||
| 312 | * __init too. | ||
| 313 | */ | ||
| 314 | static __init int register_event_command(struct event_command *cmd) | ||
| 315 | { | ||
| 316 | struct event_command *p; | ||
| 317 | int ret = 0; | ||
| 318 | |||
| 319 | mutex_lock(&trigger_cmd_mutex); | ||
| 320 | list_for_each_entry(p, &trigger_commands, list) { | ||
| 321 | if (strcmp(cmd->name, p->name) == 0) { | ||
| 322 | ret = -EBUSY; | ||
| 323 | goto out_unlock; | ||
| 324 | } | ||
| 325 | } | ||
| 326 | list_add(&cmd->list, &trigger_commands); | ||
| 327 | out_unlock: | ||
| 328 | mutex_unlock(&trigger_cmd_mutex); | ||
| 329 | |||
| 330 | return ret; | ||
| 331 | } | ||
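The same pattern scales to new commands; a hedged sketch of wiring one up at boot, mirroring the registration helpers later in this file (everything named "mywidget", including its ETT_MYWIDGET type bit and get_trigger_ops helper, is hypothetical):

        static struct event_command trigger_mywidget_cmd = {
                .name                   = "mywidget",
                .trigger_type           = ETT_MYWIDGET, /* assumed new type bit */
                .func                   = event_trigger_callback,
                .reg                    = register_trigger,
                .unreg                  = unregister_trigger,
                .get_trigger_ops        = mywidget_get_trigger_ops,
                .set_filter             = set_trigger_filter,
        };

        static __init int register_trigger_mywidget_cmd(void)
        {
                return register_event_command(&trigger_mywidget_cmd);
        }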
| 332 | |||
| 333 | /* | ||
| 334 | * Currently we only unregister event commands from __init, so mark | ||
| 335 | * this __init too. | ||
| 336 | */ | ||
| 337 | static __init int unregister_event_command(struct event_command *cmd) | ||
| 338 | { | ||
| 339 | struct event_command *p, *n; | ||
| 340 | int ret = -ENODEV; | ||
| 341 | |||
| 342 | mutex_lock(&trigger_cmd_mutex); | ||
| 343 | list_for_each_entry_safe(p, n, &trigger_commands, list) { | ||
| 344 | if (strcmp(cmd->name, p->name) == 0) { | ||
| 345 | ret = 0; | ||
| 346 | list_del_init(&p->list); | ||
| 347 | goto out_unlock; | ||
| 348 | } | ||
| 349 | } | ||
| 350 | out_unlock: | ||
| 351 | mutex_unlock(&trigger_cmd_mutex); | ||
| 352 | |||
| 353 | return ret; | ||
| 354 | } | ||
| 355 | |||
| 356 | /** | ||
| 357 | * event_trigger_print - Generic event_trigger_ops @print implementation | ||
| 358 | * @name: The name of the event trigger | ||
| 359 | * @m: The seq_file being printed to | ||
| 360 | * @data: Trigger-specific data | ||
| 361 | * @filter_str: filter_str to print, if present | ||
| 362 | * | ||
| 363 | * Common implementation for event triggers to print themselves. | ||
| 364 | * | ||
| 365 | * Usually wrapped by a function that simply sets the @name of the | ||
| 366 | * trigger command and then invokes this. | ||
| 367 | * | ||
| 368 | * Return: 0 on success, errno otherwise | ||
| 369 | */ | ||
| 370 | static int | ||
| 371 | event_trigger_print(const char *name, struct seq_file *m, | ||
| 372 | void *data, char *filter_str) | ||
| 373 | { | ||
| 374 | long count = (long)data; | ||
| 375 | |||
| 376 | seq_printf(m, "%s", name); | ||
| 377 | |||
| 378 | if (count == -1) | ||
| 379 | seq_puts(m, ":unlimited"); | ||
| 380 | else | ||
| 381 | seq_printf(m, ":count=%ld", count); | ||
| 382 | |||
| 383 | if (filter_str) | ||
| 384 | seq_printf(m, " if %s\n", filter_str); | ||
| 385 | else | ||
| 386 | seq_puts(m, "\n"); | ||
| 387 | |||
| 388 | return 0; | ||
| 389 | } | ||
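In practice each command wraps this helper with its own name, exactly as the traceon/traceoff/snapshot printers below do; a minimal sketch (the "mywidget" name is hypothetical):

        static int
        mywidget_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
                               struct event_trigger_data *data)
        {
                /* forward to the generic printer with this command's name */
                return event_trigger_print("mywidget", m, (void *)data->count,
                                           data->filter_str);
        }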
| 390 | |||
| 391 | /** | ||
| 392 | * event_trigger_init - Generic event_trigger_ops @init implementation | ||
| 393 | * @ops: The trigger ops associated with the trigger | ||
| 394 | * @data: Trigger-specific data | ||
| 395 | * | ||
| 396 | * Common implementation of event trigger initialization. | ||
| 397 | * | ||
| 398 | * Usually used directly as the @init method in event trigger | ||
| 399 | * implementations. | ||
| 400 | * | ||
| 401 | * Return: 0 on success, errno otherwise | ||
| 402 | */ | ||
| 403 | static int | ||
| 404 | event_trigger_init(struct event_trigger_ops *ops, | ||
| 405 | struct event_trigger_data *data) | ||
| 406 | { | ||
| 407 | data->ref++; | ||
| 408 | return 0; | ||
| 409 | } | ||
| 410 | |||
| 411 | /** | ||
| 412 | * event_trigger_free - Generic event_trigger_ops @free implementation | ||
| 413 | * @ops: The trigger ops associated with the trigger | ||
| 414 | * @data: Trigger-specific data | ||
| 415 | * | ||
| 416 | * Common implementation of event trigger de-initialization. | ||
| 417 | * | ||
| 418 | * Usually used directly as the @free method in event trigger | ||
| 419 | * implementations. | ||
| 420 | */ | ||
| 421 | static void | ||
| 422 | event_trigger_free(struct event_trigger_ops *ops, | ||
| 423 | struct event_trigger_data *data) | ||
| 424 | { | ||
| 425 | if (WARN_ON_ONCE(data->ref <= 0)) | ||
| 426 | return; | ||
| 427 | |||
| 428 | data->ref--; | ||
| 429 | if (!data->ref) | ||
| 430 | trigger_data_free(data); | ||
| 431 | } | ||
| 432 | |||
| 433 | static int trace_event_trigger_enable_disable(struct ftrace_event_file *file, | ||
| 434 | int trigger_enable) | ||
| 435 | { | ||
| 436 | int ret = 0; | ||
| 437 | |||
| 438 | if (trigger_enable) { | ||
| 439 | if (atomic_inc_return(&file->tm_ref) > 1) | ||
| 440 | return ret; | ||
| 441 | set_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags); | ||
| 442 | ret = trace_event_enable_disable(file, 1, 1); | ||
| 443 | } else { | ||
| 444 | if (atomic_dec_return(&file->tm_ref) > 0) | ||
| 445 | return ret; | ||
| 446 | clear_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags); | ||
| 447 | ret = trace_event_enable_disable(file, 0, 1); | ||
| 448 | } | ||
| 449 | |||
| 450 | return ret; | ||
| 451 | } | ||
| 452 | |||
| 453 | /** | ||
| 454 | * clear_event_triggers - Clear all triggers associated with a trace array | ||
| 455 | * @tr: The trace array to clear | ||
| 456 | * | ||
| 457 | * For each trigger, the triggering event has its tm_ref decremented | ||
| 458 | * via trace_event_trigger_enable_disable(), and any associated event | ||
| 459 | * (in the case of enable/disable_event triggers) will have its sm_ref | ||
| 460 | * decremented via free()->trace_event_enable_disable(). That | ||
| 461 | * combination effectively reverses the soft-mode/trigger state added | ||
| 462 | * by trigger registration. | ||
| 463 | * | ||
| 464 | * Must be called with event_mutex held. | ||
| 465 | */ | ||
| 466 | void | ||
| 467 | clear_event_triggers(struct trace_array *tr) | ||
| 468 | { | ||
| 469 | struct ftrace_event_file *file; | ||
| 470 | |||
| 471 | list_for_each_entry(file, &tr->events, list) { | ||
| 472 | struct event_trigger_data *data; | ||
| 473 | list_for_each_entry_rcu(data, &file->triggers, list) { | ||
| 474 | trace_event_trigger_enable_disable(file, 0); | ||
| 475 | if (data->ops->free) | ||
| 476 | data->ops->free(data->ops, data); | ||
| 477 | } | ||
| 478 | } | ||
| 479 | } | ||
| 480 | |||
| 481 | /** | ||
| 482 | * update_cond_flag - Set or reset the TRIGGER_COND bit | ||
| 483 | * @file: The ftrace_event_file associated with the event | ||
| 484 | * | ||
| 485 | * If an event has triggers and any of those triggers has a filter or | ||
| 486 | * a post_trigger, trigger invocation needs to be deferred until after | ||
| 487 | * the current event has logged its data, and the event should have | ||
| 488 | * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be | ||
| 489 | * cleared. | ||
| 490 | */ | ||
| 491 | static void update_cond_flag(struct ftrace_event_file *file) | ||
| 492 | { | ||
| 493 | struct event_trigger_data *data; | ||
| 494 | bool set_cond = false; | ||
| 495 | |||
| 496 | list_for_each_entry_rcu(data, &file->triggers, list) { | ||
| 497 | if (data->filter || data->cmd_ops->post_trigger) { | ||
| 498 | set_cond = true; | ||
| 499 | break; | ||
| 500 | } | ||
| 501 | } | ||
| 502 | |||
| 503 | if (set_cond) | ||
| 504 | set_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags); | ||
| 505 | else | ||
| 506 | clear_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags); | ||
| 507 | } | ||
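A hedged sketch of how event-commit code is expected to consult the bit (the call shape here follows the flag naming and is an assumption, not a quote of the core tracing code):

        if (test_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags))
                /* deferred: pass the record so filters can run after logging */
                tt = event_triggers_call(file, entry);
        else
                /* unconditional: fire immediately, no record needed */
                event_triggers_call(file, NULL);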
| 508 | |||
| 509 | /** | ||
| 510 | * register_trigger - Generic event_command @reg implementation | ||
| 511 | * @glob: The raw string used to register the trigger | ||
| 512 | * @ops: The trigger ops associated with the trigger | ||
| 513 | * @data: Trigger-specific data to associate with the trigger | ||
| 514 | * @file: The ftrace_event_file associated with the event | ||
| 515 | * | ||
| 516 | * Common implementation for event trigger registration. | ||
| 517 | * | ||
| 518 | * Usually used directly as the @reg method in event command | ||
| 519 | * implementations. | ||
| 520 | * | ||
| 521 | * Return: 0 on success, errno otherwise | ||
| 522 | */ | ||
| 523 | static int register_trigger(char *glob, struct event_trigger_ops *ops, | ||
| 524 | struct event_trigger_data *data, | ||
| 525 | struct ftrace_event_file *file) | ||
| 526 | { | ||
| 527 | struct event_trigger_data *test; | ||
| 528 | int ret = 0; | ||
| 529 | |||
| 530 | list_for_each_entry_rcu(test, &file->triggers, list) { | ||
| 531 | if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) { | ||
| 532 | ret = -EEXIST; | ||
| 533 | goto out; | ||
| 534 | } | ||
| 535 | } | ||
| 536 | |||
| 537 | if (data->ops->init) { | ||
| 538 | ret = data->ops->init(data->ops, data); | ||
| 539 | if (ret < 0) | ||
| 540 | goto out; | ||
| 541 | } | ||
| 542 | |||
| 543 | list_add_rcu(&data->list, &file->triggers); | ||
| 544 | ret++; | ||
| 545 | |||
| 546 | if (trace_event_trigger_enable_disable(file, 1) < 0) { | ||
| 547 | list_del_rcu(&data->list); | ||
| 548 | ret--; | ||
| 549 | } | ||
| 550 | update_cond_flag(file); | ||
| 551 | out: | ||
| 552 | return ret; | ||
| 553 | } | ||
| 554 | |||
| 555 | /** | ||
| 556 | * unregister_trigger - Generic event_command @unreg implementation | ||
| 557 | * @glob: The raw string used to register the trigger | ||
| 558 | * @ops: The trigger ops associated with the trigger | ||
| 559 | * @test: Trigger-specific data used to find the trigger to remove | ||
| 560 | * @file: The ftrace_event_file associated with the event | ||
| 561 | * | ||
| 562 | * Common implementation for event trigger unregistration. | ||
| 563 | * | ||
| 564 | * Usually used directly as the @unreg method in event command | ||
| 565 | * implementations. | ||
| 566 | */ | ||
| 567 | static void unregister_trigger(char *glob, struct event_trigger_ops *ops, | ||
| 568 | struct event_trigger_data *test, | ||
| 569 | struct ftrace_event_file *file) | ||
| 570 | { | ||
| 571 | struct event_trigger_data *data; | ||
| 572 | bool unregistered = false; | ||
| 573 | |||
| 574 | list_for_each_entry_rcu(data, &file->triggers, list) { | ||
| 575 | if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) { | ||
| 576 | unregistered = true; | ||
| 577 | list_del_rcu(&data->list); | ||
| 578 | update_cond_flag(file); | ||
| 579 | trace_event_trigger_enable_disable(file, 0); | ||
| 580 | break; | ||
| 581 | } | ||
| 582 | } | ||
| 583 | |||
| 584 | if (unregistered && data->ops->free) | ||
| 585 | data->ops->free(data->ops, data); | ||
| 586 | } | ||
| 587 | |||
| 588 | /** | ||
| 589 | * event_trigger_callback - Generic event_command @func implementation | ||
| 590 | * @cmd_ops: The command ops, used for trigger registration | ||
| 591 | * @file: The ftrace_event_file associated with the event | ||
| 592 | * @glob: The raw string used to register the trigger | ||
| 593 | * @cmd: The cmd portion of the string used to register the trigger | ||
| 594 | * @param: The params portion of the string used to register the trigger | ||
| 595 | * | ||
| 596 | * Common implementation for event command parsing and trigger | ||
| 597 | * instantiation. | ||
| 598 | * | ||
| 599 | * Usually used directly as the @func method in event command | ||
| 600 | * implementations. | ||
| 601 | * | ||
| 602 | * Return: 0 on success, errno otherwise | ||
| 603 | */ | ||
| 604 | static int | ||
| 605 | event_trigger_callback(struct event_command *cmd_ops, | ||
| 606 | struct ftrace_event_file *file, | ||
| 607 | char *glob, char *cmd, char *param) | ||
| 608 | { | ||
| 609 | struct event_trigger_data *trigger_data; | ||
| 610 | struct event_trigger_ops *trigger_ops; | ||
| 611 | char *trigger = NULL; | ||
| 612 | char *number; | ||
| 613 | int ret; | ||
| 614 | |||
| 615 | /* separate the trigger from the filter (t:n [if filter]) */ | ||
| 616 | if (param && isdigit(param[0])) | ||
| 617 | trigger = strsep(¶m, " \t"); | ||
| 618 | |||
| 619 | trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); | ||
| 620 | |||
| 621 | ret = -ENOMEM; | ||
| 622 | trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL); | ||
| 623 | if (!trigger_data) | ||
| 624 | goto out; | ||
| 625 | |||
| 626 | trigger_data->count = -1; | ||
| 627 | trigger_data->ops = trigger_ops; | ||
| 628 | trigger_data->cmd_ops = cmd_ops; | ||
| 629 | INIT_LIST_HEAD(&trigger_data->list); | ||
| 630 | |||
| 631 | if (glob[0] == '!') { | ||
| 632 | cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); | ||
| 633 | kfree(trigger_data); | ||
| 634 | ret = 0; | ||
| 635 | goto out; | ||
| 636 | } | ||
| 637 | |||
| 638 | if (trigger) { | ||
| 639 | number = strsep(&trigger, ":"); | ||
| 640 | |||
| 641 | ret = -EINVAL; | ||
| 642 | if (!strlen(number)) | ||
| 643 | goto out_free; | ||
| 644 | |||
| 645 | /* | ||
| 646 | * Parse the trigger count into trigger_data->count; | ||
| 647 | * -1, the default, means "unlimited". | ||
| 648 | */ | ||
| 649 | ret = kstrtoul(number, 0, &trigger_data->count); | ||
| 650 | if (ret) | ||
| 651 | goto out_free; | ||
| 652 | } | ||
| 653 | |||
| 654 | if (!param) /* if param is non-empty, it's supposed to be a filter */ | ||
| 655 | goto out_reg; | ||
| 656 | |||
| 657 | if (!cmd_ops->set_filter) | ||
| 658 | goto out_reg; | ||
| 659 | |||
| 660 | ret = cmd_ops->set_filter(param, trigger_data, file); | ||
| 661 | if (ret < 0) | ||
| 662 | goto out_free; | ||
| 663 | |||
| 664 | out_reg: | ||
| 665 | ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); | ||
| 666 | /* | ||
| 667 | * On success, the above returns the number of triggers registered; | ||
| 668 | * if it registered nothing it returns zero. | ||
| 669 | * Consider registering nothing a failure too. | ||
| 670 | */ | ||
| 671 | if (!ret) { | ||
| 672 | ret = -ENOENT; | ||
| 673 | goto out_free; | ||
| 674 | } else if (ret < 0) | ||
| 675 | goto out_free; | ||
| 676 | ret = 0; | ||
| 677 | out: | ||
| 678 | return ret; | ||
| 679 | |||
| 680 | out_free: | ||
| 681 | if (cmd_ops->set_filter) | ||
| 682 | cmd_ops->set_filter(NULL, trigger_data, NULL); | ||
| 683 | kfree(trigger_data); | ||
| 684 | goto out; | ||
| 685 | } | ||
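To make the parsing above concrete, a worked example (the event and field names are illustrative only):

        /*
         * # echo 'traceoff:5 if bytes_req > 1024' > events/kmem/kmalloc/trigger
         *
         *   cmd     = "traceoff"               matched against cmd_ops->name
         *   param   = "5 if bytes_req > 1024"
         *   trigger = "5"                      -> trigger_data->count = 5
         *   param   = "if bytes_req > 1024"    -> cmd_ops->set_filter()
         *
         * Prefixing '!' (echo '!traceoff' > ...) takes the unreg path instead.
         */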
| 686 | |||
| 687 | /** | ||
| 688 | * set_trigger_filter - Generic event_command @set_filter implementation | ||
| 689 | * @filter_str: The filter string for the trigger, NULL to remove filter | ||
| 690 | * @trigger_data: Trigger-specific data | ||
| 691 | * @file: The ftrace_event_file associated with the event | ||
| 692 | * | ||
| 693 | * Common implementation for event command filter parsing and filter | ||
| 694 | * instantiation. | ||
| 695 | * | ||
| 696 | * Usually used directly as the @set_filter method in event command | ||
| 697 | * implementations. | ||
| 698 | * | ||
| 699 | * Also used to remove a filter (if filter_str == NULL). | ||
| 700 | * | ||
| 701 | * Return: 0 on success, errno otherwise | ||
| 702 | */ | ||
| 703 | static int set_trigger_filter(char *filter_str, | ||
| 704 | struct event_trigger_data *trigger_data, | ||
| 705 | struct ftrace_event_file *file) | ||
| 706 | { | ||
| 707 | struct event_trigger_data *data = trigger_data; | ||
| 708 | struct event_filter *filter = NULL, *tmp; | ||
| 709 | int ret = -EINVAL; | ||
| 710 | char *s; | ||
| 711 | |||
| 712 | if (!filter_str) /* clear the current filter */ | ||
| 713 | goto assign; | ||
| 714 | |||
| 715 | s = strsep(&filter_str, " \t"); | ||
| 716 | |||
| 717 | if (!strlen(s) || strcmp(s, "if") != 0) | ||
| 718 | goto out; | ||
| 719 | |||
| 720 | if (!filter_str) | ||
| 721 | goto out; | ||
| 722 | |||
| 723 | /* The filter applies to the triggering event, not to what the trigger does */ | ||
| 724 | ret = create_event_filter(file->event_call, filter_str, false, &filter); | ||
| 725 | if (ret) | ||
| 726 | goto out; | ||
| 727 | assign: | ||
| 728 | tmp = rcu_access_pointer(data->filter); | ||
| 729 | |||
| 730 | rcu_assign_pointer(data->filter, filter); | ||
| 731 | |||
| 732 | if (tmp) { | ||
| 733 | /* Make sure no in-flight trigger call still sees the old filter */ | ||
| 734 | synchronize_sched(); | ||
| 735 | free_event_filter(tmp); | ||
| 736 | } | ||
| 737 | |||
| 738 | kfree(data->filter_str); | ||
| 739 | data->filter_str = NULL; | ||
| 740 | |||
| 741 | if (filter_str) { | ||
| 742 | data->filter_str = kstrdup(filter_str, GFP_KERNEL); | ||
| 743 | if (!data->filter_str) { | ||
| 744 | free_event_filter(rcu_access_pointer(data->filter)); | ||
| 745 | data->filter = NULL; | ||
| 746 | ret = -ENOMEM; | ||
| 747 | } | ||
| 748 | } | ||
| 749 | out: | ||
| 750 | return ret; | ||
| 751 | } | ||
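A hedged read-side sketch of why synchronize_sched() above suffices: triggers are invoked with preemption disabled, which doubles as a sched-RCU read-side critical section (the loop shape is assumed from the list/RCU usage in this file):

        list_for_each_entry_rcu(data, &file->triggers, list) {
                filter = rcu_dereference_sched(data->filter);
                if (filter && !filter_match_preds(filter, rec))
                        continue; /* this trigger's filter rejected the event */
                data->ops->func(data);
        }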
| 752 | |||
| 753 | static void | ||
| 754 | traceon_trigger(struct event_trigger_data *data) | ||
| 755 | { | ||
| 756 | if (tracing_is_on()) | ||
| 757 | return; | ||
| 758 | |||
| 759 | tracing_on(); | ||
| 760 | } | ||
| 761 | |||
| 762 | static void | ||
| 763 | traceon_count_trigger(struct event_trigger_data *data) | ||
| 764 | { | ||
| 765 | if (tracing_is_on()) | ||
| 766 | return; | ||
| 767 | |||
| 768 | if (!data->count) | ||
| 769 | return; | ||
| 770 | |||
| 771 | if (data->count != -1) | ||
| 772 | (data->count)--; | ||
| 773 | |||
| 774 | tracing_on(); | ||
| 775 | } | ||
| 776 | |||
| 777 | static void | ||
| 778 | traceoff_trigger(struct event_trigger_data *data) | ||
| 779 | { | ||
| 780 | if (!tracing_is_on()) | ||
| 781 | return; | ||
| 782 | |||
| 783 | tracing_off(); | ||
| 784 | } | ||
| 785 | |||
| 786 | static void | ||
| 787 | traceoff_count_trigger(struct event_trigger_data *data) | ||
| 788 | { | ||
| 789 | if (!tracing_is_on()) | ||
| 790 | return; | ||
| 791 | |||
| 792 | if (!data->count) | ||
| 793 | return; | ||
| 794 | |||
| 795 | if (data->count != -1) | ||
| 796 | (data->count)--; | ||
| 797 | |||
| 798 | tracing_off(); | ||
| 799 | } | ||
| 800 | |||
| 801 | static int | ||
| 802 | traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | ||
| 803 | struct event_trigger_data *data) | ||
| 804 | { | ||
| 805 | return event_trigger_print("traceon", m, (void *)data->count, | ||
| 806 | data->filter_str); | ||
| 807 | } | ||
| 808 | |||
| 809 | static int | ||
| 810 | traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | ||
| 811 | struct event_trigger_data *data) | ||
| 812 | { | ||
| 813 | return event_trigger_print("traceoff", m, (void *)data->count, | ||
| 814 | data->filter_str); | ||
| 815 | } | ||
| 816 | |||
| 817 | static struct event_trigger_ops traceon_trigger_ops = { | ||
| 818 | .func = traceon_trigger, | ||
| 819 | .print = traceon_trigger_print, | ||
| 820 | .init = event_trigger_init, | ||
| 821 | .free = event_trigger_free, | ||
| 822 | }; | ||
| 823 | |||
| 824 | static struct event_trigger_ops traceon_count_trigger_ops = { | ||
| 825 | .func = traceon_count_trigger, | ||
| 826 | .print = traceon_trigger_print, | ||
| 827 | .init = event_trigger_init, | ||
| 828 | .free = event_trigger_free, | ||
| 829 | }; | ||
| 830 | |||
| 831 | static struct event_trigger_ops traceoff_trigger_ops = { | ||
| 832 | .func = traceoff_trigger, | ||
| 833 | .print = traceoff_trigger_print, | ||
| 834 | .init = event_trigger_init, | ||
| 835 | .free = event_trigger_free, | ||
| 836 | }; | ||
| 837 | |||
| 838 | static struct event_trigger_ops traceoff_count_trigger_ops = { | ||
| 839 | .func = traceoff_count_trigger, | ||
| 840 | .print = traceoff_trigger_print, | ||
| 841 | .init = event_trigger_init, | ||
| 842 | .free = event_trigger_free, | ||
| 843 | }; | ||
| 844 | |||
| 845 | static struct event_trigger_ops * | ||
| 846 | onoff_get_trigger_ops(char *cmd, char *param) | ||
| 847 | { | ||
| 848 | struct event_trigger_ops *ops; | ||
| 849 | |||
| 850 | /* we register both traceon and traceoff to this callback */ | ||
| 851 | if (strcmp(cmd, "traceon") == 0) | ||
| 852 | ops = param ? &traceon_count_trigger_ops : | ||
| 853 | &traceon_trigger_ops; | ||
| 854 | else | ||
| 855 | ops = param ? &traceoff_count_trigger_ops : | ||
| 856 | &traceoff_trigger_ops; | ||
| 857 | |||
| 858 | return ops; | ||
| 859 | } | ||
| 860 | |||
| 861 | static struct event_command trigger_traceon_cmd = { | ||
| 862 | .name = "traceon", | ||
| 863 | .trigger_type = ETT_TRACE_ONOFF, | ||
| 864 | .func = event_trigger_callback, | ||
| 865 | .reg = register_trigger, | ||
| 866 | .unreg = unregister_trigger, | ||
| 867 | .get_trigger_ops = onoff_get_trigger_ops, | ||
| 868 | .set_filter = set_trigger_filter, | ||
| 869 | }; | ||
| 870 | |||
| 871 | static struct event_command trigger_traceoff_cmd = { | ||
| 872 | .name = "traceoff", | ||
| 873 | .trigger_type = ETT_TRACE_ONOFF, | ||
| 874 | .func = event_trigger_callback, | ||
| 875 | .reg = register_trigger, | ||
| 876 | .unreg = unregister_trigger, | ||
| 877 | .get_trigger_ops = onoff_get_trigger_ops, | ||
| 878 | .set_filter = set_trigger_filter, | ||
| 879 | }; | ||
| 880 | |||
| 881 | #ifdef CONFIG_TRACER_SNAPSHOT | ||
| 882 | static void | ||
| 883 | snapshot_trigger(struct event_trigger_data *data) | ||
| 884 | { | ||
| 885 | tracing_snapshot(); | ||
| 886 | } | ||
| 887 | |||
| 888 | static void | ||
| 889 | snapshot_count_trigger(struct event_trigger_data *data) | ||
| 890 | { | ||
| 891 | if (!data->count) | ||
| 892 | return; | ||
| 893 | |||
| 894 | if (data->count != -1) | ||
| 895 | (data->count)--; | ||
| 896 | |||
| 897 | snapshot_trigger(data); | ||
| 898 | } | ||
| 899 | |||
| 900 | static int | ||
| 901 | register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, | ||
| 902 | struct event_trigger_data *data, | ||
| 903 | struct ftrace_event_file *file) | ||
| 904 | { | ||
| 905 | int ret = register_trigger(glob, ops, data, file); | ||
| 906 | |||
| 907 | if (ret > 0 && tracing_alloc_snapshot() != 0) { | ||
| 908 | unregister_trigger(glob, ops, data, file); | ||
| 909 | ret = 0; | ||
| 910 | } | ||
| 911 | |||
| 912 | return ret; | ||
| 913 | } | ||
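Note the snapshot buffer is allocated lazily, on the first successful registration; a hedged usage sketch (tracefs path assumed):

        /*
         * # echo 'snapshot:1' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
         *
         * Registration also calls tracing_alloc_snapshot(); if that
         * allocation fails, the trigger is unregistered again and the
         * write reports failure.
         */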
| 914 | |||
| 915 | static int | ||
| 916 | snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | ||
| 917 | struct event_trigger_data *data) | ||
| 918 | { | ||
| 919 | return event_trigger_print("snapshot", m, (void *)data->count, | ||
| 920 | data->filter_str); | ||
| 921 | } | ||
| 922 | |||
| 923 | static struct event_trigger_ops snapshot_trigger_ops = { | ||
| 924 | .func = snapshot_trigger, | ||
| 925 | .print = snapshot_trigger_print, | ||
| 926 | .init = event_trigger_init, | ||
| 927 | .free = event_trigger_free, | ||
| 928 | }; | ||
| 929 | |||
| 930 | static struct event_trigger_ops snapshot_count_trigger_ops = { | ||
| 931 | .func = snapshot_count_trigger, | ||
| 932 | .print = snapshot_trigger_print, | ||
| 933 | .init = event_trigger_init, | ||
| 934 | .free = event_trigger_free, | ||
| 935 | }; | ||
| 936 | |||
| 937 | static struct event_trigger_ops * | ||
| 938 | snapshot_get_trigger_ops(char *cmd, char *param) | ||
| 939 | { | ||
| 940 | return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops; | ||
| 941 | } | ||
| 942 | |||
| 943 | static struct event_command trigger_snapshot_cmd = { | ||
| 944 | .name = "snapshot", | ||
| 945 | .trigger_type = ETT_SNAPSHOT, | ||
| 946 | .func = event_trigger_callback, | ||
| 947 | .reg = register_snapshot_trigger, | ||
| 948 | .unreg = unregister_trigger, | ||
| 949 | .get_trigger_ops = snapshot_get_trigger_ops, | ||
| 950 | .set_filter = set_trigger_filter, | ||
| 951 | }; | ||
| 952 | |||
| 953 | static __init int register_trigger_snapshot_cmd(void) | ||
| 954 | { | ||
| 955 | int ret; | ||
| 956 | |||
| 957 | ret = register_event_command(&trigger_snapshot_cmd); | ||
| 958 | WARN_ON(ret < 0); | ||
| 959 | |||
| 960 | return ret; | ||
| 961 | } | ||
| 962 | #else | ||
| 963 | static __init int register_trigger_snapshot_cmd(void) { return 0; } | ||
| 964 | #endif /* CONFIG_TRACER_SNAPSHOT */ | ||
| 965 | |||
| 966 | #ifdef CONFIG_STACKTRACE | ||
| 967 | /* | ||
| 968 | * Skip 3: | ||
| 969 | * stacktrace_trigger() | ||
| 970 | * event_triggers_post_call() | ||
| 971 | * ftrace_raw_event_xxx() | ||
| 972 | */ | ||
| 973 | #define STACK_SKIP 3 | ||
| 974 | |||
| 975 | static void | ||
| 976 | stacktrace_trigger(struct event_trigger_data *data) | ||
| 977 | { | ||
| 978 | trace_dump_stack(STACK_SKIP); | ||
| 979 | } | ||
| 980 | |||
| 981 | static void | ||
| 982 | stacktrace_count_trigger(struct event_trigger_data *data) | ||
| 983 | { | ||
| 984 | if (!data->count) | ||
| 985 | return; | ||
| 986 | |||
| 987 | if (data->count != -1) | ||
| 988 | (data->count)--; | ||
| 989 | |||
| 990 | stacktrace_trigger(data); | ||
| 991 | } | ||
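A hedged usage sketch of the counted variant (path and event assumed):

        /*
         * # echo 'stacktrace:2' > /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
         *
         * Dumps a kernel stack for the next two kmalloc events only;
         * afterwards the trigger stays registered but inert, since
         * data->count has reached zero.
         */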
| 992 | |||
| 993 | static int | ||
| 994 | stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | ||
| 995 | struct event_trigger_data *data) | ||
| 996 | { | ||
| 997 | return event_trigger_print("stacktrace", m, (void *)data->count, | ||
| 998 | data->filter_str); | ||
| 999 | } | ||
| 1000 | |||
| 1001 | static struct event_trigger_ops stacktrace_trigger_ops = { | ||
| 1002 | .func = stacktrace_trigger, | ||
| 1003 | .print = stacktrace_trigger_print, | ||
| 1004 | .init = event_trigger_init, | ||
| 1005 | .free = event_trigger_free, | ||
| 1006 | }; | ||
| 1007 | |||
| 1008 | static struct event_trigger_ops stacktrace_count_trigger_ops = { | ||
| 1009 | .func = stacktrace_count_trigger, | ||
| 1010 | .print = stacktrace_trigger_print, | ||
| 1011 | .init = event_trigger_init, | ||
| 1012 | .free = event_trigger_free, | ||
| 1013 | }; | ||
| 1014 | |||
| 1015 | static struct event_trigger_ops * | ||
| 1016 | stacktrace_get_trigger_ops(char *cmd, char *param) | ||
| 1017 | { | ||
| 1018 | return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | static struct event_command trigger_stacktrace_cmd = { | ||
| 1022 | .name = "stacktrace", | ||
| 1023 | .trigger_type = ETT_STACKTRACE, | ||
| 1024 | .post_trigger = true, | ||
| 1025 | .func = event_trigger_callback, | ||
| 1026 | .reg = register_trigger, | ||
| 1027 | .unreg = unregister_trigger, | ||
| 1028 | .get_trigger_ops = stacktrace_get_trigger_ops, | ||
| 1029 | .set_filter = set_trigger_filter, | ||
| 1030 | }; | ||
| 1031 | |||
| 1032 | static __init int register_trigger_stacktrace_cmd(void) | ||
| 1033 | { | ||
| 1034 | int ret; | ||
| 1035 | |||
| 1036 | ret = register_event_command(&trigger_stacktrace_cmd); | ||
| 1037 | WARN_ON(ret < 0); | ||
| 1038 | |||
| 1039 | return ret; | ||
| 1040 | } | ||
| 1041 | #else | ||
| 1042 | static __init int register_trigger_stacktrace_cmd(void) { return 0; } | ||
| 1043 | #endif /* CONFIG_STACKTRACE */ | ||
| 1044 | |||
| 1045 | static __init void unregister_trigger_traceon_traceoff_cmds(void) | ||
| 1046 | { | ||
| 1047 | unregister_event_command(&trigger_traceon_cmd); | ||
| 1048 | unregister_event_command(&trigger_traceoff_cmd); | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | /* Avoid typos */ | ||
| 1052 | #define ENABLE_EVENT_STR "enable_event" | ||
| 1053 | #define DISABLE_EVENT_STR "disable_event" | ||
| 1054 | |||
| 1055 | struct enable_trigger_data { | ||
| 1056 | struct ftrace_event_file *file; | ||
| 1057 | bool enable; | ||
| 1058 | }; | ||
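These two commands name their target event inside the parameter; a hedged usage sketch (the system/event names are examples):

        /*
         * # echo 'enable_event:kmem:kmalloc:1' > \
         *       /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
         *
         * The next sched_switch hit soft-enables kmem:kmalloc once;
         * 'disable_event' is symmetric, and
         * # echo '!enable_event:kmem:kmalloc' > .../trigger
         * removes the trigger again.
         */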
| 1059 | |||
| 1060 | static void | ||
| 1061 | event_enable_trigger(struct event_trigger_data *data) | ||
| 1062 | { | ||
| 1063 | struct enable_trigger_data *enable_data = data->private_data; | ||
| 1064 | |||
| 1065 | if (enable_data->enable) | ||
| 1066 | clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); | ||
| 1067 | else | ||
| 1068 | set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | static void | ||
| 1072 | event_enable_count_trigger(struct event_trigger_data *data) | ||
| 1073 | { | ||
| 1074 | struct enable_trigger_data *enable_data = data->private_data; | ||
| 1075 | |||
| 1076 | if (!data->count) | ||
| 1077 | return; | ||
| 1078 | |||
| 1079 | /* Skip if the event is already in the state we want to switch it to */ | ||
| 1080 | if (enable_data->enable == !(enable_data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) | ||
| 1081 | return; | ||
| 1082 | |||
| 1083 | if (data->count != -1) | ||
| 1084 | (data->count)--; | ||
| 1085 | |||
| 1086 | event_enable_trigger(data); | ||
| 1087 | } | ||
| 1088 | |||
| 1089 | static int | ||
| 1090 | event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | ||
| 1091 | struct event_trigger_data *data) | ||
| 1092 | { | ||
| 1093 | struct enable_trigger_data *enable_data = data->private_data; | ||
| 1094 | |||
| 1095 | seq_printf(m, "%s:%s:%s", | ||
| 1096 | enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | ||
| 1097 | enable_data->file->event_call->class->system, | ||
| 1098 | enable_data->file->event_call->name); | ||
| 1099 | |||
| 1100 | if (data->count == -1) | ||
| 1101 | seq_puts(m, ":unlimited"); | ||
| 1102 | else | ||
| 1103 | seq_printf(m, ":count=%ld", data->count); | ||
| 1104 | |||
| 1105 | if (data->filter_str) | ||
| 1106 | seq_printf(m, " if %s\n", data->filter_str); | ||
| 1107 | else | ||
| 1108 | seq_puts(m, "\n"); | ||
| 1109 | |||
| 1110 | return 0; | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | static void | ||
| 1114 | event_enable_trigger_free(struct event_trigger_ops *ops, | ||
| 1115 | struct event_trigger_data *data) | ||
| 1116 | { | ||
| 1117 | struct enable_trigger_data *enable_data = data->private_data; | ||
| 1118 | |||
| 1119 | if (WARN_ON_ONCE(data->ref <= 0)) | ||
| 1120 | return; | ||
| 1121 | |||
| 1122 | data->ref--; | ||
| 1123 | if (!data->ref) { | ||
| 1124 | /* Remove the SOFT_MODE flag */ | ||
| 1125 | trace_event_enable_disable(enable_data->file, 0, 1); | ||
| 1126 | module_put(enable_data->file->event_call->mod); | ||
| 1127 | trigger_data_free(data); | ||
| 1128 | kfree(enable_data); | ||
| 1129 | } | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | static struct event_trigger_ops event_enable_trigger_ops = { | ||
| 1133 | .func = event_enable_trigger, | ||
| 1134 | .print = event_enable_trigger_print, | ||
| 1135 | .init = event_trigger_init, | ||
| 1136 | .free = event_enable_trigger_free, | ||
| 1137 | }; | ||
| 1138 | |||
| 1139 | static struct event_trigger_ops event_enable_count_trigger_ops = { | ||
| 1140 | .func = event_enable_count_trigger, | ||
| 1141 | .print = event_enable_trigger_print, | ||
| 1142 | .init = event_trigger_init, | ||
| 1143 | .free = event_enable_trigger_free, | ||
| 1144 | }; | ||
| 1145 | |||
| 1146 | static struct event_trigger_ops event_disable_trigger_ops = { | ||
| 1147 | .func = event_enable_trigger, | ||
| 1148 | .print = event_enable_trigger_print, | ||
| 1149 | .init = event_trigger_init, | ||
| 1150 | .free = event_enable_trigger_free, | ||
| 1151 | }; | ||
| 1152 | |||
| 1153 | static struct event_trigger_ops event_disable_count_trigger_ops = { | ||
| 1154 | .func = event_enable_count_trigger, | ||
| 1155 | .print = event_enable_trigger_print, | ||
| 1156 | .init = event_trigger_init, | ||
| 1157 | .free = event_enable_trigger_free, | ||
| 1158 | }; | ||
| 1159 | |||
| 1160 | static int | ||
| 1161 | event_enable_trigger_func(struct event_command *cmd_ops, | ||
| 1162 | struct ftrace_event_file *file, | ||
| 1163 | char *glob, char *cmd, char *param) | ||
| 1164 | { | ||
| 1165 | struct ftrace_event_file *event_enable_file; | ||
| 1166 | struct enable_trigger_data *enable_data; | ||
| 1167 | struct event_trigger_data *trigger_data; | ||
| 1168 | struct event_trigger_ops *trigger_ops; | ||
| 1169 | struct trace_array *tr = file->tr; | ||
| 1170 | const char *system; | ||
| 1171 | const char *event; | ||
| 1172 | char *trigger; | ||
| 1173 | char *number; | ||
| 1174 | bool enable; | ||
| 1175 | int ret; | ||
| 1176 | |||
| 1177 | if (!param) | ||
| 1178 | return -EINVAL; | ||
| 1179 | |||
| 1180 | /* separate the trigger from the filter (s:e:n [if filter]) */ | ||
| 1181 | trigger = strsep(¶m, " \t"); | ||
| 1182 | if (!trigger) | ||
| 1183 | return -EINVAL; | ||
| 1184 | |||
| 1185 | system = strsep(&trigger, ":"); | ||
| 1186 | if (!trigger) | ||
| 1187 | return -EINVAL; | ||
| 1188 | |||
| 1189 | event = strsep(&trigger, ":"); | ||
| 1190 | |||
| 1191 | ret = -EINVAL; | ||
| 1192 | event_enable_file = find_event_file(tr, system, event); | ||
| 1193 | if (!event_enable_file) | ||
| 1194 | goto out; | ||
| 1195 | |||
| 1196 | enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; | ||
| 1197 | |||
| 1198 | trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); | ||
| 1199 | |||
| 1200 | ret = -ENOMEM; | ||
| 1201 | trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL); | ||
| 1202 | if (!trigger_data) | ||
| 1203 | goto out; | ||
| 1204 | |||
| 1205 | enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL); | ||
| 1206 | if (!enable_data) { | ||
| 1207 | kfree(trigger_data); | ||
| 1208 | goto out; | ||
| 1209 | } | ||
| 1210 | |||
| 1211 | trigger_data->count = -1; | ||
| 1212 | trigger_data->ops = trigger_ops; | ||
| 1213 | trigger_data->cmd_ops = cmd_ops; | ||
| 1214 | INIT_LIST_HEAD(&trigger_data->list); | ||
| 1215 | RCU_INIT_POINTER(trigger_data->filter, NULL); | ||
| 1216 | |||
| 1217 | enable_data->enable = enable; | ||
| 1218 | enable_data->file = event_enable_file; | ||
| 1219 | trigger_data->private_data = enable_data; | ||
| 1220 | |||
| 1221 | if (glob[0] == '!') { | ||
| 1222 | cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); | ||
| 1223 | kfree(trigger_data); | ||
| 1224 | kfree(enable_data); | ||
| 1225 | ret = 0; | ||
| 1226 | goto out; | ||
| 1227 | } | ||
| 1228 | |||
| 1229 | if (trigger) { | ||
| 1230 | number = strsep(&trigger, ":"); | ||
| 1231 | |||
| 1232 | ret = -EINVAL; | ||
| 1233 | if (!strlen(number)) | ||
| 1234 | goto out_free; | ||
| 1235 | |||
| 1236 | /* | ||
| 1237 | * Parse the trigger count into trigger_data->count; | ||
| 1238 | * -1, the default, means "unlimited". | ||
| 1239 | */ | ||
| 1240 | ret = kstrtoul(number, 0, &trigger_data->count); | ||
| 1241 | if (ret) | ||
| 1242 | goto out_free; | ||
| 1243 | } | ||
| 1244 | |||
| 1245 | if (!param) /* if param is non-empty, it's supposed to be a filter */ | ||
| 1246 | goto out_reg; | ||
| 1247 | |||
| 1248 | if (!cmd_ops->set_filter) | ||
| 1249 | goto out_reg; | ||
| 1250 | |||
| 1251 | ret = cmd_ops->set_filter(param, trigger_data, file); | ||
| 1252 | if (ret < 0) | ||
| 1253 | goto out_free; | ||
| 1254 | |||
| 1255 | out_reg: | ||
| 1256 | /* Don't let the target event's module unload while this trigger is registered */ | ||
| 1257 | ret = try_module_get(event_enable_file->event_call->mod); | ||
| 1258 | if (!ret) { | ||
| 1259 | ret = -EBUSY; | ||
| 1260 | goto out_free; | ||
| 1261 | } | ||
| 1262 | |||
| 1263 | ret = trace_event_enable_disable(event_enable_file, 1, 1); | ||
| 1264 | if (ret < 0) | ||
| 1265 | goto out_put; | ||
| 1266 | ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); | ||
| 1267 | /* | ||
| 1268 | * On success, the above returns the number of triggers registered; | ||
| 1269 | * if it registered nothing it returns zero. | ||
| 1270 | * Consider registering nothing a failure too. | ||
| 1271 | */ | ||
| 1272 | if (!ret) { | ||
| 1273 | ret = -ENOENT; | ||
| 1274 | goto out_disable; | ||
| 1275 | } else if (ret < 0) | ||
| 1276 | goto out_disable; | ||
| 1277 | /* Just return zero, not the number of enabled functions */ | ||
| 1278 | ret = 0; | ||
| 1279 | out: | ||
| 1280 | return ret; | ||
| 1281 | |||
| 1282 | out_disable: | ||
| 1283 | trace_event_enable_disable(event_enable_file, 0, 1); | ||
| 1284 | out_put: | ||
| 1285 | module_put(event_enable_file->event_call->mod); | ||
| 1286 | out_free: | ||
| 1287 | if (cmd_ops->set_filter) | ||
| 1288 | cmd_ops->set_filter(NULL, trigger_data, NULL); | ||
| 1289 | kfree(trigger_data); | ||
| 1290 | kfree(enable_data); | ||
| 1291 | goto out; | ||
| 1292 | } | ||
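A worked parse of the s:e:n form handled above (names illustrative only):

        /*
         * param   = "kmem:kmalloc:3 if nr_rq > 1"
         * trigger = "kmem:kmalloc:3"  -> system = "kmem", event = "kmalloc"
         * number  = "3"               -> trigger_data->count = 3
         * param   = "if nr_rq > 1"    -> cmd_ops->set_filter()
         */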
| 1293 | |||
| 1294 | static int event_enable_register_trigger(char *glob, | ||
| 1295 | struct event_trigger_ops *ops, | ||
| 1296 | struct event_trigger_data *data, | ||
| 1297 | struct ftrace_event_file *file) | ||
| 1298 | { | ||
| 1299 | struct enable_trigger_data *enable_data = data->private_data; | ||
| 1300 | struct enable_trigger_data *test_enable_data; | ||
| 1301 | struct event_trigger_data *test; | ||
| 1302 | int ret = 0; | ||
| 1303 | |||
| 1304 | list_for_each_entry_rcu(test, &file->triggers, list) { | ||
| 1305 | test_enable_data = test->private_data; | ||
| 1306 | if (test_enable_data && | ||
| 1307 | (test_enable_data->file == enable_data->file)) { | ||
| 1308 | ret = -EEXIST; | ||
| 1309 | goto out; | ||
| 1310 | } | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | if (data->ops->init) { | ||
| 1314 | ret = data->ops->init(data->ops, data); | ||
| 1315 | if (ret < 0) | ||
| 1316 | goto out; | ||
| 1317 | } | ||
| 1318 | |||
| 1319 | list_add_rcu(&data->list, &file->triggers); | ||
| 1320 | ret++; | ||
| 1321 | |||
| 1322 | if (trace_event_trigger_enable_disable(file, 1) < 0) { | ||
| 1323 | list_del_rcu(&data->list); | ||
| 1324 | ret--; | ||
| 1325 | } | ||
| 1326 | update_cond_flag(file); | ||
| 1327 | out: | ||
| 1328 | return ret; | ||
| 1329 | } | ||
| 1330 | |||
| 1331 | static void event_enable_unregister_trigger(char *glob, | ||
| 1332 | struct event_trigger_ops *ops, | ||
| 1333 | struct event_trigger_data *test, | ||
| 1334 | struct ftrace_event_file *file) | ||
| 1335 | { | ||
| 1336 | struct enable_trigger_data *test_enable_data = test->private_data; | ||
| 1337 | struct enable_trigger_data *enable_data; | ||
| 1338 | struct event_trigger_data *data; | ||
| 1339 | bool unregistered = false; | ||
| 1340 | |||
| 1341 | list_for_each_entry_rcu(data, &file->triggers, list) { | ||
| 1342 | enable_data = data->private_data; | ||
| 1343 | if (enable_data && | ||
| 1344 | (enable_data->file == test_enable_data->file)) { | ||
| 1345 | unregistered = true; | ||
| 1346 | list_del_rcu(&data->list); | ||
| 1347 | update_cond_flag(file); | ||
| 1348 | trace_event_trigger_enable_disable(file, 0); | ||
| 1349 | break; | ||
| 1350 | } | ||
| 1351 | } | ||
| 1352 | |||
| 1353 | if (unregistered && data->ops->free) | ||
| 1354 | data->ops->free(data->ops, data); | ||
| 1355 | } | ||
| 1356 | |||
| 1357 | static struct event_trigger_ops * | ||
| 1358 | event_enable_get_trigger_ops(char *cmd, char *param) | ||
| 1359 | { | ||
| 1360 | struct event_trigger_ops *ops; | ||
| 1361 | bool enable; | ||
| 1362 | |||
| 1363 | enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; | ||
| 1364 | |||
| 1365 | if (enable) | ||
| 1366 | ops = param ? &event_enable_count_trigger_ops : | ||
| 1367 | &event_enable_trigger_ops; | ||
| 1368 | else | ||
| 1369 | ops = param ? &event_disable_count_trigger_ops : | ||
| 1370 | &event_disable_trigger_ops; | ||
| 1371 | |||
| 1372 | return ops; | ||
| 1373 | } | ||
| 1374 | |||
| 1375 | static struct event_command trigger_enable_cmd = { | ||
| 1376 | .name = ENABLE_EVENT_STR, | ||
| 1377 | .trigger_type = ETT_EVENT_ENABLE, | ||
| 1378 | .func = event_enable_trigger_func, | ||
| 1379 | .reg = event_enable_register_trigger, | ||
| 1380 | .unreg = event_enable_unregister_trigger, | ||
| 1381 | .get_trigger_ops = event_enable_get_trigger_ops, | ||
| 1382 | .set_filter = set_trigger_filter, | ||
| 1383 | }; | ||
| 1384 | |||
| 1385 | static struct event_command trigger_disable_cmd = { | ||
| 1386 | .name = DISABLE_EVENT_STR, | ||
| 1387 | .trigger_type = ETT_EVENT_ENABLE, | ||
| 1388 | .func = event_enable_trigger_func, | ||
| 1389 | .reg = event_enable_register_trigger, | ||
| 1390 | .unreg = event_enable_unregister_trigger, | ||
| 1391 | .get_trigger_ops = event_enable_get_trigger_ops, | ||
| 1392 | .set_filter = set_trigger_filter, | ||
| 1393 | }; | ||
| 1394 | |||
| 1395 | static __init void unregister_trigger_enable_disable_cmds(void) | ||
| 1396 | { | ||
| 1397 | unregister_event_command(&trigger_enable_cmd); | ||
| 1398 | unregister_event_command(&trigger_disable_cmd); | ||
| 1399 | } | ||
| 1400 | |||
| 1401 | static __init int register_trigger_enable_disable_cmds(void) | ||
| 1402 | { | ||
| 1403 | int ret; | ||
| 1404 | |||
| 1405 | ret = register_event_command(&trigger_enable_cmd); | ||
| 1406 | if (WARN_ON(ret < 0)) | ||
| 1407 | return ret; | ||
| 1408 | ret = register_event_command(&trigger_disable_cmd); | ||
| 1409 | if (WARN_ON(ret < 0)) | ||
| 1410 | unregister_trigger_enable_disable_cmds(); | ||
| 1411 | |||
| 1412 | return ret; | ||
| 1413 | } | ||
| 1414 | |||
| 1415 | static __init int register_trigger_traceon_traceoff_cmds(void) | ||
| 1416 | { | ||
| 1417 | int ret; | ||
| 1418 | |||
| 1419 | ret = register_event_command(&trigger_traceon_cmd); | ||
| 1420 | if (WARN_ON(ret < 0)) | ||
| 1421 | return ret; | ||
| 1422 | ret = register_event_command(&trigger_traceoff_cmd); | ||
| 1423 | if (WARN_ON(ret < 0)) | ||
| 1424 | unregister_trigger_traceon_traceoff_cmds(); | ||
| 1425 | |||
| 1426 | return ret; | ||
| 1427 | } | ||
| 1428 | |||
| 1429 | __init int register_trigger_cmds(void) | ||
| 1430 | { | ||
| 1431 | register_trigger_traceon_traceoff_cmds(); | ||
| 1432 | register_trigger_snapshot_cmd(); | ||
| 1433 | register_trigger_stacktrace_cmd(); | ||
| 1434 | register_trigger_enable_disable_cmds(); | ||
| 1435 | |||
| 1436 | return 0; | ||
| 1437 | } | ||
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index dae9541ada9e..bdbae450c13e 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -27,18 +27,12 @@ | |||
| 27 | /** | 27 | /** |
| 28 | * Kprobe event core functions | 28 | * Kprobe event core functions |
| 29 | */ | 29 | */ |
| 30 | struct trace_probe { | 30 | struct trace_kprobe { |
| 31 | struct list_head list; | 31 | struct list_head list; |
| 32 | struct kretprobe rp; /* Use rp.kp for kprobe use */ | 32 | struct kretprobe rp; /* Use rp.kp for kprobe use */ |
| 33 | unsigned long nhit; | 33 | unsigned long nhit; |
| 34 | unsigned int flags; /* For TP_FLAG_* */ | ||
| 35 | const char *symbol; /* symbol name */ | 34 | const char *symbol; /* symbol name */ |
| 36 | struct ftrace_event_class class; | 35 | struct trace_probe tp; |
| 37 | struct ftrace_event_call call; | ||
| 38 | struct list_head files; | ||
| 39 | ssize_t size; /* trace entry size */ | ||
| 40 | unsigned int nr_args; | ||
| 41 | struct probe_arg args[]; | ||
| 42 | }; | 36 | }; |
| 43 | 37 | ||
| 44 | struct event_file_link { | 38 | struct event_file_link { |
| @@ -46,56 +40,46 @@ struct event_file_link { | |||
| 46 | struct list_head list; | 40 | struct list_head list; |
| 47 | }; | 41 | }; |
| 48 | 42 | ||
| 49 | #define SIZEOF_TRACE_PROBE(n) \ | 43 | #define SIZEOF_TRACE_KPROBE(n) \ |
| 50 | (offsetof(struct trace_probe, args) + \ | 44 | (offsetof(struct trace_kprobe, tp.args) + \ |
| 51 | (sizeof(struct probe_arg) * (n))) | 45 | (sizeof(struct probe_arg) * (n))) |
| 52 | 46 | ||
| 53 | 47 | ||
| 54 | static __kprobes bool trace_probe_is_return(struct trace_probe *tp) | 48 | static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk) |
| 55 | { | 49 | { |
| 56 | return tp->rp.handler != NULL; | 50 | return tk->rp.handler != NULL; |
| 57 | } | 51 | } |
| 58 | 52 | ||
| 59 | static __kprobes const char *trace_probe_symbol(struct trace_probe *tp) | 53 | static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk) |
| 60 | { | 54 | { |
| 61 | return tp->symbol ? tp->symbol : "unknown"; | 55 | return tk->symbol ? tk->symbol : "unknown"; |
| 62 | } | 56 | } |
| 63 | 57 | ||
| 64 | static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp) | 58 | static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk) |
| 65 | { | 59 | { |
| 66 | return tp->rp.kp.offset; | 60 | return tk->rp.kp.offset; |
| 67 | } | 61 | } |
| 68 | 62 | ||
| 69 | static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp) | 63 | static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk) |
| 70 | { | 64 | { |
| 71 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); | 65 | return !!(kprobe_gone(&tk->rp.kp)); |
| 72 | } | 66 | } |
| 73 | 67 | ||
| 74 | static __kprobes bool trace_probe_is_registered(struct trace_probe *tp) | 68 | static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk, |
| 75 | { | 69 | struct module *mod) |
| 76 | return !!(tp->flags & TP_FLAG_REGISTERED); | ||
| 77 | } | ||
| 78 | |||
| 79 | static __kprobes bool trace_probe_has_gone(struct trace_probe *tp) | ||
| 80 | { | ||
| 81 | return !!(kprobe_gone(&tp->rp.kp)); | ||
| 82 | } | ||
| 83 | |||
| 84 | static __kprobes bool trace_probe_within_module(struct trace_probe *tp, | ||
| 85 | struct module *mod) | ||
| 86 | { | 70 | { |
| 87 | int len = strlen(mod->name); | 71 | int len = strlen(mod->name); |
| 88 | const char *name = trace_probe_symbol(tp); | 72 | const char *name = trace_kprobe_symbol(tk); |
| 89 | return strncmp(mod->name, name, len) == 0 && name[len] == ':'; | 73 | return strncmp(mod->name, name, len) == 0 && name[len] == ':'; |
| 90 | } | 74 | } |
| 91 | 75 | ||
| 92 | static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp) | 76 | static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk) |
| 93 | { | 77 | { |
| 94 | return !!strchr(trace_probe_symbol(tp), ':'); | 78 | return !!strchr(trace_kprobe_symbol(tk), ':'); |
| 95 | } | 79 | } |
| 96 | 80 | ||
| 97 | static int register_probe_event(struct trace_probe *tp); | 81 | static int register_kprobe_event(struct trace_kprobe *tk); |
| 98 | static int unregister_probe_event(struct trace_probe *tp); | 82 | static int unregister_kprobe_event(struct trace_kprobe *tk); |
| 99 | 83 | ||
| 100 | static DEFINE_MUTEX(probe_lock); | 84 | static DEFINE_MUTEX(probe_lock); |
| 101 | static LIST_HEAD(probe_list); | 85 | static LIST_HEAD(probe_list); |
| @@ -104,45 +88,224 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); | |||
| 104 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, | 88 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, |
| 105 | struct pt_regs *regs); | 89 | struct pt_regs *regs); |
| 106 | 90 | ||
| 91 | /* Memory fetching by symbol */ | ||
| 92 | struct symbol_cache { | ||
| 93 | char *symbol; | ||
| 94 | long offset; | ||
| 95 | unsigned long addr; | ||
| 96 | }; | ||
| 97 | |||
| 98 | unsigned long update_symbol_cache(struct symbol_cache *sc) | ||
| 99 | { | ||
| 100 | sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol); | ||
| 101 | |||
| 102 | if (sc->addr) | ||
| 103 | sc->addr += sc->offset; | ||
| 104 | |||
| 105 | return sc->addr; | ||
| 106 | } | ||
| 107 | |||
| 108 | void free_symbol_cache(struct symbol_cache *sc) | ||
| 109 | { | ||
| 110 | kfree(sc->symbol); | ||
| 111 | kfree(sc); | ||
| 112 | } | ||
| 113 | |||
| 114 | struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) | ||
| 115 | { | ||
| 116 | struct symbol_cache *sc; | ||
| 117 | |||
| 118 | if (!sym || strlen(sym) == 0) | ||
| 119 | return NULL; | ||
| 120 | |||
| 121 | sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL); | ||
| 122 | if (!sc) | ||
| 123 | return NULL; | ||
| 124 | |||
| 125 | sc->symbol = kstrdup(sym, GFP_KERNEL); | ||
| 126 | if (!sc->symbol) { | ||
| 127 | kfree(sc); | ||
| 128 | return NULL; | ||
| 129 | } | ||
| 130 | sc->offset = offset; | ||
| 131 | update_symbol_cache(sc); | ||
| 132 | |||
| 133 | return sc; | ||
| 134 | } | ||
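A hedged usage sketch of the cache behind an '@symbol+offset' fetch argument (the real call sites live in the probe-argument parsing in trace_probe.c):

        /* '@jiffies+4' in a probe definition ends up roughly as: */
        struct symbol_cache *sc = alloc_symbol_cache("jiffies", 4);
        /* ... fetch functions read through sc->addr ... */
        update_symbol_cache(sc);        /* re-resolve, e.g. after a module loads */
        free_symbol_cache(sc);          /* when the probe argument is freed */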
| 135 | |||
| 136 | /* | ||
| 137 | * Kprobes-specific fetch functions | ||
| 138 | */ | ||
| 139 | #define DEFINE_FETCH_stack(type) \ | ||
| 140 | static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ | ||
| 141 | void *offset, void *dest) \ | ||
| 142 | { \ | ||
| 143 | *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \ | ||
| 144 | (unsigned int)((unsigned long)offset)); \ | ||
| 145 | } | ||
| 146 | DEFINE_BASIC_FETCH_FUNCS(stack) | ||
| 147 | /* No string on the stack entry */ | ||
| 148 | #define fetch_stack_string NULL | ||
| 149 | #define fetch_stack_string_size NULL | ||
| 150 | |||
| 151 | #define DEFINE_FETCH_memory(type) \ | ||
| 152 | static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ | ||
| 153 | void *addr, void *dest) \ | ||
| 154 | { \ | ||
| 155 | type retval; \ | ||
| 156 | if (probe_kernel_address(addr, retval)) \ | ||
| 157 | *(type *)dest = 0; \ | ||
| 158 | else \ | ||
| 159 | *(type *)dest = retval; \ | ||
| 160 | } | ||
| 161 | DEFINE_BASIC_FETCH_FUNCS(memory) | ||
| 162 | /* | ||
| 163 | * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max | ||
| 164 | * length and relative data location. | ||
| 165 | */ | ||
| 166 | static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, | ||
| 167 | void *addr, void *dest) | ||
| 168 | { | ||
| 169 | long ret; | ||
| 170 | int maxlen = get_rloc_len(*(u32 *)dest); | ||
| 171 | u8 *dst = get_rloc_data(dest); | ||
| 172 | u8 *src = addr; | ||
| 173 | mm_segment_t old_fs = get_fs(); | ||
| 174 | |||
| 175 | if (!maxlen) | ||
| 176 | return; | ||
| 177 | |||
| 178 | /* | ||
| 179 | * Try to get string again, since the string can be changed while | ||
| 180 | * probing. | ||
| 181 | */ | ||
| 182 | set_fs(KERNEL_DS); | ||
| 183 | pagefault_disable(); | ||
| 184 | |||
| 185 | do | ||
| 186 | ret = __copy_from_user_inatomic(dst++, src++, 1); | ||
| 187 | while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen); | ||
| 188 | |||
| 189 | dst[-1] = '\0'; | ||
| 190 | pagefault_enable(); | ||
| 191 | set_fs(old_fs); | ||
| 192 | |||
| 193 | if (ret < 0) { /* Failed to fetch string */ | ||
| 194 | ((u8 *)get_rloc_data(dest))[0] = '\0'; | ||
| 195 | *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); | ||
| 196 | } else { | ||
| 197 | *(u32 *)dest = make_data_rloc(src - (u8 *)addr, | ||
| 198 | get_rloc_offs(*(u32 *)dest)); | ||
| 199 | } | ||
| 200 | } | ||
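The "max length and relative data location" contract above refers to the packed u32 at *dest; a hedged sketch of its layout, per the make_data_rloc()/get_rloc_*() helpers in trace_probe.h:

        /*
         *   *(u32 *)dest:  bits 31..16 = length
         *                  bits 15..0  = offset of the string data,
         *                                relative to the trace entry
         *
         * which is what a "__data_loc char[]" consumer expects.
         */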
| 201 | |||
| 202 | /* Return the length of the string, including the terminating null byte */ | ||
| 203 | static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, | ||
| 204 | void *addr, void *dest) | ||
| 205 | { | ||
| 206 | mm_segment_t old_fs; | ||
| 207 | int ret, len = 0; | ||
| 208 | u8 c; | ||
| 209 | |||
| 210 | old_fs = get_fs(); | ||
| 211 | set_fs(KERNEL_DS); | ||
| 212 | pagefault_disable(); | ||
| 213 | |||
| 214 | do { | ||
| 215 | ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); | ||
| 216 | len++; | ||
| 217 | } while (c && ret == 0 && len < MAX_STRING_SIZE); | ||
| 218 | |||
| 219 | pagefault_enable(); | ||
| 220 | set_fs(old_fs); | ||
| 221 | |||
| 222 | if (ret < 0) /* Failed to check the length */ | ||
| 223 | *(u32 *)dest = 0; | ||
| 224 | else | ||
| 225 | *(u32 *)dest = len; | ||
| 226 | } | ||
| 227 | |||
| 228 | #define DEFINE_FETCH_symbol(type) \ | ||
| 229 | __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, \ | ||
| 230 | void *data, void *dest) \ | ||
| 231 | { \ | ||
| 232 | struct symbol_cache *sc = data; \ | ||
| 233 | if (sc->addr) \ | ||
| 234 | fetch_memory_##type(regs, (void *)sc->addr, dest); \ | ||
| 235 | else \ | ||
| 236 | *(type *)dest = 0; \ | ||
| 237 | } | ||
| 238 | DEFINE_BASIC_FETCH_FUNCS(symbol) | ||
| 239 | DEFINE_FETCH_symbol(string) | ||
| 240 | DEFINE_FETCH_symbol(string_size) | ||
| 241 | |||
| 242 | /* kprobes don't support file_offset fetch methods */ | ||
| 243 | #define fetch_file_offset_u8 NULL | ||
| 244 | #define fetch_file_offset_u16 NULL | ||
| 245 | #define fetch_file_offset_u32 NULL | ||
| 246 | #define fetch_file_offset_u64 NULL | ||
| 247 | #define fetch_file_offset_string NULL | ||
| 248 | #define fetch_file_offset_string_size NULL | ||
| 249 | |||
| 250 | /* Fetch type information table */ | ||
| 251 | const struct fetch_type kprobes_fetch_type_table[] = { | ||
| 252 | /* Special types */ | ||
| 253 | [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, | ||
| 254 | sizeof(u32), 1, "__data_loc char[]"), | ||
| 255 | [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, | ||
| 256 | string_size, sizeof(u32), 0, "u32"), | ||
| 257 | /* Basic types */ | ||
| 258 | ASSIGN_FETCH_TYPE(u8, u8, 0), | ||
| 259 | ASSIGN_FETCH_TYPE(u16, u16, 0), | ||
| 260 | ASSIGN_FETCH_TYPE(u32, u32, 0), | ||
| 261 | ASSIGN_FETCH_TYPE(u64, u64, 0), | ||
| 262 | ASSIGN_FETCH_TYPE(s8, u8, 1), | ||
| 263 | ASSIGN_FETCH_TYPE(s16, u16, 1), | ||
| 264 | ASSIGN_FETCH_TYPE(s32, u32, 1), | ||
| 265 | ASSIGN_FETCH_TYPE(s64, u64, 1), | ||
| 266 | |||
| 267 | ASSIGN_FETCH_TYPE_END | ||
| 268 | }; | ||
| 269 | |||
| 107 | /* | 270 | /* |
| 108 | * Allocate new trace_probe and initialize it (including kprobes). | 271 | * Allocate new trace_probe and initialize it (including kprobes). |
| 109 | */ | 272 | */ |
| 110 | static struct trace_probe *alloc_trace_probe(const char *group, | 273 | static struct trace_kprobe *alloc_trace_kprobe(const char *group, |
| 111 | const char *event, | 274 | const char *event, |
| 112 | void *addr, | 275 | void *addr, |
| 113 | const char *symbol, | 276 | const char *symbol, |
| 114 | unsigned long offs, | 277 | unsigned long offs, |
| 115 | int nargs, bool is_return) | 278 | int nargs, bool is_return) |
| 116 | { | 279 | { |
| 117 | struct trace_probe *tp; | 280 | struct trace_kprobe *tk; |
| 118 | int ret = -ENOMEM; | 281 | int ret = -ENOMEM; |
| 119 | 282 | ||
| 120 | tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL); | 283 | tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL); |
| 121 | if (!tp) | 284 | if (!tk) |
| 122 | return ERR_PTR(ret); | 285 | return ERR_PTR(ret); |
| 123 | 286 | ||
| 124 | if (symbol) { | 287 | if (symbol) { |
| 125 | tp->symbol = kstrdup(symbol, GFP_KERNEL); | 288 | tk->symbol = kstrdup(symbol, GFP_KERNEL); |
| 126 | if (!tp->symbol) | 289 | if (!tk->symbol) |
| 127 | goto error; | 290 | goto error; |
| 128 | tp->rp.kp.symbol_name = tp->symbol; | 291 | tk->rp.kp.symbol_name = tk->symbol; |
| 129 | tp->rp.kp.offset = offs; | 292 | tk->rp.kp.offset = offs; |
| 130 | } else | 293 | } else |
| 131 | tp->rp.kp.addr = addr; | 294 | tk->rp.kp.addr = addr; |
| 132 | 295 | ||
| 133 | if (is_return) | 296 | if (is_return) |
| 134 | tp->rp.handler = kretprobe_dispatcher; | 297 | tk->rp.handler = kretprobe_dispatcher; |
| 135 | else | 298 | else |
| 136 | tp->rp.kp.pre_handler = kprobe_dispatcher; | 299 | tk->rp.kp.pre_handler = kprobe_dispatcher; |
| 137 | 300 | ||
| 138 | if (!event || !is_good_name(event)) { | 301 | if (!event || !is_good_name(event)) { |
| 139 | ret = -EINVAL; | 302 | ret = -EINVAL; |
| 140 | goto error; | 303 | goto error; |
| 141 | } | 304 | } |
| 142 | 305 | ||
| 143 | tp->call.class = &tp->class; | 306 | tk->tp.call.class = &tk->tp.class; |
| 144 | tp->call.name = kstrdup(event, GFP_KERNEL); | 307 | tk->tp.call.name = kstrdup(event, GFP_KERNEL); |
| 145 | if (!tp->call.name) | 308 | if (!tk->tp.call.name) |
| 146 | goto error; | 309 | goto error; |
| 147 | 310 | ||
| 148 | if (!group || !is_good_name(group)) { | 311 | if (!group || !is_good_name(group)) { |
| @@ -150,42 +313,42 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
| 150 | goto error; | 313 | goto error; |
| 151 | } | 314 | } |
| 152 | 315 | ||
| 153 | tp->class.system = kstrdup(group, GFP_KERNEL); | 316 | tk->tp.class.system = kstrdup(group, GFP_KERNEL); |
| 154 | if (!tp->class.system) | 317 | if (!tk->tp.class.system) |
| 155 | goto error; | 318 | goto error; |
| 156 | 319 | ||
| 157 | INIT_LIST_HEAD(&tp->list); | 320 | INIT_LIST_HEAD(&tk->list); |
| 158 | INIT_LIST_HEAD(&tp->files); | 321 | INIT_LIST_HEAD(&tk->tp.files); |
| 159 | return tp; | 322 | return tk; |
| 160 | error: | 323 | error: |
| 161 | kfree(tp->call.name); | 324 | kfree(tk->tp.call.name); |
| 162 | kfree(tp->symbol); | 325 | kfree(tk->symbol); |
| 163 | kfree(tp); | 326 | kfree(tk); |
| 164 | return ERR_PTR(ret); | 327 | return ERR_PTR(ret); |
| 165 | } | 328 | } |
| 166 | 329 | ||
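alloc_trace_kprobe() above combines two idioms worth noting: errors are returned through the pointer itself via ERR_PTR(), and every failure funnels through one error: label, where kfree(NULL) being a no-op makes unconditional cleanup of a partially built object safe. A self-contained sketch of both; the ERR_PTR/IS_ERR macros are reconstructed from their usual kernel form and the -4095 bound is an assumption carried over from include/linux/err.h:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Userspace re-creation of the kernel's pointer-error encoding. */
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)

struct probe {
	char *symbol;
	char *event;
};

static struct probe *alloc_probe(const char *symbol, const char *event)
{
	struct probe *p = calloc(1, sizeof(*p));
	int ret = -ENOMEM;

	if (!p)
		return ERR_PTR(ret);

	p->symbol = strdup(symbol);
	if (!p->symbol)
		goto error;

	if (!event || !*event) {	/* validation failure */
		ret = -EINVAL;
		goto error;
	}
	p->event = strdup(event);
	if (!p->event)
		goto error;

	return p;
error:
	/* free(NULL) is a no-op, like kfree(NULL): safe for partial init */
	free(p->event);
	free(p->symbol);
	free(p);
	return ERR_PTR(ret);
}

int main(void)
{
	struct probe *p = alloc_probe("do_sys_open", "");

	if (IS_ERR(p))
		printf("allocation failed: %ld\n", PTR_ERR(p)); /* -22 */
	return 0;
}
```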
| 167 | static void free_trace_probe(struct trace_probe *tp) | 330 | static void free_trace_kprobe(struct trace_kprobe *tk) |
| 168 | { | 331 | { |
| 169 | int i; | 332 | int i; |
| 170 | 333 | ||
| 171 | for (i = 0; i < tp->nr_args; i++) | 334 | for (i = 0; i < tk->tp.nr_args; i++) |
| 172 | traceprobe_free_probe_arg(&tp->args[i]); | 335 | traceprobe_free_probe_arg(&tk->tp.args[i]); |
| 173 | 336 | ||
| 174 | kfree(tp->call.class->system); | 337 | kfree(tk->tp.call.class->system); |
| 175 | kfree(tp->call.name); | 338 | kfree(tk->tp.call.name); |
| 176 | kfree(tp->symbol); | 339 | kfree(tk->symbol); |
| 177 | kfree(tp); | 340 | kfree(tk); |
| 178 | } | 341 | } |
| 179 | 342 | ||
| 180 | static struct trace_probe *find_trace_probe(const char *event, | 343 | static struct trace_kprobe *find_trace_kprobe(const char *event, |
| 181 | const char *group) | 344 | const char *group) |
| 182 | { | 345 | { |
| 183 | struct trace_probe *tp; | 346 | struct trace_kprobe *tk; |
| 184 | 347 | ||
| 185 | list_for_each_entry(tp, &probe_list, list) | 348 | list_for_each_entry(tk, &probe_list, list) |
| 186 | if (strcmp(tp->call.name, event) == 0 && | 349 | if (strcmp(tk->tp.call.name, event) == 0 && |
| 187 | strcmp(tp->call.class->system, group) == 0) | 350 | strcmp(tk->tp.call.class->system, group) == 0) |
| 188 | return tp; | 351 | return tk; |
| 189 | return NULL; | 352 | return NULL; |
| 190 | } | 353 | } |
| 191 | 354 | ||
| @@ -194,7 +357,7 @@ static struct trace_probe *find_trace_probe(const char *event, | |||
| 194 | * if the file is NULL, enable "perf" handler, or enable "trace" handler. | 357 | * if the file is NULL, enable "perf" handler, or enable "trace" handler. |
| 195 | */ | 358 | */ |
| 196 | static int | 359 | static int |
| 197 | enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | 360 | enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) |
| 198 | { | 361 | { |
| 199 | int ret = 0; | 362 | int ret = 0; |
| 200 | 363 | ||
| @@ -208,17 +371,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 208 | } | 371 | } |
| 209 | 372 | ||
| 210 | link->file = file; | 373 | link->file = file; |
| 211 | list_add_tail_rcu(&link->list, &tp->files); | 374 | list_add_tail_rcu(&link->list, &tk->tp.files); |
| 212 | 375 | ||
| 213 | tp->flags |= TP_FLAG_TRACE; | 376 | tk->tp.flags |= TP_FLAG_TRACE; |
| 214 | } else | 377 | } else |
| 215 | tp->flags |= TP_FLAG_PROFILE; | 378 | tk->tp.flags |= TP_FLAG_PROFILE; |
| 216 | 379 | ||
| 217 | if (trace_probe_is_registered(tp) && !trace_probe_has_gone(tp)) { | 380 | if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) { |
| 218 | if (trace_probe_is_return(tp)) | 381 | if (trace_kprobe_is_return(tk)) |
| 219 | ret = enable_kretprobe(&tp->rp); | 382 | ret = enable_kretprobe(&tk->rp); |
| 220 | else | 383 | else |
| 221 | ret = enable_kprobe(&tp->rp.kp); | 384 | ret = enable_kprobe(&tk->rp.kp); |
| 222 | } | 385 | } |
| 223 | out: | 386 | out: |
| 224 | return ret; | 387 | return ret; |
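Note how one flags word serves two independent consumers here: TP_FLAG_TRACE for ftrace event files and TP_FLAG_PROFILE for perf, with the underlying k*probe armed only while at least one bit is set. A hedged sketch of that pattern, with all names invented:

```c
#include <stdio.h>

#define TP_FLAG_TRACE   (1 << 0)
#define TP_FLAG_PROFILE (1 << 1)

struct probe {
	unsigned int flags;
	int armed;		/* stands in for the registered kprobe */
};

/* Arm/disarm only on transitions between "no consumer" and "some". */
static void update_arming(struct probe *p)
{
	int want = (p->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)) != 0;

	if (want != p->armed) {
		p->armed = want;
		printf("probe %s\n", want ? "armed" : "disarmed");
	}
}

static void enable(struct probe *p, unsigned int flag)
{
	p->flags |= flag;
	update_arming(p);
}

static void disable(struct probe *p, unsigned int flag)
{
	p->flags &= ~flag;
	update_arming(p);
}

int main(void)
{
	struct probe p = { 0, 0 };

	enable(&p, TP_FLAG_TRACE);	/* armed       */
	enable(&p, TP_FLAG_PROFILE);	/* stays armed */
	disable(&p, TP_FLAG_TRACE);	/* still armed */
	disable(&p, TP_FLAG_PROFILE);	/* disarmed    */
	return 0;
}
```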
| @@ -241,14 +404,14 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 241 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. | 404 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. |
| 242 | */ | 405 | */ |
| 243 | static int | 406 | static int |
| 244 | disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | 407 | disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) |
| 245 | { | 408 | { |
| 246 | struct event_file_link *link = NULL; | 409 | struct event_file_link *link = NULL; |
| 247 | int wait = 0; | 410 | int wait = 0; |
| 248 | int ret = 0; | 411 | int ret = 0; |
| 249 | 412 | ||
| 250 | if (file) { | 413 | if (file) { |
| 251 | link = find_event_file_link(tp, file); | 414 | link = find_event_file_link(&tk->tp, file); |
| 252 | if (!link) { | 415 | if (!link) { |
| 253 | ret = -EINVAL; | 416 | ret = -EINVAL; |
| 254 | goto out; | 417 | goto out; |
| @@ -256,18 +419,18 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 256 | 419 | ||
| 257 | list_del_rcu(&link->list); | 420 | list_del_rcu(&link->list); |
| 258 | wait = 1; | 421 | wait = 1; |
| 259 | if (!list_empty(&tp->files)) | 422 | if (!list_empty(&tk->tp.files)) |
| 260 | goto out; | 423 | goto out; |
| 261 | 424 | ||
| 262 | tp->flags &= ~TP_FLAG_TRACE; | 425 | tk->tp.flags &= ~TP_FLAG_TRACE; |
| 263 | } else | 426 | } else |
| 264 | tp->flags &= ~TP_FLAG_PROFILE; | 427 | tk->tp.flags &= ~TP_FLAG_PROFILE; |
| 265 | 428 | ||
| 266 | if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) { | 429 | if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) { |
| 267 | if (trace_probe_is_return(tp)) | 430 | if (trace_kprobe_is_return(tk)) |
| 268 | disable_kretprobe(&tp->rp); | 431 | disable_kretprobe(&tk->rp); |
| 269 | else | 432 | else |
| 270 | disable_kprobe(&tp->rp.kp); | 433 | disable_kprobe(&tk->rp.kp); |
| 271 | wait = 1; | 434 | wait = 1; |
| 272 | } | 435 | } |
| 273 | out: | 436 | out: |
| @@ -288,40 +451,40 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
| 288 | } | 451 | } |
| 289 | 452 | ||
| 290 | /* Internal register function - just handle k*probes and flags */ | 453 | /* Internal register function - just handle k*probes and flags */ |
| 291 | static int __register_trace_probe(struct trace_probe *tp) | 454 | static int __register_trace_kprobe(struct trace_kprobe *tk) |
| 292 | { | 455 | { |
| 293 | int i, ret; | 456 | int i, ret; |
| 294 | 457 | ||
| 295 | if (trace_probe_is_registered(tp)) | 458 | if (trace_probe_is_registered(&tk->tp)) |
| 296 | return -EINVAL; | 459 | return -EINVAL; |
| 297 | 460 | ||
| 298 | for (i = 0; i < tp->nr_args; i++) | 461 | for (i = 0; i < tk->tp.nr_args; i++) |
| 299 | traceprobe_update_arg(&tp->args[i]); | 462 | traceprobe_update_arg(&tk->tp.args[i]); |
| 300 | 463 | ||
| 301 | /* Set/clear disabled flag according to tp->flags */ | 464 | /* Set/clear disabled flag according to tp->flags */ | ||
| 302 | if (trace_probe_is_enabled(tp)) | 465 | if (trace_probe_is_enabled(&tk->tp)) |
| 303 | tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED; | 466 | tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED; |
| 304 | else | 467 | else |
| 305 | tp->rp.kp.flags |= KPROBE_FLAG_DISABLED; | 468 | tk->rp.kp.flags |= KPROBE_FLAG_DISABLED; |
| 306 | 469 | ||
| 307 | if (trace_probe_is_return(tp)) | 470 | if (trace_kprobe_is_return(tk)) |
| 308 | ret = register_kretprobe(&tp->rp); | 471 | ret = register_kretprobe(&tk->rp); |
| 309 | else | 472 | else |
| 310 | ret = register_kprobe(&tp->rp.kp); | 473 | ret = register_kprobe(&tk->rp.kp); |
| 311 | 474 | ||
| 312 | if (ret == 0) | 475 | if (ret == 0) |
| 313 | tp->flags |= TP_FLAG_REGISTERED; | 476 | tk->tp.flags |= TP_FLAG_REGISTERED; |
| 314 | else { | 477 | else { |
| 315 | pr_warning("Could not insert probe at %s+%lu: %d\n", | 478 | pr_warning("Could not insert probe at %s+%lu: %d\n", |
| 316 | trace_probe_symbol(tp), trace_probe_offset(tp), ret); | 479 | trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret); |
| 317 | if (ret == -ENOENT && trace_probe_is_on_module(tp)) { | 480 | if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { |
| 318 | pr_warning("This probe might be able to register after" | 481 | pr_warning("This probe might be able to register after" |
| 319 | "target module is loaded. Continue.\n"); | 482 | "target module is loaded. Continue.\n"); |
| 320 | ret = 0; | 483 | ret = 0; |
| 321 | } else if (ret == -EILSEQ) { | 484 | } else if (ret == -EILSEQ) { |
| 322 | pr_warning("Probing address(0x%p) is not an " | 485 | pr_warning("Probing address(0x%p) is not an " |
| 323 | "instruction boundary.\n", | 486 | "instruction boundary.\n", |
| 324 | tp->rp.kp.addr); | 487 | tk->rp.kp.addr); |
| 325 | ret = -EINVAL; | 488 | ret = -EINVAL; |
| 326 | } | 489 | } |
| 327 | } | 490 | } |
| @@ -330,67 +493,67 @@ static int __register_trace_probe(struct trace_probe *tp) | |||
| 330 | } | 493 | } |
| 331 | 494 | ||
| 332 | /* Internal unregister function - just handle k*probes and flags */ | 495 | /* Internal unregister function - just handle k*probes and flags */ |
| 333 | static void __unregister_trace_probe(struct trace_probe *tp) | 496 | static void __unregister_trace_kprobe(struct trace_kprobe *tk) |
| 334 | { | 497 | { |
| 335 | if (trace_probe_is_registered(tp)) { | 498 | if (trace_probe_is_registered(&tk->tp)) { |
| 336 | if (trace_probe_is_return(tp)) | 499 | if (trace_kprobe_is_return(tk)) |
| 337 | unregister_kretprobe(&tp->rp); | 500 | unregister_kretprobe(&tk->rp); |
| 338 | else | 501 | else |
| 339 | unregister_kprobe(&tp->rp.kp); | 502 | unregister_kprobe(&tk->rp.kp); |
| 340 | tp->flags &= ~TP_FLAG_REGISTERED; | 503 | tk->tp.flags &= ~TP_FLAG_REGISTERED; |
| 341 | /* Cleanup kprobe for reuse */ | 504 | /* Cleanup kprobe for reuse */ |
| 342 | if (tp->rp.kp.symbol_name) | 505 | if (tk->rp.kp.symbol_name) |
| 343 | tp->rp.kp.addr = NULL; | 506 | tk->rp.kp.addr = NULL; |
| 344 | } | 507 | } |
| 345 | } | 508 | } |
| 346 | 509 | ||
| 347 | /* Unregister a trace_probe and probe_event: call with probe_lock held */ | 510 | /* Unregister a trace_probe and probe_event: call with probe_lock held */ | ||
| 348 | static int unregister_trace_probe(struct trace_probe *tp) | 511 | static int unregister_trace_kprobe(struct trace_kprobe *tk) |
| 349 | { | 512 | { |
| 350 | /* Enabled event can not be unregistered */ | 513 | /* Enabled event can not be unregistered */ |
| 351 | if (trace_probe_is_enabled(tp)) | 514 | if (trace_probe_is_enabled(&tk->tp)) |
| 352 | return -EBUSY; | 515 | return -EBUSY; |
| 353 | 516 | ||
| 354 | /* Will fail if probe is being used by ftrace or perf */ | 517 | /* Will fail if probe is being used by ftrace or perf */ |
| 355 | if (unregister_probe_event(tp)) | 518 | if (unregister_kprobe_event(tk)) |
| 356 | return -EBUSY; | 519 | return -EBUSY; |
| 357 | 520 | ||
| 358 | __unregister_trace_probe(tp); | 521 | __unregister_trace_kprobe(tk); |
| 359 | list_del(&tp->list); | 522 | list_del(&tk->list); |
| 360 | 523 | ||
| 361 | return 0; | 524 | return 0; |
| 362 | } | 525 | } |
| 363 | 526 | ||
| 364 | /* Register a trace_probe and probe_event */ | 527 | /* Register a trace_probe and probe_event */ |
| 365 | static int register_trace_probe(struct trace_probe *tp) | 528 | static int register_trace_kprobe(struct trace_kprobe *tk) |
| 366 | { | 529 | { |
| 367 | struct trace_probe *old_tp; | 530 | struct trace_kprobe *old_tk; |
| 368 | int ret; | 531 | int ret; |
| 369 | 532 | ||
| 370 | mutex_lock(&probe_lock); | 533 | mutex_lock(&probe_lock); |
| 371 | 534 | ||
| 372 | /* Delete old (same name) event if it exists */ | 535 | /* Delete old (same name) event if it exists */ | ||
| 373 | old_tp = find_trace_probe(tp->call.name, tp->call.class->system); | 536 | old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); |
| 374 | if (old_tp) { | 537 | if (old_tk) { |
| 375 | ret = unregister_trace_probe(old_tp); | 538 | ret = unregister_trace_kprobe(old_tk); |
| 376 | if (ret < 0) | 539 | if (ret < 0) |
| 377 | goto end; | 540 | goto end; |
| 378 | free_trace_probe(old_tp); | 541 | free_trace_kprobe(old_tk); |
| 379 | } | 542 | } |
| 380 | 543 | ||
| 381 | /* Register new event */ | 544 | /* Register new event */ |
| 382 | ret = register_probe_event(tp); | 545 | ret = register_kprobe_event(tk); |
| 383 | if (ret) { | 546 | if (ret) { |
| 384 | pr_warning("Failed to register probe event(%d)\n", ret); | 547 | pr_warning("Failed to register probe event(%d)\n", ret); |
| 385 | goto end; | 548 | goto end; |
| 386 | } | 549 | } |
| 387 | 550 | ||
| 388 | /* Register k*probe */ | 551 | /* Register k*probe */ |
| 389 | ret = __register_trace_probe(tp); | 552 | ret = __register_trace_kprobe(tk); |
| 390 | if (ret < 0) | 553 | if (ret < 0) |
| 391 | unregister_probe_event(tp); | 554 | unregister_kprobe_event(tk); |
| 392 | else | 555 | else |
| 393 | list_add_tail(&tp->list, &probe_list); | 556 | list_add_tail(&tk->list, &probe_list); |
| 394 | 557 | ||
| 395 | end: | 558 | end: |
| 396 | mutex_unlock(&probe_lock); | 559 | mutex_unlock(&probe_lock); |
| @@ -398,11 +561,11 @@ end: | |||
| 398 | } | 561 | } |
| 399 | 562 | ||
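register_trace_kprobe() takes probe_lock, silently replaces any same-named event, registers the new trace event, and unwinds that registration if arming the probe fails. A single-threaded sketch of the replace-then-register flow; the mutex and event plumbing are elided and all names are invented:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event {
	char name[32];
	struct event *next;
};

static struct event *events;	/* stands in for probe_list */

static struct event *find_event(const char *name)
{
	for (struct event *e = events; e; e = e->next)
		if (strcmp(e->name, name) == 0)
			return e;
	return NULL;
}

static void unregister_event(struct event *e)
{
	struct event **pp = &events;

	while (*pp && *pp != e)
		pp = &(*pp)->next;
	if (*pp)
		*pp = e->next;
	free(e);
}

/* Delete any old event of the same name, then link the new one; in
 * the kernel this whole sequence runs under probe_lock.             */
static int register_event(const char *name)
{
	struct event *old = find_event(name), *e;

	if (old)
		unregister_event(old);

	e = calloc(1, sizeof(*e));
	if (!e)
		return -1;
	snprintf(e->name, sizeof(e->name), "%s", name);
	e->next = events;
	events = e;
	return 0;
}

int main(void)
{
	register_event("myprobe");
	register_event("myprobe");	/* replaces, does not duplicate */
	printf("registered: %s\n", events->name);
	printf("duplicates: %s\n", events->next ? "yes" : "no");
	return 0;
}
```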
| 400 | /* Module notifier callback, checking events on the module */ | 563 | /* Module notifier callback, checking events on the module */ | ||
| 401 | static int trace_probe_module_callback(struct notifier_block *nb, | 564 | static int trace_kprobe_module_callback(struct notifier_block *nb, |
| 402 | unsigned long val, void *data) | 565 | unsigned long val, void *data) |
| 403 | { | 566 | { |
| 404 | struct module *mod = data; | 567 | struct module *mod = data; |
| 405 | struct trace_probe *tp; | 568 | struct trace_kprobe *tk; |
| 406 | int ret; | 569 | int ret; |
| 407 | 570 | ||
| 408 | if (val != MODULE_STATE_COMING) | 571 | if (val != MODULE_STATE_COMING) |
| @@ -410,15 +573,15 @@ static int trace_probe_module_callback(struct notifier_block *nb, | |||
| 410 | 573 | ||
| 411 | /* Update probes on coming module */ | 574 | /* Update probes on coming module */ |
| 412 | mutex_lock(&probe_lock); | 575 | mutex_lock(&probe_lock); |
| 413 | list_for_each_entry(tp, &probe_list, list) { | 576 | list_for_each_entry(tk, &probe_list, list) { |
| 414 | if (trace_probe_within_module(tp, mod)) { | 577 | if (trace_kprobe_within_module(tk, mod)) { |
| 415 | /* No need to check busy - this probe should already be gone. */ | 578 | /* No need to check busy - this probe should already be gone. */ | ||
| 416 | __unregister_trace_probe(tp); | 579 | __unregister_trace_kprobe(tk); |
| 417 | ret = __register_trace_probe(tp); | 580 | ret = __register_trace_kprobe(tk); |
| 418 | if (ret) | 581 | if (ret) |
| 419 | pr_warning("Failed to re-register probe %s on" | 582 | pr_warning("Failed to re-register probe %s on" |
| 420 | "%s: %d\n", | 583 | "%s: %d\n", |
| 421 | tp->call.name, mod->name, ret); | 584 | tk->tp.call.name, mod->name, ret); |
| 422 | } | 585 | } |
| 423 | } | 586 | } |
| 424 | mutex_unlock(&probe_lock); | 587 | mutex_unlock(&probe_lock); |
| @@ -426,12 +589,12 @@ static int trace_probe_module_callback(struct notifier_block *nb, | |||
| 426 | return NOTIFY_DONE; | 589 | return NOTIFY_DONE; |
| 427 | } | 590 | } |
| 428 | 591 | ||
| 429 | static struct notifier_block trace_probe_module_nb = { | 592 | static struct notifier_block trace_kprobe_module_nb = { |
| 430 | .notifier_call = trace_probe_module_callback, | 593 | .notifier_call = trace_kprobe_module_callback, |
| 431 | .priority = 1 /* Invoked after kprobe module callback */ | 594 | .priority = 1 /* Invoked after kprobe module callback */ |
| 432 | }; | 595 | }; |
| 433 | 596 | ||
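The notifier block above re-registers probes when their target module finally loads. A toy priority-ordered notifier chain in the same spirit; it is simplified, and the priority values are chosen only so the kprobes-core callback demonstrably runs first, per the comment above:

```c
#include <stdio.h>

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long val, void *data);
	struct notifier_block *next;
	int priority;		/* higher priority runs first here */
};

static struct notifier_block *chain;

/* Insert keeping the chain sorted by descending priority. */
static void register_notifier(struct notifier_block *nb)
{
	struct notifier_block **pp = &chain;

	while (*pp && (*pp)->priority >= nb->priority)
		pp = &(*pp)->next;
	nb->next = *pp;
	*pp = nb;
}

static void call_chain(unsigned long val, void *data)
{
	for (struct notifier_block *nb = chain; nb; nb = nb->next)
		nb->notifier_call(nb, val, data);
}

static int kprobes_cb(struct notifier_block *nb, unsigned long val, void *d)
{
	printf("kprobes core: module event %lu\n", val);
	return 0;
}

static int trace_cb(struct notifier_block *nb, unsigned long val, void *d)
{
	printf("trace_kprobe: re-register probes on module\n");
	return 0;
}

static struct notifier_block kprobes_nb = { kprobes_cb, NULL, 2 };
static struct notifier_block trace_nb   = { trace_cb,   NULL, 1 };

int main(void)
{
	register_notifier(&trace_nb);
	register_notifier(&kprobes_nb);
	call_chain(1 /* MODULE_STATE_COMING */, NULL);
	return 0;
}
```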
| 434 | static int create_trace_probe(int argc, char **argv) | 597 | static int create_trace_kprobe(int argc, char **argv) |
| 435 | { | 598 | { |
| 436 | /* | 599 | /* |
| 437 | * Argument syntax: | 600 | * Argument syntax: |
| @@ -451,7 +614,7 @@ static int create_trace_probe(int argc, char **argv) | |||
| 451 | * Type of args: | 614 | * Type of args: |
| 452 | * FETCHARG:TYPE : use TYPE instead of unsigned long. | 615 | * FETCHARG:TYPE : use TYPE instead of unsigned long. |
| 453 | */ | 616 | */ |
| 454 | struct trace_probe *tp; | 617 | struct trace_kprobe *tk; |
| 455 | int i, ret = 0; | 618 | int i, ret = 0; |
| 456 | bool is_return = false, is_delete = false; | 619 | bool is_return = false, is_delete = false; |
| 457 | char *symbol = NULL, *event = NULL, *group = NULL; | 620 | char *symbol = NULL, *event = NULL, *group = NULL; |
| @@ -498,16 +661,16 @@ static int create_trace_probe(int argc, char **argv) | |||
| 498 | return -EINVAL; | 661 | return -EINVAL; |
| 499 | } | 662 | } |
| 500 | mutex_lock(&probe_lock); | 663 | mutex_lock(&probe_lock); |
| 501 | tp = find_trace_probe(event, group); | 664 | tk = find_trace_kprobe(event, group); |
| 502 | if (!tp) { | 665 | if (!tk) { |
| 503 | mutex_unlock(&probe_lock); | 666 | mutex_unlock(&probe_lock); |
| 504 | pr_info("Event %s/%s doesn't exist.\n", group, event); | 667 | pr_info("Event %s/%s doesn't exist.\n", group, event); |
| 505 | return -ENOENT; | 668 | return -ENOENT; |
| 506 | } | 669 | } |
| 507 | /* delete an event */ | 670 | /* delete an event */ |
| 508 | ret = unregister_trace_probe(tp); | 671 | ret = unregister_trace_kprobe(tk); |
| 509 | if (ret == 0) | 672 | if (ret == 0) |
| 510 | free_trace_probe(tp); | 673 | free_trace_kprobe(tk); |
| 511 | mutex_unlock(&probe_lock); | 674 | mutex_unlock(&probe_lock); |
| 512 | return ret; | 675 | return ret; |
| 513 | } | 676 | } |
| @@ -554,47 +717,49 @@ static int create_trace_probe(int argc, char **argv) | |||
| 554 | is_return ? 'r' : 'p', addr); | 717 | is_return ? 'r' : 'p', addr); |
| 555 | event = buf; | 718 | event = buf; |
| 556 | } | 719 | } |
| 557 | tp = alloc_trace_probe(group, event, addr, symbol, offset, argc, | 720 | tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc, |
| 558 | is_return); | 721 | is_return); |
| 559 | if (IS_ERR(tp)) { | 722 | if (IS_ERR(tk)) { |
| 560 | pr_info("Failed to allocate trace_probe.(%d)\n", | 723 | pr_info("Failed to allocate trace_probe.(%d)\n", |
| 561 | (int)PTR_ERR(tp)); | 724 | (int)PTR_ERR(tk)); |
| 562 | return PTR_ERR(tp); | 725 | return PTR_ERR(tk); |
| 563 | } | 726 | } |
| 564 | 727 | ||
| 565 | /* parse arguments */ | 728 | /* parse arguments */ |
| 566 | ret = 0; | 729 | ret = 0; |
| 567 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { | 730 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { |
| 731 | struct probe_arg *parg = &tk->tp.args[i]; | ||
| 732 | |||
| 568 | /* Increment count for freeing args in error case */ | 733 | /* Increment count for freeing args in error case */ |
| 569 | tp->nr_args++; | 734 | tk->tp.nr_args++; |
| 570 | 735 | ||
| 571 | /* Parse argument name */ | 736 | /* Parse argument name */ |
| 572 | arg = strchr(argv[i], '='); | 737 | arg = strchr(argv[i], '='); |
| 573 | if (arg) { | 738 | if (arg) { |
| 574 | *arg++ = '\0'; | 739 | *arg++ = '\0'; |
| 575 | tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); | 740 | parg->name = kstrdup(argv[i], GFP_KERNEL); |
| 576 | } else { | 741 | } else { |
| 577 | arg = argv[i]; | 742 | arg = argv[i]; |
| 578 | /* If argument name is omitted, set "argN" */ | 743 | /* If argument name is omitted, set "argN" */ |
| 579 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); | 744 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); |
| 580 | tp->args[i].name = kstrdup(buf, GFP_KERNEL); | 745 | parg->name = kstrdup(buf, GFP_KERNEL); |
| 581 | } | 746 | } |
| 582 | 747 | ||
| 583 | if (!tp->args[i].name) { | 748 | if (!parg->name) { |
| 584 | pr_info("Failed to allocate argument[%d] name.\n", i); | 749 | pr_info("Failed to allocate argument[%d] name.\n", i); |
| 585 | ret = -ENOMEM; | 750 | ret = -ENOMEM; |
| 586 | goto error; | 751 | goto error; |
| 587 | } | 752 | } |
| 588 | 753 | ||
| 589 | if (!is_good_name(tp->args[i].name)) { | 754 | if (!is_good_name(parg->name)) { |
| 590 | pr_info("Invalid argument[%d] name: %s\n", | 755 | pr_info("Invalid argument[%d] name: %s\n", |
| 591 | i, tp->args[i].name); | 756 | i, parg->name); |
| 592 | ret = -EINVAL; | 757 | ret = -EINVAL; |
| 593 | goto error; | 758 | goto error; |
| 594 | } | 759 | } |
| 595 | 760 | ||
| 596 | if (traceprobe_conflict_field_name(tp->args[i].name, | 761 | if (traceprobe_conflict_field_name(parg->name, |
| 597 | tp->args, i)) { | 762 | tk->tp.args, i)) { |
| 598 | pr_info("Argument[%d] name '%s' conflicts with " | 763 | pr_info("Argument[%d] name '%s' conflicts with " |
| 599 | "another field.\n", i, argv[i]); | 764 | "another field.\n", i, argv[i]); |
| 600 | ret = -EINVAL; | 765 | ret = -EINVAL; |
| @@ -602,7 +767,7 @@ static int create_trace_probe(int argc, char **argv) | |||
| 602 | } | 767 | } |
| 603 | 768 | ||
| 604 | /* Parse fetch argument */ | 769 | /* Parse fetch argument */ |
| 605 | ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i], | 770 | ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, |
| 606 | is_return, true); | 771 | is_return, true); |
| 607 | if (ret) { | 772 | if (ret) { |
| 608 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); | 773 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
| @@ -610,35 +775,35 @@ static int create_trace_probe(int argc, char **argv) | |||
| 610 | } | 775 | } |
| 611 | } | 776 | } |
| 612 | 777 | ||
| 613 | ret = register_trace_probe(tp); | 778 | ret = register_trace_kprobe(tk); |
| 614 | if (ret) | 779 | if (ret) |
| 615 | goto error; | 780 | goto error; |
| 616 | return 0; | 781 | return 0; |
| 617 | 782 | ||
| 618 | error: | 783 | error: |
| 619 | free_trace_probe(tp); | 784 | free_trace_kprobe(tk); |
| 620 | return ret; | 785 | return ret; |
| 621 | } | 786 | } |
| 622 | 787 | ||
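Event, group, and argument names must all satisfy is_good_name(), i.e. be valid C identifiers. A userspace approximation of that check; the exact kernel predicate may differ slightly:

```c
#include <stdio.h>
#include <ctype.h>

/* Approximation of the kernel's is_good_name(): a valid C
 * identifier - leading alpha or '_', then alnum or '_'.    */
static int is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return 0;
	while (*++name) {
		if (!isalnum((unsigned char)*name) && *name != '_')
			return 0;
	}
	return 1;
}

int main(void)
{
	const char *names[] = { "arg1", "_ret", "1bad", "a-b", "" };

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-5s -> %s\n", names[i],
		       is_good_name(names[i]) ? "ok" : "rejected");
	return 0;
}
```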
| 623 | static int release_all_trace_probes(void) | 788 | static int release_all_trace_kprobes(void) |
| 624 | { | 789 | { |
| 625 | struct trace_probe *tp; | 790 | struct trace_kprobe *tk; |
| 626 | int ret = 0; | 791 | int ret = 0; |
| 627 | 792 | ||
| 628 | mutex_lock(&probe_lock); | 793 | mutex_lock(&probe_lock); |
| 629 | /* Ensure no probe is in use. */ | 794 | /* Ensure no probe is in use. */ |
| 630 | list_for_each_entry(tp, &probe_list, list) | 795 | list_for_each_entry(tk, &probe_list, list) |
| 631 | if (trace_probe_is_enabled(tp)) { | 796 | if (trace_probe_is_enabled(&tk->tp)) { |
| 632 | ret = -EBUSY; | 797 | ret = -EBUSY; |
| 633 | goto end; | 798 | goto end; |
| 634 | } | 799 | } |
| 635 | /* TODO: Use batch unregistration */ | 800 | /* TODO: Use batch unregistration */ |
| 636 | while (!list_empty(&probe_list)) { | 801 | while (!list_empty(&probe_list)) { |
| 637 | tp = list_entry(probe_list.next, struct trace_probe, list); | 802 | tk = list_entry(probe_list.next, struct trace_kprobe, list); |
| 638 | ret = unregister_trace_probe(tp); | 803 | ret = unregister_trace_kprobe(tk); |
| 639 | if (ret) | 804 | if (ret) |
| 640 | goto end; | 805 | goto end; |
| 641 | free_trace_probe(tp); | 806 | free_trace_kprobe(tk); |
| 642 | } | 807 | } |
| 643 | 808 | ||
| 644 | end: | 809 | end: |
| @@ -666,22 +831,22 @@ static void probes_seq_stop(struct seq_file *m, void *v) | |||
| 666 | 831 | ||
| 667 | static int probes_seq_show(struct seq_file *m, void *v) | 832 | static int probes_seq_show(struct seq_file *m, void *v) |
| 668 | { | 833 | { |
| 669 | struct trace_probe *tp = v; | 834 | struct trace_kprobe *tk = v; |
| 670 | int i; | 835 | int i; |
| 671 | 836 | ||
| 672 | seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p'); | 837 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); |
| 673 | seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name); | 838 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); |
| 674 | 839 | ||
| 675 | if (!tp->symbol) | 840 | if (!tk->symbol) |
| 676 | seq_printf(m, " 0x%p", tp->rp.kp.addr); | 841 | seq_printf(m, " 0x%p", tk->rp.kp.addr); |
| 677 | else if (tp->rp.kp.offset) | 842 | else if (tk->rp.kp.offset) |
| 678 | seq_printf(m, " %s+%u", trace_probe_symbol(tp), | 843 | seq_printf(m, " %s+%u", trace_kprobe_symbol(tk), |
| 679 | tp->rp.kp.offset); | 844 | tk->rp.kp.offset); |
| 680 | else | 845 | else |
| 681 | seq_printf(m, " %s", trace_probe_symbol(tp)); | 846 | seq_printf(m, " %s", trace_kprobe_symbol(tk)); |
| 682 | 847 | ||
| 683 | for (i = 0; i < tp->nr_args; i++) | 848 | for (i = 0; i < tk->tp.nr_args; i++) |
| 684 | seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm); | 849 | seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); |
| 685 | seq_printf(m, "\n"); | 850 | seq_printf(m, "\n"); |
| 686 | 851 | ||
| 687 | return 0; | 852 | return 0; |
| @@ -699,7 +864,7 @@ static int probes_open(struct inode *inode, struct file *file) | |||
| 699 | int ret; | 864 | int ret; |
| 700 | 865 | ||
| 701 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { | 866 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { |
| 702 | ret = release_all_trace_probes(); | 867 | ret = release_all_trace_kprobes(); |
| 703 | if (ret < 0) | 868 | if (ret < 0) |
| 704 | return ret; | 869 | return ret; |
| 705 | } | 870 | } |
| @@ -711,7 +876,7 @@ static ssize_t probes_write(struct file *file, const char __user *buffer, | |||
| 711 | size_t count, loff_t *ppos) | 876 | size_t count, loff_t *ppos) |
| 712 | { | 877 | { |
| 713 | return traceprobe_probes_write(file, buffer, count, ppos, | 878 | return traceprobe_probes_write(file, buffer, count, ppos, |
| 714 | create_trace_probe); | 879 | create_trace_kprobe); |
| 715 | } | 880 | } |
| 716 | 881 | ||
| 717 | static const struct file_operations kprobe_events_ops = { | 882 | static const struct file_operations kprobe_events_ops = { |
| @@ -726,10 +891,10 @@ static const struct file_operations kprobe_events_ops = { | |||
| 726 | /* Probes profiling interfaces */ | 891 | /* Probes profiling interfaces */ |
| 727 | static int probes_profile_seq_show(struct seq_file *m, void *v) | 892 | static int probes_profile_seq_show(struct seq_file *m, void *v) |
| 728 | { | 893 | { |
| 729 | struct trace_probe *tp = v; | 894 | struct trace_kprobe *tk = v; |
| 730 | 895 | ||
| 731 | seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit, | 896 | seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, |
| 732 | tp->rp.kp.nmissed); | 897 | tk->rp.kp.nmissed); |
| 733 | 898 | ||
| 734 | return 0; | 899 | return 0; |
| 735 | } | 900 | } |
| @@ -754,57 +919,9 @@ static const struct file_operations kprobe_profile_ops = { | |||
| 754 | .release = seq_release, | 919 | .release = seq_release, |
| 755 | }; | 920 | }; |
| 756 | 921 | ||
| 757 | /* Sum up total data length for dynamic arrays (strings) */ | ||
| 758 | static __kprobes int __get_data_size(struct trace_probe *tp, | ||
| 759 | struct pt_regs *regs) | ||
| 760 | { | ||
| 761 | int i, ret = 0; | ||
| 762 | u32 len; | ||
| 763 | |||
| 764 | for (i = 0; i < tp->nr_args; i++) | ||
| 765 | if (unlikely(tp->args[i].fetch_size.fn)) { | ||
| 766 | call_fetch(&tp->args[i].fetch_size, regs, &len); | ||
| 767 | ret += len; | ||
| 768 | } | ||
| 769 | |||
| 770 | return ret; | ||
| 771 | } | ||
| 772 | |||
| 773 | /* Store the value of each argument */ | ||
| 774 | static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp, | ||
| 775 | struct pt_regs *regs, | ||
| 776 | u8 *data, int maxlen) | ||
| 777 | { | ||
| 778 | int i; | ||
| 779 | u32 end = tp->size; | ||
| 780 | u32 *dl; /* Data (relative) location */ | ||
| 781 | |||
| 782 | for (i = 0; i < tp->nr_args; i++) { | ||
| 783 | if (unlikely(tp->args[i].fetch_size.fn)) { | ||
| 784 | /* | ||
| 785 | * First, we set the relative location and | ||
| 786 | * maximum data length to *dl | ||
| 787 | */ | ||
| 788 | dl = (u32 *)(data + tp->args[i].offset); | ||
| 789 | *dl = make_data_rloc(maxlen, end - tp->args[i].offset); | ||
| 790 | /* Then try to fetch string or dynamic array data */ | ||
| 791 | call_fetch(&tp->args[i].fetch, regs, dl); | ||
| 792 | /* Reduce maximum length */ | ||
| 793 | end += get_rloc_len(*dl); | ||
| 794 | maxlen -= get_rloc_len(*dl); | ||
| 795 | /* Trick here, convert data_rloc to data_loc */ | ||
| 796 | *dl = convert_rloc_to_loc(*dl, | ||
| 797 | ent_size + tp->args[i].offset); | ||
| 798 | } else | ||
| 799 | /* Just fetching data normally */ | ||
| 800 | call_fetch(&tp->args[i].fetch, regs, | ||
| 801 | data + tp->args[i].offset); | ||
| 802 | } | ||
| 803 | } | ||
| 804 | |||
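The helpers deleted above (now shared through trace_probe.h) encode each dynamic string as a u32 "data location". The packing below is reconstructed from the surrounding logic - length in the high 16 bits, offset in the low 16 - and the exact bit layout should be treated as an assumption:

```c
#include <stdio.h>
#include <stdint.h>

/* Reconstruction of the rloc/loc packing used for dynamic data:
 * high 16 bits = length, low 16 bits = offset.                  */
#define make_data_rloc(len, roffs) \
	(((uint32_t)(len) << 16) | ((uint32_t)(roffs) & 0xffff))
#define get_rloc_len(dl)	((uint32_t)(dl) >> 16)
#define get_rloc_offs(dl)	((uint32_t)(dl) & 0xffff)

/* Re-base a record-relative offset to a buffer-absolute one by
 * adding the entry header size to the low (offset) half.        */
#define convert_rloc_to_loc(dl, offs)	((uint32_t)(dl) + (offs))

int main(void)
{
	uint32_t dl = make_data_rloc(5, 24);	/* 5 bytes at offset 24 */

	printf("len=%u offs=%u\n",
	       (unsigned)get_rloc_len(dl), (unsigned)get_rloc_offs(dl));

	dl = convert_rloc_to_loc(dl, 16);	/* 16-byte entry header */
	printf("after convert: len=%u offs=%u\n",
	       (unsigned)get_rloc_len(dl), (unsigned)get_rloc_offs(dl));
	return 0;
}
```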
| 805 | /* Kprobe handler */ | 922 | /* Kprobe handler */ |
| 806 | static __kprobes void | 923 | static __kprobes void |
| 807 | __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | 924 | __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, |
| 808 | struct ftrace_event_file *ftrace_file) | 925 | struct ftrace_event_file *ftrace_file) |
| 809 | { | 926 | { |
| 810 | struct kprobe_trace_entry_head *entry; | 927 | struct kprobe_trace_entry_head *entry; |
| @@ -812,18 +929,18 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | |||
| 812 | struct ring_buffer *buffer; | 929 | struct ring_buffer *buffer; |
| 813 | int size, dsize, pc; | 930 | int size, dsize, pc; |
| 814 | unsigned long irq_flags; | 931 | unsigned long irq_flags; |
| 815 | struct ftrace_event_call *call = &tp->call; | 932 | struct ftrace_event_call *call = &tk->tp.call; |
| 816 | 933 | ||
| 817 | WARN_ON(call != ftrace_file->event_call); | 934 | WARN_ON(call != ftrace_file->event_call); |
| 818 | 935 | ||
| 819 | if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) | 936 | if (ftrace_trigger_soft_disabled(ftrace_file)) |
| 820 | return; | 937 | return; |
| 821 | 938 | ||
| 822 | local_save_flags(irq_flags); | 939 | local_save_flags(irq_flags); |
| 823 | pc = preempt_count(); | 940 | pc = preempt_count(); |
| 824 | 941 | ||
| 825 | dsize = __get_data_size(tp, regs); | 942 | dsize = __get_data_size(&tk->tp, regs); |
| 826 | size = sizeof(*entry) + tp->size + dsize; | 943 | size = sizeof(*entry) + tk->tp.size + dsize; |
| 827 | 944 | ||
| 828 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, | 945 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, |
| 829 | call->event.type, | 946 | call->event.type, |
| @@ -832,26 +949,25 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | |||
| 832 | return; | 949 | return; |
| 833 | 950 | ||
| 834 | entry = ring_buffer_event_data(event); | 951 | entry = ring_buffer_event_data(event); |
| 835 | entry->ip = (unsigned long)tp->rp.kp.addr; | 952 | entry->ip = (unsigned long)tk->rp.kp.addr; |
| 836 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 953 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
| 837 | 954 | ||
| 838 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) | 955 | event_trigger_unlock_commit_regs(ftrace_file, buffer, event, |
| 839 | trace_buffer_unlock_commit_regs(buffer, event, | 956 | entry, irq_flags, pc, regs); |
| 840 | irq_flags, pc, regs); | ||
| 841 | } | 957 | } |
| 842 | 958 | ||
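Each reserved ring-buffer event above is a variable-length record: the fixed entry head (the probed ip), then tp->size bytes of fixed argument slots, then dsize bytes of dynamic string data addressed by the u32 data locations shown earlier. A sketch of that layout arithmetic with invented sizes:

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Invented miniature of kprobe_trace_entry_head + args + dynamic tail. */
struct entry_head {
	unsigned long ip;
};

int main(void)
{
	const char *str = "hello";		/* one dynamic string arg */
	size_t fixed_args = 2 * sizeof(unsigned long);	/* tp->size   */
	size_t dsize = strlen(str) + 1;		/* __get_data_size()  */
	size_t size = sizeof(struct entry_head) + fixed_args + dsize;
	uint8_t buf[64];

	/* trace_event_buffer_lock_reserve() would hand back `size` bytes;
	 * the dynamic data lands after all fixed argument slots.          */
	memcpy(buf + sizeof(struct entry_head) + fixed_args, str, dsize);

	printf("header=%zu args=%zu dynamic=%zu total=%zu\n",
	       sizeof(struct entry_head), fixed_args, dsize, size);
	return 0;
}
```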
| 843 | static __kprobes void | 959 | static __kprobes void |
| 844 | kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) | 960 | kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs) |
| 845 | { | 961 | { |
| 846 | struct event_file_link *link; | 962 | struct event_file_link *link; |
| 847 | 963 | ||
| 848 | list_for_each_entry_rcu(link, &tp->files, list) | 964 | list_for_each_entry_rcu(link, &tk->tp.files, list) |
| 849 | __kprobe_trace_func(tp, regs, link->file); | 965 | __kprobe_trace_func(tk, regs, link->file); |
| 850 | } | 966 | } |
| 851 | 967 | ||
| 852 | /* Kretprobe handler */ | 968 | /* Kretprobe handler */ |
| 853 | static __kprobes void | 969 | static __kprobes void |
| 854 | __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | 970 | __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, |
| 855 | struct pt_regs *regs, | 971 | struct pt_regs *regs, |
| 856 | struct ftrace_event_file *ftrace_file) | 972 | struct ftrace_event_file *ftrace_file) |
| 857 | { | 973 | { |
| @@ -860,18 +976,18 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 860 | struct ring_buffer *buffer; | 976 | struct ring_buffer *buffer; |
| 861 | int size, pc, dsize; | 977 | int size, pc, dsize; |
| 862 | unsigned long irq_flags; | 978 | unsigned long irq_flags; |
| 863 | struct ftrace_event_call *call = &tp->call; | 979 | struct ftrace_event_call *call = &tk->tp.call; |
| 864 | 980 | ||
| 865 | WARN_ON(call != ftrace_file->event_call); | 981 | WARN_ON(call != ftrace_file->event_call); |
| 866 | 982 | ||
| 867 | if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) | 983 | if (ftrace_trigger_soft_disabled(ftrace_file)) |
| 868 | return; | 984 | return; |
| 869 | 985 | ||
| 870 | local_save_flags(irq_flags); | 986 | local_save_flags(irq_flags); |
| 871 | pc = preempt_count(); | 987 | pc = preempt_count(); |
| 872 | 988 | ||
| 873 | dsize = __get_data_size(tp, regs); | 989 | dsize = __get_data_size(&tk->tp, regs); |
| 874 | size = sizeof(*entry) + tp->size + dsize; | 990 | size = sizeof(*entry) + tk->tp.size + dsize; |
| 875 | 991 | ||
| 876 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, | 992 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, |
| 877 | call->event.type, | 993 | call->event.type, |
| @@ -880,23 +996,22 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 880 | return; | 996 | return; |
| 881 | 997 | ||
| 882 | entry = ring_buffer_event_data(event); | 998 | entry = ring_buffer_event_data(event); |
| 883 | entry->func = (unsigned long)tp->rp.kp.addr; | 999 | entry->func = (unsigned long)tk->rp.kp.addr; |
| 884 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1000 | entry->ret_ip = (unsigned long)ri->ret_addr; |
| 885 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1001 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
| 886 | 1002 | ||
| 887 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) | 1003 | event_trigger_unlock_commit_regs(ftrace_file, buffer, event, |
| 888 | trace_buffer_unlock_commit_regs(buffer, event, | 1004 | entry, irq_flags, pc, regs); |
| 889 | irq_flags, pc, regs); | ||
| 890 | } | 1005 | } |
| 891 | 1006 | ||
| 892 | static __kprobes void | 1007 | static __kprobes void |
| 893 | kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | 1008 | kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, |
| 894 | struct pt_regs *regs) | 1009 | struct pt_regs *regs) |
| 895 | { | 1010 | { |
| 896 | struct event_file_link *link; | 1011 | struct event_file_link *link; |
| 897 | 1012 | ||
| 898 | list_for_each_entry_rcu(link, &tp->files, list) | 1013 | list_for_each_entry_rcu(link, &tk->tp.files, list) |
| 899 | __kretprobe_trace_func(tp, ri, regs, link->file); | 1014 | __kretprobe_trace_func(tk, ri, regs, link->file); |
| 900 | } | 1015 | } |
| 901 | 1016 | ||
| 902 | /* Event entry printers */ | 1017 | /* Event entry printers */ |
| @@ -983,16 +1098,18 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 983 | { | 1098 | { |
| 984 | int ret, i; | 1099 | int ret, i; |
| 985 | struct kprobe_trace_entry_head field; | 1100 | struct kprobe_trace_entry_head field; |
| 986 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1101 | struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; |
| 987 | 1102 | ||
| 988 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 1103 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
| 989 | /* Set argument names as fields */ | 1104 | /* Set argument names as fields */ |
| 990 | for (i = 0; i < tp->nr_args; i++) { | 1105 | for (i = 0; i < tk->tp.nr_args; i++) { |
| 991 | ret = trace_define_field(event_call, tp->args[i].type->fmttype, | 1106 | struct probe_arg *parg = &tk->tp.args[i]; |
| 992 | tp->args[i].name, | 1107 | |
| 993 | sizeof(field) + tp->args[i].offset, | 1108 | ret = trace_define_field(event_call, parg->type->fmttype, |
| 994 | tp->args[i].type->size, | 1109 | parg->name, |
| 995 | tp->args[i].type->is_signed, | 1110 | sizeof(field) + parg->offset, |
| 1111 | parg->type->size, | ||
| 1112 | parg->type->is_signed, | ||
| 996 | FILTER_OTHER); | 1113 | FILTER_OTHER); |
| 997 | if (ret) | 1114 | if (ret) |
| 998 | return ret; | 1115 | return ret; |
| @@ -1004,17 +1121,19 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 1004 | { | 1121 | { |
| 1005 | int ret, i; | 1122 | int ret, i; |
| 1006 | struct kretprobe_trace_entry_head field; | 1123 | struct kretprobe_trace_entry_head field; |
| 1007 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1124 | struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; |
| 1008 | 1125 | ||
| 1009 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); | 1126 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); |
| 1010 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); | 1127 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); |
| 1011 | /* Set argument names as fields */ | 1128 | /* Set argument names as fields */ |
| 1012 | for (i = 0; i < tp->nr_args; i++) { | 1129 | for (i = 0; i < tk->tp.nr_args; i++) { |
| 1013 | ret = trace_define_field(event_call, tp->args[i].type->fmttype, | 1130 | struct probe_arg *parg = &tk->tp.args[i]; |
| 1014 | tp->args[i].name, | 1131 | |
| 1015 | sizeof(field) + tp->args[i].offset, | 1132 | ret = trace_define_field(event_call, parg->type->fmttype, |
| 1016 | tp->args[i].type->size, | 1133 | parg->name, |
| 1017 | tp->args[i].type->is_signed, | 1134 | sizeof(field) + parg->offset, |
| 1135 | parg->type->size, | ||
| 1136 | parg->type->is_signed, | ||
| 1018 | FILTER_OTHER); | 1137 | FILTER_OTHER); |
| 1019 | if (ret) | 1138 | if (ret) |
| 1020 | return ret; | 1139 | return ret; |
| @@ -1022,74 +1141,13 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 1022 | return 0; | 1141 | return 0; |
| 1023 | } | 1142 | } |
| 1024 | 1143 | ||
| 1025 | static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) | ||
| 1026 | { | ||
| 1027 | int i; | ||
| 1028 | int pos = 0; | ||
| 1029 | |||
| 1030 | const char *fmt, *arg; | ||
| 1031 | |||
| 1032 | if (!trace_probe_is_return(tp)) { | ||
| 1033 | fmt = "(%lx)"; | ||
| 1034 | arg = "REC->" FIELD_STRING_IP; | ||
| 1035 | } else { | ||
| 1036 | fmt = "(%lx <- %lx)"; | ||
| 1037 | arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP; | ||
| 1038 | } | ||
| 1039 | |||
| 1040 | /* When len=0, we just calculate the needed length */ | ||
| 1041 | #define LEN_OR_ZERO (len ? len - pos : 0) | ||
| 1042 | |||
| 1043 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); | ||
| 1044 | |||
| 1045 | for (i = 0; i < tp->nr_args; i++) { | ||
| 1046 | pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s", | ||
| 1047 | tp->args[i].name, tp->args[i].type->fmt); | ||
| 1048 | } | ||
| 1049 | |||
| 1050 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); | ||
| 1051 | |||
| 1052 | for (i = 0; i < tp->nr_args; i++) { | ||
| 1053 | if (strcmp(tp->args[i].type->name, "string") == 0) | ||
| 1054 | pos += snprintf(buf + pos, LEN_OR_ZERO, | ||
| 1055 | ", __get_str(%s)", | ||
| 1056 | tp->args[i].name); | ||
| 1057 | else | ||
| 1058 | pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", | ||
| 1059 | tp->args[i].name); | ||
| 1060 | } | ||
| 1061 | |||
| 1062 | #undef LEN_OR_ZERO | ||
| 1063 | |||
| 1064 | /* return the length of print_fmt */ | ||
| 1065 | return pos; | ||
| 1066 | } | ||
| 1067 | |||
| 1068 | static int set_print_fmt(struct trace_probe *tp) | ||
| 1069 | { | ||
| 1070 | int len; | ||
| 1071 | char *print_fmt; | ||
| 1072 | |||
| 1073 | /* First: called with 0 length to calculate the needed length */ | ||
| 1074 | len = __set_print_fmt(tp, NULL, 0); | ||
| 1075 | print_fmt = kmalloc(len + 1, GFP_KERNEL); | ||
| 1076 | if (!print_fmt) | ||
| 1077 | return -ENOMEM; | ||
| 1078 | |||
| 1079 | /* Second: actually write the @print_fmt */ | ||
| 1080 | __set_print_fmt(tp, print_fmt, len + 1); | ||
| 1081 | tp->call.print_fmt = print_fmt; | ||
| 1082 | |||
| 1083 | return 0; | ||
| 1084 | } | ||
| 1085 | |||
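The set_print_fmt() implementation removed here (consolidated into trace_probe.c, which is why the new register_kprobe_event() passes an explicit is_return flag) relies on snprintf's return value to size the buffer: a first pass with length 0 only measures, then the real buffer is allocated and filled. The pattern, self-contained:

```c
#include <stdio.h>
#include <stdlib.h>

/* Two-pass formatting: with len == 0 every snprintf gets size 0 (and
 * a NULL destination), so it only measures - snprintf returns the
 * length it *would* have written.                                    */
static int build_fmt(char *buf, int len, int nargs)
{
	int pos = 0;

#define BUF_AT      (len ? buf + pos : NULL)
#define LEN_OR_ZERO (len ? len - pos : 0)
	pos += snprintf(BUF_AT, LEN_OR_ZERO, "\"(%%lx)");
	for (int i = 0; i < nargs; i++)
		pos += snprintf(BUF_AT, LEN_OR_ZERO, " arg%d=%%lx", i + 1);
	pos += snprintf(BUF_AT, LEN_OR_ZERO, "\"");
#undef BUF_AT
#undef LEN_OR_ZERO

	return pos;	/* length excluding the trailing NUL */
}

int main(void)
{
	int len = build_fmt(NULL, 0, 3);	/* pass 1: measure */
	char *print_fmt = malloc(len + 1);

	if (!print_fmt)
		return 1;
	build_fmt(print_fmt, len + 1, 3);	/* pass 2: write   */
	printf("%d bytes: %s\n", len, print_fmt);
	free(print_fmt);
	return 0;
}
```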
| 1086 | #ifdef CONFIG_PERF_EVENTS | 1144 | #ifdef CONFIG_PERF_EVENTS |
| 1087 | 1145 | ||
| 1088 | /* Kprobe profile handler */ | 1146 | /* Kprobe profile handler */ |
| 1089 | static __kprobes void | 1147 | static __kprobes void |
| 1090 | kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | 1148 | kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) |
| 1091 | { | 1149 | { |
| 1092 | struct ftrace_event_call *call = &tp->call; | 1150 | struct ftrace_event_call *call = &tk->tp.call; |
| 1093 | struct kprobe_trace_entry_head *entry; | 1151 | struct kprobe_trace_entry_head *entry; |
| 1094 | struct hlist_head *head; | 1152 | struct hlist_head *head; |
| 1095 | int size, __size, dsize; | 1153 | int size, __size, dsize; |
| @@ -1099,8 +1157,8 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | |||
| 1099 | if (hlist_empty(head)) | 1157 | if (hlist_empty(head)) |
| 1100 | return; | 1158 | return; |
| 1101 | 1159 | ||
| 1102 | dsize = __get_data_size(tp, regs); | 1160 | dsize = __get_data_size(&tk->tp, regs); |
| 1103 | __size = sizeof(*entry) + tp->size + dsize; | 1161 | __size = sizeof(*entry) + tk->tp.size + dsize; |
| 1104 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1162 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
| 1105 | size -= sizeof(u32); | 1163 | size -= sizeof(u32); |
| 1106 | 1164 | ||
| @@ -1108,18 +1166,18 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | |||
| 1108 | if (!entry) | 1166 | if (!entry) |
| 1109 | return; | 1167 | return; |
| 1110 | 1168 | ||
| 1111 | entry->ip = (unsigned long)tp->rp.kp.addr; | 1169 | entry->ip = (unsigned long)tk->rp.kp.addr; |
| 1112 | memset(&entry[1], 0, dsize); | 1170 | memset(&entry[1], 0, dsize); |
| 1113 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1171 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
| 1114 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | 1172 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); |
| 1115 | } | 1173 | } |
| 1116 | 1174 | ||
| 1117 | /* Kretprobe profile handler */ | 1175 | /* Kretprobe profile handler */ |
| 1118 | static __kprobes void | 1176 | static __kprobes void |
| 1119 | kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | 1177 | kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, |
| 1120 | struct pt_regs *regs) | 1178 | struct pt_regs *regs) |
| 1121 | { | 1179 | { |
| 1122 | struct ftrace_event_call *call = &tp->call; | 1180 | struct ftrace_event_call *call = &tk->tp.call; |
| 1123 | struct kretprobe_trace_entry_head *entry; | 1181 | struct kretprobe_trace_entry_head *entry; |
| 1124 | struct hlist_head *head; | 1182 | struct hlist_head *head; |
| 1125 | int size, __size, dsize; | 1183 | int size, __size, dsize; |
| @@ -1129,8 +1187,8 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 1129 | if (hlist_empty(head)) | 1187 | if (hlist_empty(head)) |
| 1130 | return; | 1188 | return; |
| 1131 | 1189 | ||
| 1132 | dsize = __get_data_size(tp, regs); | 1190 | dsize = __get_data_size(&tk->tp, regs); |
| 1133 | __size = sizeof(*entry) + tp->size + dsize; | 1191 | __size = sizeof(*entry) + tk->tp.size + dsize; |
| 1134 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1192 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
| 1135 | size -= sizeof(u32); | 1193 | size -= sizeof(u32); |
| 1136 | 1194 | ||
| @@ -1138,9 +1196,9 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 1138 | if (!entry) | 1196 | if (!entry) |
| 1139 | return; | 1197 | return; |
| 1140 | 1198 | ||
| 1141 | entry->func = (unsigned long)tp->rp.kp.addr; | 1199 | entry->func = (unsigned long)tk->rp.kp.addr; |
| 1142 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1200 | entry->ret_ip = (unsigned long)ri->ret_addr; |
| 1143 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1201 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
| 1144 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | 1202 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); |
| 1145 | } | 1203 | } |
| 1146 | #endif /* CONFIG_PERF_EVENTS */ | 1204 | #endif /* CONFIG_PERF_EVENTS */ |
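Both perf handlers round the record size so that the entry, once perf prefixes it with a u32 size field, stays u64-aligned. The arithmetic as a checkable sketch; align_up mirrors the kernel's power-of-two ALIGN():

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Power-of-two round-up, equivalent to the kernel's ALIGN(). */
static size_t align_up(size_t x, size_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	size_t entry = 8;	/* sizeof(*entry): the probed ip          */
	size_t args = 13;	/* tp->size + dsize, odd on purpose       */
	size_t raw_size = entry + args;		/* __size in the diff */

	/* perf stores a u32 size header in front of the record; align
	 * header + payload to u64, then subtract the header again.     */
	size_t size = align_up(raw_size + sizeof(uint32_t), sizeof(uint64_t))
		      - sizeof(uint32_t);

	printf("__size=%zu -> size=%zu ((size+4) %% 8 == %zu)\n",
	       raw_size, size, (size + sizeof(uint32_t)) % sizeof(uint64_t));
	return 0;
}
```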
| @@ -1155,20 +1213,20 @@ static __kprobes | |||
| 1155 | int kprobe_register(struct ftrace_event_call *event, | 1213 | int kprobe_register(struct ftrace_event_call *event, |
| 1156 | enum trace_reg type, void *data) | 1214 | enum trace_reg type, void *data) |
| 1157 | { | 1215 | { |
| 1158 | struct trace_probe *tp = (struct trace_probe *)event->data; | 1216 | struct trace_kprobe *tk = (struct trace_kprobe *)event->data; |
| 1159 | struct ftrace_event_file *file = data; | 1217 | struct ftrace_event_file *file = data; |
| 1160 | 1218 | ||
| 1161 | switch (type) { | 1219 | switch (type) { |
| 1162 | case TRACE_REG_REGISTER: | 1220 | case TRACE_REG_REGISTER: |
| 1163 | return enable_trace_probe(tp, file); | 1221 | return enable_trace_kprobe(tk, file); |
| 1164 | case TRACE_REG_UNREGISTER: | 1222 | case TRACE_REG_UNREGISTER: |
| 1165 | return disable_trace_probe(tp, file); | 1223 | return disable_trace_kprobe(tk, file); |
| 1166 | 1224 | ||
| 1167 | #ifdef CONFIG_PERF_EVENTS | 1225 | #ifdef CONFIG_PERF_EVENTS |
| 1168 | case TRACE_REG_PERF_REGISTER: | 1226 | case TRACE_REG_PERF_REGISTER: |
| 1169 | return enable_trace_probe(tp, NULL); | 1227 | return enable_trace_kprobe(tk, NULL); |
| 1170 | case TRACE_REG_PERF_UNREGISTER: | 1228 | case TRACE_REG_PERF_UNREGISTER: |
| 1171 | return disable_trace_probe(tp, NULL); | 1229 | return disable_trace_kprobe(tk, NULL); |
| 1172 | case TRACE_REG_PERF_OPEN: | 1230 | case TRACE_REG_PERF_OPEN: |
| 1173 | case TRACE_REG_PERF_CLOSE: | 1231 | case TRACE_REG_PERF_CLOSE: |
| 1174 | case TRACE_REG_PERF_ADD: | 1232 | case TRACE_REG_PERF_ADD: |
| @@ -1182,15 +1240,15 @@ int kprobe_register(struct ftrace_event_call *event, | |||
| 1182 | static __kprobes | 1240 | static __kprobes |
| 1183 | int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) | 1241 | int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) |
| 1184 | { | 1242 | { |
| 1185 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); | 1243 | struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp); |
| 1186 | 1244 | ||
| 1187 | tp->nhit++; | 1245 | tk->nhit++; |
| 1188 | 1246 | ||
| 1189 | if (tp->flags & TP_FLAG_TRACE) | 1247 | if (tk->tp.flags & TP_FLAG_TRACE) |
| 1190 | kprobe_trace_func(tp, regs); | 1248 | kprobe_trace_func(tk, regs); |
| 1191 | #ifdef CONFIG_PERF_EVENTS | 1249 | #ifdef CONFIG_PERF_EVENTS |
| 1192 | if (tp->flags & TP_FLAG_PROFILE) | 1250 | if (tk->tp.flags & TP_FLAG_PROFILE) |
| 1193 | kprobe_perf_func(tp, regs); | 1251 | kprobe_perf_func(tk, regs); |
| 1194 | #endif | 1252 | #endif |
| 1195 | return 0; /* We don't tweak the kernel, so just return 0 */ | 1253 | return 0; /* We don't tweak the kernel, so just return 0 */ | ||
| 1196 | } | 1254 | } |
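Both dispatchers recover the enclosing trace_kprobe from the embedded kprobe using container_of(), which is just offsetof() arithmetic. A self-contained illustration of why the handler can be handed only the inner struct:

```c
#include <stdio.h>
#include <stddef.h>

/* Same trick as the kernel's container_of(): subtract the member's
 * offset from the member's address to get the enclosing struct.   */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kprobe { void *addr; };
struct kretprobe { struct kprobe kp; };

struct trace_kprobe {
	unsigned long nhit;
	struct kretprobe rp;	/* kprobe embedded two levels deep */
};

static void dispatcher(struct kprobe *kp)
{
	struct trace_kprobe *tk =
		container_of(kp, struct trace_kprobe, rp.kp);

	tk->nhit++;	/* hit counter on the outer struct */
}

int main(void)
{
	struct trace_kprobe tk = { .nhit = 0 };

	dispatcher(&tk.rp.kp);	/* handler only sees the inner kprobe */
	printf("nhit=%lu\n", tk.nhit);
	return 0;
}
```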
| @@ -1198,15 +1256,15 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) | |||
| 1198 | static __kprobes | 1256 | static __kprobes |
| 1199 | int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) | 1257 | int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) |
| 1200 | { | 1258 | { |
| 1201 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | 1259 | struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp); |
| 1202 | 1260 | ||
| 1203 | tp->nhit++; | 1261 | tk->nhit++; |
| 1204 | 1262 | ||
| 1205 | if (tp->flags & TP_FLAG_TRACE) | 1263 | if (tk->tp.flags & TP_FLAG_TRACE) |
| 1206 | kretprobe_trace_func(tp, ri, regs); | 1264 | kretprobe_trace_func(tk, ri, regs); |
| 1207 | #ifdef CONFIG_PERF_EVENTS | 1265 | #ifdef CONFIG_PERF_EVENTS |
| 1208 | if (tp->flags & TP_FLAG_PROFILE) | 1266 | if (tk->tp.flags & TP_FLAG_PROFILE) |
| 1209 | kretprobe_perf_func(tp, ri, regs); | 1267 | kretprobe_perf_func(tk, ri, regs); |
| 1210 | #endif | 1268 | #endif |
| 1211 | return 0; /* We don't tweak the kernel, so just return 0 */ | 1269 | return 0; /* We don't tweak the kernel, so just return 0 */ | ||
| 1212 | } | 1270 | } |
| @@ -1219,21 +1277,21 @@ static struct trace_event_functions kprobe_funcs = { | |||
| 1219 | .trace = print_kprobe_event | 1277 | .trace = print_kprobe_event |
| 1220 | }; | 1278 | }; |
| 1221 | 1279 | ||
| 1222 | static int register_probe_event(struct trace_probe *tp) | 1280 | static int register_kprobe_event(struct trace_kprobe *tk) |
| 1223 | { | 1281 | { |
| 1224 | struct ftrace_event_call *call = &tp->call; | 1282 | struct ftrace_event_call *call = &tk->tp.call; |
| 1225 | int ret; | 1283 | int ret; |
| 1226 | 1284 | ||
| 1227 | /* Initialize ftrace_event_call */ | 1285 | /* Initialize ftrace_event_call */ |
| 1228 | INIT_LIST_HEAD(&call->class->fields); | 1286 | INIT_LIST_HEAD(&call->class->fields); |
| 1229 | if (trace_probe_is_return(tp)) { | 1287 | if (trace_kprobe_is_return(tk)) { |
| 1230 | call->event.funcs = &kretprobe_funcs; | 1288 | call->event.funcs = &kretprobe_funcs; |
| 1231 | call->class->define_fields = kretprobe_event_define_fields; | 1289 | call->class->define_fields = kretprobe_event_define_fields; |
| 1232 | } else { | 1290 | } else { |
| 1233 | call->event.funcs = &kprobe_funcs; | 1291 | call->event.funcs = &kprobe_funcs; |
| 1234 | call->class->define_fields = kprobe_event_define_fields; | 1292 | call->class->define_fields = kprobe_event_define_fields; |
| 1235 | } | 1293 | } |
| 1236 | if (set_print_fmt(tp) < 0) | 1294 | if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) |
| 1237 | return -ENOMEM; | 1295 | return -ENOMEM; |
| 1238 | ret = register_ftrace_event(&call->event); | 1296 | ret = register_ftrace_event(&call->event); |
| 1239 | if (!ret) { | 1297 | if (!ret) { |
| @@ -1242,7 +1300,7 @@ static int register_probe_event(struct trace_probe *tp) | |||
| 1242 | } | 1300 | } |
| 1243 | call->flags = 0; | 1301 | call->flags = 0; |
| 1244 | call->class->reg = kprobe_register; | 1302 | call->class->reg = kprobe_register; |
| 1245 | call->data = tp; | 1303 | call->data = tk; |
| 1246 | ret = trace_add_event_call(call); | 1304 | ret = trace_add_event_call(call); |
| 1247 | if (ret) { | 1305 | if (ret) { |
| 1248 | pr_info("Failed to register kprobe event: %s\n", call->name); | 1306 | pr_info("Failed to register kprobe event: %s\n", call->name); |
| @@ -1252,14 +1310,14 @@ static int register_probe_event(struct trace_probe *tp) | |||
| 1252 | return ret; | 1310 | return ret; |
| 1253 | } | 1311 | } |
| 1254 | 1312 | ||
| 1255 | static int unregister_probe_event(struct trace_probe *tp) | 1313 | static int unregister_kprobe_event(struct trace_kprobe *tk) |
| 1256 | { | 1314 | { |
| 1257 | int ret; | 1315 | int ret; |
| 1258 | 1316 | ||
| 1259 | /* tp->event is unregistered in trace_remove_event_call() */ | 1317 | /* tp->event is unregistered in trace_remove_event_call() */ |
| 1260 | ret = trace_remove_event_call(&tp->call); | 1318 | ret = trace_remove_event_call(&tk->tp.call); |
| 1261 | if (!ret) | 1319 | if (!ret) |
| 1262 | kfree(tp->call.print_fmt); | 1320 | kfree(tk->tp.call.print_fmt); |
| 1263 | return ret; | 1321 | return ret; |
| 1264 | } | 1322 | } |
| 1265 | 1323 | ||
| @@ -1269,7 +1327,7 @@ static __init int init_kprobe_trace(void) | |||
| 1269 | struct dentry *d_tracer; | 1327 | struct dentry *d_tracer; |
| 1270 | struct dentry *entry; | 1328 | struct dentry *entry; |
| 1271 | 1329 | ||
| 1272 | if (register_module_notifier(&trace_probe_module_nb)) | 1330 | if (register_module_notifier(&trace_kprobe_module_nb)) |
| 1273 | return -EINVAL; | 1331 | return -EINVAL; |
| 1274 | 1332 | ||
| 1275 | d_tracer = tracing_init_dentry(); | 1333 | d_tracer = tracing_init_dentry(); |
| @@ -1309,26 +1367,26 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3, | |||
| 1309 | } | 1367 | } |
| 1310 | 1368 | ||
| 1311 | static struct ftrace_event_file * | 1369 | static struct ftrace_event_file * |
| 1312 | find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr) | 1370 | find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) |
| 1313 | { | 1371 | { |
| 1314 | struct ftrace_event_file *file; | 1372 | struct ftrace_event_file *file; |
| 1315 | 1373 | ||
| 1316 | list_for_each_entry(file, &tr->events, list) | 1374 | list_for_each_entry(file, &tr->events, list) |
| 1317 | if (file->event_call == &tp->call) | 1375 | if (file->event_call == &tk->tp.call) |
| 1318 | return file; | 1376 | return file; |
| 1319 | 1377 | ||
| 1320 | return NULL; | 1378 | return NULL; |
| 1321 | } | 1379 | } |
| 1322 | 1380 | ||
| 1323 | /* | 1381 | /* |
| 1324 | * Nobody but us can call enable_trace_probe/disable_trace_probe at this | 1382 | * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this |
| 1325 | * stage, so we can do this locklessly. | 1383 | * stage, so we can do this locklessly. | ||
| 1326 | */ | 1384 | */ |
| 1327 | static __init int kprobe_trace_self_tests_init(void) | 1385 | static __init int kprobe_trace_self_tests_init(void) |
| 1328 | { | 1386 | { |
| 1329 | int ret, warn = 0; | 1387 | int ret, warn = 0; |
| 1330 | int (*target)(int, int, int, int, int, int); | 1388 | int (*target)(int, int, int, int, int, int); |
| 1331 | struct trace_probe *tp; | 1389 | struct trace_kprobe *tk; |
| 1332 | struct ftrace_event_file *file; | 1390 | struct ftrace_event_file *file; |
| 1333 | 1391 | ||
| 1334 | target = kprobe_trace_selftest_target; | 1392 | target = kprobe_trace_selftest_target; |
| @@ -1337,44 +1395,44 @@ static __init int kprobe_trace_self_tests_init(void) | |||
| 1337 | 1395 | ||
| 1338 | ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target " | 1396 | ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target " |
| 1339 | "$stack $stack0 +0($stack)", | 1397 | "$stack $stack0 +0($stack)", |
| 1340 | create_trace_probe); | 1398 | create_trace_kprobe); |
| 1341 | if (WARN_ON_ONCE(ret)) { | 1399 | if (WARN_ON_ONCE(ret)) { |
| 1342 | pr_warn("error on probing function entry.\n"); | 1400 | pr_warn("error on probing function entry.\n"); |
| 1343 | warn++; | 1401 | warn++; |
| 1344 | } else { | 1402 | } else { |
| 1345 | /* Enable trace point */ | 1403 | /* Enable trace point */ |
| 1346 | tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); | 1404 | tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); |
| 1347 | if (WARN_ON_ONCE(tp == NULL)) { | 1405 | if (WARN_ON_ONCE(tk == NULL)) { |
| 1348 | pr_warn("error on getting new probe.\n"); | 1406 | pr_warn("error on getting new probe.\n"); |
| 1349 | warn++; | 1407 | warn++; |
| 1350 | } else { | 1408 | } else { |
| 1351 | file = find_trace_probe_file(tp, top_trace_array()); | 1409 | file = find_trace_probe_file(tk, top_trace_array()); |
| 1352 | if (WARN_ON_ONCE(file == NULL)) { | 1410 | if (WARN_ON_ONCE(file == NULL)) { |
| 1353 | pr_warn("error on getting probe file.\n"); | 1411 | pr_warn("error on getting probe file.\n"); |
| 1354 | warn++; | 1412 | warn++; |
| 1355 | } else | 1413 | } else |
| 1356 | enable_trace_probe(tp, file); | 1414 | enable_trace_kprobe(tk, file); |
| 1357 | } | 1415 | } |
| 1358 | } | 1416 | } |
| 1359 | 1417 | ||
| 1360 | ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target " | 1418 | ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target " |
| 1361 | "$retval", create_trace_probe); | 1419 | "$retval", create_trace_kprobe); |
| 1362 | if (WARN_ON_ONCE(ret)) { | 1420 | if (WARN_ON_ONCE(ret)) { |
| 1363 | pr_warn("error on probing function return.\n"); | 1421 | pr_warn("error on probing function return.\n"); |
| 1364 | warn++; | 1422 | warn++; |
| 1365 | } else { | 1423 | } else { |
| 1366 | /* Enable trace point */ | 1424 | /* Enable trace point */ |
| 1367 | tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); | 1425 | tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); |
| 1368 | if (WARN_ON_ONCE(tp == NULL)) { | 1426 | if (WARN_ON_ONCE(tk == NULL)) { |
| 1369 | pr_warn("error on getting 2nd new probe.\n"); | 1427 | pr_warn("error on getting 2nd new probe.\n"); |
| 1370 | warn++; | 1428 | warn++; |
| 1371 | } else { | 1429 | } else { |
| 1372 | file = find_trace_probe_file(tp, top_trace_array()); | 1430 | file = find_trace_probe_file(tk, top_trace_array()); |
| 1373 | if (WARN_ON_ONCE(file == NULL)) { | 1431 | if (WARN_ON_ONCE(file == NULL)) { |
| 1374 | pr_warn("error on getting probe file.\n"); | 1432 | pr_warn("error on getting probe file.\n"); |
| 1375 | warn++; | 1433 | warn++; |
| 1376 | } else | 1434 | } else |
| 1377 | enable_trace_probe(tp, file); | 1435 | enable_trace_kprobe(tk, file); |
| 1378 | } | 1436 | } |
| 1379 | } | 1437 | } |
| 1380 | 1438 | ||
| @@ -1384,46 +1442,46 @@ static __init int kprobe_trace_self_tests_init(void) | |||
| 1384 | ret = target(1, 2, 3, 4, 5, 6); | 1442 | ret = target(1, 2, 3, 4, 5, 6); |
| 1385 | 1443 | ||
| 1386 | /* Disable trace points before removing it */ | 1444 | /* Disable trace points before removing it */ |
| 1387 | tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); | 1445 | tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); |
| 1388 | if (WARN_ON_ONCE(tp == NULL)) { | 1446 | if (WARN_ON_ONCE(tk == NULL)) { |
| 1389 | pr_warn("error on getting test probe.\n"); | 1447 | pr_warn("error on getting test probe.\n"); |
| 1390 | warn++; | 1448 | warn++; |
| 1391 | } else { | 1449 | } else { |
| 1392 | file = find_trace_probe_file(tp, top_trace_array()); | 1450 | file = find_trace_probe_file(tk, top_trace_array()); |
| 1393 | if (WARN_ON_ONCE(file == NULL)) { | 1451 | if (WARN_ON_ONCE(file == NULL)) { |
| 1394 | pr_warn("error on getting probe file.\n"); | 1452 | pr_warn("error on getting probe file.\n"); |
| 1395 | warn++; | 1453 | warn++; |
| 1396 | } else | 1454 | } else |
| 1397 | disable_trace_probe(tp, file); | 1455 | disable_trace_kprobe(tk, file); |
| 1398 | } | 1456 | } |
| 1399 | 1457 | ||
| 1400 | tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); | 1458 | tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); |
| 1401 | if (WARN_ON_ONCE(tp == NULL)) { | 1459 | if (WARN_ON_ONCE(tk == NULL)) { |
| 1402 | pr_warn("error on getting 2nd test probe.\n"); | 1460 | pr_warn("error on getting 2nd test probe.\n"); |
| 1403 | warn++; | 1461 | warn++; |
| 1404 | } else { | 1462 | } else { |
| 1405 | file = find_trace_probe_file(tp, top_trace_array()); | 1463 | file = find_trace_probe_file(tk, top_trace_array()); |
| 1406 | if (WARN_ON_ONCE(file == NULL)) { | 1464 | if (WARN_ON_ONCE(file == NULL)) { |
| 1407 | pr_warn("error on getting probe file.\n"); | 1465 | pr_warn("error on getting probe file.\n"); |
| 1408 | warn++; | 1466 | warn++; |
| 1409 | } else | 1467 | } else |
| 1410 | disable_trace_probe(tp, file); | 1468 | disable_trace_kprobe(tk, file); |
| 1411 | } | 1469 | } |
| 1412 | 1470 | ||
| 1413 | ret = traceprobe_command("-:testprobe", create_trace_probe); | 1471 | ret = traceprobe_command("-:testprobe", create_trace_kprobe); |
| 1414 | if (WARN_ON_ONCE(ret)) { | 1472 | if (WARN_ON_ONCE(ret)) { |
| 1415 | pr_warn("error on deleting a probe.\n"); | 1473 | pr_warn("error on deleting a probe.\n"); |
| 1416 | warn++; | 1474 | warn++; |
| 1417 | } | 1475 | } |
| 1418 | 1476 | ||
| 1419 | ret = traceprobe_command("-:testprobe2", create_trace_probe); | 1477 | ret = traceprobe_command("-:testprobe2", create_trace_kprobe); |
| 1420 | if (WARN_ON_ONCE(ret)) { | 1478 | if (WARN_ON_ONCE(ret)) { |
| 1421 | pr_warn("error on deleting a probe.\n"); | 1479 | pr_warn("error on deleting a probe.\n"); |
| 1422 | warn++; | 1480 | warn++; |
| 1423 | } | 1481 | } |
| 1424 | 1482 | ||
| 1425 | end: | 1483 | end: |
| 1426 | release_all_trace_probes(); | 1484 | release_all_trace_kprobes(); |
| 1427 | if (warn) | 1485 | if (warn) |
| 1428 | pr_cont("NG: Some tests are failed. Please check them.\n"); | 1486 | pr_cont("NG: Some tests are failed. Please check them.\n"); |
| 1429 | else | 1487 | else |
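The self-test above feeds traceprobe_command() the same strings a user would write to the tracefs kprobe_events file, so the probes can be reproduced from userspace. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug (the path, and the target symbol being resolvable on a given kernel, are assumptions, not part of this patch):

    /* Register the same entry probe the selftest uses. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *events = "/sys/kernel/debug/tracing/kprobe_events";
        const char *probe = "p:testprobe kprobe_trace_selftest_target "
                            "$stack $stack0 +0($stack)\n";
        int fd = open(events, O_WRONLY | O_APPEND);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, probe, strlen(probe)) < 0)
            perror("write");
        close(fd);
        return 0;
    }

Writing "-:testprobe" to the same file removes the probe again, mirroring the teardown path above.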
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 412e959709b4..8364a421b4df 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c | |||
| @@ -35,46 +35,27 @@ const char *reserved_field_names[] = { | |||
| 35 | FIELD_STRING_FUNC, | 35 | FIELD_STRING_FUNC, |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
| 38 | /* Printing function type */ | ||
| 39 | #define PRINT_TYPE_FUNC_NAME(type) print_type_##type | ||
| 40 | #define PRINT_TYPE_FMT_NAME(type) print_type_format_##type | ||
| 41 | |||
| 42 | /* Printing in basic type function template */ | 38 | /* Printing in basic type function template */ |
| 43 | #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \ | 39 | #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt) \ |
| 44 | static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ | 40 | __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ |
| 45 | const char *name, \ | 41 | const char *name, \ |
| 46 | void *data, void *ent)\ | 42 | void *data, void *ent) \ |
| 47 | { \ | 43 | { \ |
| 48 | return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\ | 44 | return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ |
| 49 | } \ | 45 | } \ |
| 50 | static const char PRINT_TYPE_FMT_NAME(type)[] = fmt; | 46 | const char PRINT_TYPE_FMT_NAME(type)[] = fmt; |
| 51 | |||
| 52 | DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int) | ||
| 53 | DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "%x", unsigned int) | ||
| 54 | DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "%lx", unsigned long) | ||
| 55 | DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "%llx", unsigned long long) | ||
| 56 | DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d", int) | ||
| 57 | DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int) | ||
| 58 | DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long) | ||
| 59 | DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long) | ||
| 60 | |||
| 61 | static inline void *get_rloc_data(u32 *dl) | ||
| 62 | { | ||
| 63 | return (u8 *)dl + get_rloc_offs(*dl); | ||
| 64 | } | ||
| 65 | 47 | ||
| 66 | /* For data_loc conversion */ | 48 | DEFINE_BASIC_PRINT_TYPE_FUNC(u8 , "0x%x") |
| 67 | static inline void *get_loc_data(u32 *dl, void *ent) | 49 | DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "0x%x") |
| 68 | { | 50 | DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "0x%x") |
| 69 | return (u8 *)ent + get_rloc_offs(*dl); | 51 | DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "0x%Lx") |
| 70 | } | 52 | DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d") |
| 71 | 53 | DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d") | |
| 72 | /* For defining macros, define string/string_size types */ | 54 | DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%d") |
| 73 | typedef u32 string; | 55 | DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%Ld") |
| 74 | typedef u32 string_size; | ||
| 75 | 56 | ||
| 76 | /* Print type function for string type */ | 57 | /* Print type function for string type */ |
| 77 | static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, | 58 | __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, |
| 78 | const char *name, | 59 | const char *name, |
| 79 | void *data, void *ent) | 60 | void *data, void *ent) |
| 80 | { | 61 | { |
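With the cast parameter dropped, each basic printer now prints through a format that already matches the type's own width. For reference, DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "0x%x") expands (modulo whitespace, in kernel context) to:

    __kprobes int print_type_u32(struct trace_seq *s, const char *name,
                                 void *data, void *ent)
    {
        return trace_seq_printf(s, " %s=" "0x%x", name, *(u32 *)data);
    }
    const char print_type_format_u32[] = "0x%x";

The symbols also lose their static linkage because trace_kprobe.c and trace_uprobe.c now reference them through their own fetch-type tables.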
| @@ -87,18 +68,7 @@ static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, | |||
| 87 | (const char *)get_loc_data(data, ent)); | 68 | (const char *)get_loc_data(data, ent)); |
| 88 | } | 69 | } |
| 89 | 70 | ||
| 90 | static const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; | 71 | const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; |
| 91 | |||
| 92 | #define FETCH_FUNC_NAME(method, type) fetch_##method##_##type | ||
| 93 | /* | ||
| 94 | * Define macro for basic types - we don't need to define s* types, because | ||
| 95 | * we have to care only about bitwidth at recording time. | ||
| 96 | */ | ||
| 97 | #define DEFINE_BASIC_FETCH_FUNCS(method) \ | ||
| 98 | DEFINE_FETCH_##method(u8) \ | ||
| 99 | DEFINE_FETCH_##method(u16) \ | ||
| 100 | DEFINE_FETCH_##method(u32) \ | ||
| 101 | DEFINE_FETCH_##method(u64) | ||
| 102 | 72 | ||
| 103 | #define CHECK_FETCH_FUNCS(method, fn) \ | 73 | #define CHECK_FETCH_FUNCS(method, fn) \ |
| 104 | (((FETCH_FUNC_NAME(method, u8) == fn) || \ | 74 | (((FETCH_FUNC_NAME(method, u8) == fn) || \ |
| @@ -111,7 +81,7 @@ DEFINE_FETCH_##method(u64) | |||
| 111 | 81 | ||
| 112 | /* Data fetch function templates */ | 82 | /* Data fetch function templates */ |
| 113 | #define DEFINE_FETCH_reg(type) \ | 83 | #define DEFINE_FETCH_reg(type) \ |
| 114 | static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ | 84 | __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ |
| 115 | void *offset, void *dest) \ | 85 | void *offset, void *dest) \ |
| 116 | { \ | 86 | { \ |
| 117 | *(type *)dest = (type)regs_get_register(regs, \ | 87 | *(type *)dest = (type)regs_get_register(regs, \ |
| @@ -122,20 +92,8 @@ DEFINE_BASIC_FETCH_FUNCS(reg) | |||
| 122 | #define fetch_reg_string NULL | 92 | #define fetch_reg_string NULL |
| 123 | #define fetch_reg_string_size NULL | 93 | #define fetch_reg_string_size NULL |
| 124 | 94 | ||
| 125 | #define DEFINE_FETCH_stack(type) \ | ||
| 126 | static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ | ||
| 127 | void *offset, void *dest) \ | ||
| 128 | { \ | ||
| 129 | *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \ | ||
| 130 | (unsigned int)((unsigned long)offset)); \ | ||
| 131 | } | ||
| 132 | DEFINE_BASIC_FETCH_FUNCS(stack) | ||
| 133 | /* No string on the stack entry */ | ||
| 134 | #define fetch_stack_string NULL | ||
| 135 | #define fetch_stack_string_size NULL | ||
| 136 | |||
| 137 | #define DEFINE_FETCH_retval(type) \ | 95 | #define DEFINE_FETCH_retval(type) \ |
| 138 | static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ | 96 | __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \ |
| 139 | void *dummy, void *dest) \ | 97 | void *dummy, void *dest) \ |
| 140 | { \ | 98 | { \ |
| 141 | *(type *)dest = (type)regs_return_value(regs); \ | 99 | *(type *)dest = (type)regs_return_value(regs); \ |
| @@ -145,150 +103,16 @@ DEFINE_BASIC_FETCH_FUNCS(retval) | |||
| 145 | #define fetch_retval_string NULL | 103 | #define fetch_retval_string NULL |
| 146 | #define fetch_retval_string_size NULL | 104 | #define fetch_retval_string_size NULL |
| 147 | 105 | ||
| 148 | #define DEFINE_FETCH_memory(type) \ | ||
| 149 | static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ | ||
| 150 | void *addr, void *dest) \ | ||
| 151 | { \ | ||
| 152 | type retval; \ | ||
| 153 | if (probe_kernel_address(addr, retval)) \ | ||
| 154 | *(type *)dest = 0; \ | ||
| 155 | else \ | ||
| 156 | *(type *)dest = retval; \ | ||
| 157 | } | ||
| 158 | DEFINE_BASIC_FETCH_FUNCS(memory) | ||
| 159 | /* | ||
| 160 | * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max | ||
| 161 | * length and relative data location. | ||
| 162 | */ | ||
| 163 | static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, | ||
| 164 | void *addr, void *dest) | ||
| 165 | { | ||
| 166 | long ret; | ||
| 167 | int maxlen = get_rloc_len(*(u32 *)dest); | ||
| 168 | u8 *dst = get_rloc_data(dest); | ||
| 169 | u8 *src = addr; | ||
| 170 | mm_segment_t old_fs = get_fs(); | ||
| 171 | |||
| 172 | if (!maxlen) | ||
| 173 | return; | ||
| 174 | |||
| 175 | /* | ||
| 176 | * Try to get string again, since the string can be changed while | ||
| 177 | * probing. | ||
| 178 | */ | ||
| 179 | set_fs(KERNEL_DS); | ||
| 180 | pagefault_disable(); | ||
| 181 | |||
| 182 | do | ||
| 183 | ret = __copy_from_user_inatomic(dst++, src++, 1); | ||
| 184 | while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen); | ||
| 185 | |||
| 186 | dst[-1] = '\0'; | ||
| 187 | pagefault_enable(); | ||
| 188 | set_fs(old_fs); | ||
| 189 | |||
| 190 | if (ret < 0) { /* Failed to fetch string */ | ||
| 191 | ((u8 *)get_rloc_data(dest))[0] = '\0'; | ||
| 192 | *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); | ||
| 193 | } else { | ||
| 194 | *(u32 *)dest = make_data_rloc(src - (u8 *)addr, | ||
| 195 | get_rloc_offs(*(u32 *)dest)); | ||
| 196 | } | ||
| 197 | } | ||
| 198 | |||
| 199 | /* Return the length of string -- including null terminal byte */ | ||
| 200 | static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, | ||
| 201 | void *addr, void *dest) | ||
| 202 | { | ||
| 203 | mm_segment_t old_fs; | ||
| 204 | int ret, len = 0; | ||
| 205 | u8 c; | ||
| 206 | |||
| 207 | old_fs = get_fs(); | ||
| 208 | set_fs(KERNEL_DS); | ||
| 209 | pagefault_disable(); | ||
| 210 | |||
| 211 | do { | ||
| 212 | ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); | ||
| 213 | len++; | ||
| 214 | } while (c && ret == 0 && len < MAX_STRING_SIZE); | ||
| 215 | |||
| 216 | pagefault_enable(); | ||
| 217 | set_fs(old_fs); | ||
| 218 | |||
| 219 | if (ret < 0) /* Failed to check the length */ | ||
| 220 | *(u32 *)dest = 0; | ||
| 221 | else | ||
| 222 | *(u32 *)dest = len; | ||
| 223 | } | ||
| 224 | |||
| 225 | /* Memory fetching by symbol */ | ||
| 226 | struct symbol_cache { | ||
| 227 | char *symbol; | ||
| 228 | long offset; | ||
| 229 | unsigned long addr; | ||
| 230 | }; | ||
| 231 | |||
| 232 | static unsigned long update_symbol_cache(struct symbol_cache *sc) | ||
| 233 | { | ||
| 234 | sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol); | ||
| 235 | |||
| 236 | if (sc->addr) | ||
| 237 | sc->addr += sc->offset; | ||
| 238 | |||
| 239 | return sc->addr; | ||
| 240 | } | ||
| 241 | |||
| 242 | static void free_symbol_cache(struct symbol_cache *sc) | ||
| 243 | { | ||
| 244 | kfree(sc->symbol); | ||
| 245 | kfree(sc); | ||
| 246 | } | ||
| 247 | |||
| 248 | static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) | ||
| 249 | { | ||
| 250 | struct symbol_cache *sc; | ||
| 251 | |||
| 252 | if (!sym || strlen(sym) == 0) | ||
| 253 | return NULL; | ||
| 254 | |||
| 255 | sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL); | ||
| 256 | if (!sc) | ||
| 257 | return NULL; | ||
| 258 | |||
| 259 | sc->symbol = kstrdup(sym, GFP_KERNEL); | ||
| 260 | if (!sc->symbol) { | ||
| 261 | kfree(sc); | ||
| 262 | return NULL; | ||
| 263 | } | ||
| 264 | sc->offset = offset; | ||
| 265 | update_symbol_cache(sc); | ||
| 266 | |||
| 267 | return sc; | ||
| 268 | } | ||
| 269 | |||
| 270 | #define DEFINE_FETCH_symbol(type) \ | ||
| 271 | static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\ | ||
| 272 | void *data, void *dest) \ | ||
| 273 | { \ | ||
| 274 | struct symbol_cache *sc = data; \ | ||
| 275 | if (sc->addr) \ | ||
| 276 | fetch_memory_##type(regs, (void *)sc->addr, dest); \ | ||
| 277 | else \ | ||
| 278 | *(type *)dest = 0; \ | ||
| 279 | } | ||
| 280 | DEFINE_BASIC_FETCH_FUNCS(symbol) | ||
| 281 | DEFINE_FETCH_symbol(string) | ||
| 282 | DEFINE_FETCH_symbol(string_size) | ||
| 283 | |||
| 284 | /* Dereference memory access function */ | 106 | /* Dereference memory access function */ |
| 285 | struct deref_fetch_param { | 107 | struct deref_fetch_param { |
| 286 | struct fetch_param orig; | 108 | struct fetch_param orig; |
| 287 | long offset; | 109 | long offset; |
| 110 | fetch_func_t fetch; | ||
| 111 | fetch_func_t fetch_size; | ||
| 288 | }; | 112 | }; |
| 289 | 113 | ||
| 290 | #define DEFINE_FETCH_deref(type) \ | 114 | #define DEFINE_FETCH_deref(type) \ |
| 291 | static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\ | 115 | __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \ |
| 292 | void *data, void *dest) \ | 116 | void *data, void *dest) \ |
| 293 | { \ | 117 | { \ |
| 294 | struct deref_fetch_param *dprm = data; \ | 118 | struct deref_fetch_param *dprm = data; \ |
| @@ -296,13 +120,26 @@ static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\ | |||
| 296 | call_fetch(&dprm->orig, regs, &addr); \ | 120 | call_fetch(&dprm->orig, regs, &addr); \ |
| 297 | if (addr) { \ | 121 | if (addr) { \ |
| 298 | addr += dprm->offset; \ | 122 | addr += dprm->offset; \ |
| 299 | fetch_memory_##type(regs, (void *)addr, dest); \ | 123 | dprm->fetch(regs, (void *)addr, dest); \ |
| 300 | } else \ | 124 | } else \ |
| 301 | *(type *)dest = 0; \ | 125 | *(type *)dest = 0; \ |
| 302 | } | 126 | } |
| 303 | DEFINE_BASIC_FETCH_FUNCS(deref) | 127 | DEFINE_BASIC_FETCH_FUNCS(deref) |
| 304 | DEFINE_FETCH_deref(string) | 128 | DEFINE_FETCH_deref(string) |
| 305 | DEFINE_FETCH_deref(string_size) | 129 | |
| 130 | __kprobes void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs, | ||
| 131 | void *data, void *dest) | ||
| 132 | { | ||
| 133 | struct deref_fetch_param *dprm = data; | ||
| 134 | unsigned long addr; | ||
| 135 | |||
| 136 | call_fetch(&dprm->orig, regs, &addr); | ||
| 137 | if (addr && dprm->fetch_size) { | ||
| 138 | addr += dprm->offset; | ||
| 139 | dprm->fetch_size(regs, (void *)addr, dest); | ||
| 140 | } else | ||
| 141 | *(string_size *)dest = 0; | ||
| 142 | } | ||
| 306 | 143 | ||
| 307 | static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data) | 144 | static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data) |
| 308 | { | 145 | { |
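The new fetch/fetch_size members mean the dereference step calls whichever memory accessor the parser bound for this probe flavor, rather than a hard-coded fetch_memory_##type (kprobes read kernel memory; uprobes will read user memory). The shape of that pattern, as a standalone userspace sketch with illustrative names and a plain load standing in for the real accessors:

    #include <stdio.h>

    typedef void (*fetch_fn)(const void *addr, void *dest);

    /* Stand-in for probe_kernel_address() or a user-memory copy */
    static void fetch_u32(const void *addr, void *dest)
    {
        *(unsigned int *)dest = *(const unsigned int *)addr;
    }

    struct deref_param {
        long offset;
        fetch_fn fetch;             /* bound once, at parse time */
    };

    static void fetch_deref_u32(const struct deref_param *p,
                                const char *base, void *dest)
    {
        p->fetch(base + p->offset, dest);   /* flavor-agnostic call */
    }

    int main(void)
    {
        unsigned int src[4] = { 1, 2, 3, 4 }, out;
        struct deref_param p = { .offset = 8, .fetch = fetch_u32 };

        fetch_deref_u32(&p, (const char *)src, &out);
        printf("%u\n", out);        /* 8 bytes in is src[2]: prints 3 */
        return 0;
    }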
| @@ -329,7 +166,7 @@ struct bitfield_fetch_param { | |||
| 329 | }; | 166 | }; |
| 330 | 167 | ||
| 331 | #define DEFINE_FETCH_bitfield(type) \ | 168 | #define DEFINE_FETCH_bitfield(type) \ |
| 332 | static __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs,\ | 169 | __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \ |
| 333 | void *data, void *dest) \ | 170 | void *data, void *dest) \ |
| 334 | { \ | 171 | { \ |
| 335 | struct bitfield_fetch_param *bprm = data; \ | 172 | struct bitfield_fetch_param *bprm = data; \ |
| @@ -374,58 +211,8 @@ free_bitfield_fetch_param(struct bitfield_fetch_param *data) | |||
| 374 | kfree(data); | 211 | kfree(data); |
| 375 | } | 212 | } |
| 376 | 213 | ||
| 377 | /* Default (unsigned long) fetch type */ | 214 | static const struct fetch_type *find_fetch_type(const char *type, |
| 378 | #define __DEFAULT_FETCH_TYPE(t) u##t | 215 | const struct fetch_type *ftbl) |
| 379 | #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t) | ||
| 380 | #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) | ||
| 381 | #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) | ||
| 382 | |||
| 383 | #define ASSIGN_FETCH_FUNC(method, type) \ | ||
| 384 | [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type) | ||
| 385 | |||
| 386 | #define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ | ||
| 387 | {.name = _name, \ | ||
| 388 | .size = _size, \ | ||
| 389 | .is_signed = sign, \ | ||
| 390 | .print = PRINT_TYPE_FUNC_NAME(ptype), \ | ||
| 391 | .fmt = PRINT_TYPE_FMT_NAME(ptype), \ | ||
| 392 | .fmttype = _fmttype, \ | ||
| 393 | .fetch = { \ | ||
| 394 | ASSIGN_FETCH_FUNC(reg, ftype), \ | ||
| 395 | ASSIGN_FETCH_FUNC(stack, ftype), \ | ||
| 396 | ASSIGN_FETCH_FUNC(retval, ftype), \ | ||
| 397 | ASSIGN_FETCH_FUNC(memory, ftype), \ | ||
| 398 | ASSIGN_FETCH_FUNC(symbol, ftype), \ | ||
| 399 | ASSIGN_FETCH_FUNC(deref, ftype), \ | ||
| 400 | ASSIGN_FETCH_FUNC(bitfield, ftype), \ | ||
| 401 | } \ | ||
| 402 | } | ||
| 403 | |||
| 404 | #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ | ||
| 405 | __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype) | ||
| 406 | |||
| 407 | #define FETCH_TYPE_STRING 0 | ||
| 408 | #define FETCH_TYPE_STRSIZE 1 | ||
| 409 | |||
| 410 | /* Fetch type information table */ | ||
| 411 | static const struct fetch_type fetch_type_table[] = { | ||
| 412 | /* Special types */ | ||
| 413 | [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, | ||
| 414 | sizeof(u32), 1, "__data_loc char[]"), | ||
| 415 | [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, | ||
| 416 | string_size, sizeof(u32), 0, "u32"), | ||
| 417 | /* Basic types */ | ||
| 418 | ASSIGN_FETCH_TYPE(u8, u8, 0), | ||
| 419 | ASSIGN_FETCH_TYPE(u16, u16, 0), | ||
| 420 | ASSIGN_FETCH_TYPE(u32, u32, 0), | ||
| 421 | ASSIGN_FETCH_TYPE(u64, u64, 0), | ||
| 422 | ASSIGN_FETCH_TYPE(s8, u8, 1), | ||
| 423 | ASSIGN_FETCH_TYPE(s16, u16, 1), | ||
| 424 | ASSIGN_FETCH_TYPE(s32, u32, 1), | ||
| 425 | ASSIGN_FETCH_TYPE(s64, u64, 1), | ||
| 426 | }; | ||
| 427 | |||
| 428 | static const struct fetch_type *find_fetch_type(const char *type) | ||
| 429 | { | 216 | { |
| 430 | int i; | 217 | int i; |
| 431 | 218 | ||
| @@ -446,44 +233,52 @@ static const struct fetch_type *find_fetch_type(const char *type) | |||
| 446 | 233 | ||
| 447 | switch (bs) { | 234 | switch (bs) { |
| 448 | case 8: | 235 | case 8: |
| 449 | return find_fetch_type("u8"); | 236 | return find_fetch_type("u8", ftbl); |
| 450 | case 16: | 237 | case 16: |
| 451 | return find_fetch_type("u16"); | 238 | return find_fetch_type("u16", ftbl); |
| 452 | case 32: | 239 | case 32: |
| 453 | return find_fetch_type("u32"); | 240 | return find_fetch_type("u32", ftbl); |
| 454 | case 64: | 241 | case 64: |
| 455 | return find_fetch_type("u64"); | 242 | return find_fetch_type("u64", ftbl); |
| 456 | default: | 243 | default: |
| 457 | goto fail; | 244 | goto fail; |
| 458 | } | 245 | } |
| 459 | } | 246 | } |
| 460 | 247 | ||
| 461 | for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++) | 248 | for (i = 0; ftbl[i].name; i++) { |
| 462 | if (strcmp(type, fetch_type_table[i].name) == 0) | 249 | if (strcmp(type, ftbl[i].name) == 0) |
| 463 | return &fetch_type_table[i]; | 250 | return &ftbl[i]; |
| 251 | } | ||
| 464 | 252 | ||
| 465 | fail: | 253 | fail: |
| 466 | return NULL; | 254 | return NULL; |
| 467 | } | 255 | } |
| 468 | 256 | ||
| 469 | /* Special function : only accept unsigned long */ | 257 | /* Special function : only accept unsigned long */ |
| 470 | static __kprobes void fetch_stack_address(struct pt_regs *regs, | 258 | static __kprobes void fetch_kernel_stack_address(struct pt_regs *regs, |
| 471 | void *dummy, void *dest) | 259 | void *dummy, void *dest) |
| 472 | { | 260 | { |
| 473 | *(unsigned long *)dest = kernel_stack_pointer(regs); | 261 | *(unsigned long *)dest = kernel_stack_pointer(regs); |
| 474 | } | 262 | } |
| 475 | 263 | ||
| 264 | static __kprobes void fetch_user_stack_address(struct pt_regs *regs, | ||
| 265 | void *dummy, void *dest) | ||
| 266 | { | ||
| 267 | *(unsigned long *)dest = user_stack_pointer(regs); | ||
| 268 | } | ||
| 269 | |||
| 476 | static fetch_func_t get_fetch_size_function(const struct fetch_type *type, | 270 | static fetch_func_t get_fetch_size_function(const struct fetch_type *type, |
| 477 | fetch_func_t orig_fn) | 271 | fetch_func_t orig_fn, |
| 272 | const struct fetch_type *ftbl) | ||
| 478 | { | 273 | { |
| 479 | int i; | 274 | int i; |
| 480 | 275 | ||
| 481 | if (type != &fetch_type_table[FETCH_TYPE_STRING]) | 276 | if (type != &ftbl[FETCH_TYPE_STRING]) |
| 482 | return NULL; /* Only string type needs size function */ | 277 | return NULL; /* Only string type needs size function */ |
| 483 | 278 | ||
| 484 | for (i = 0; i < FETCH_MTD_END; i++) | 279 | for (i = 0; i < FETCH_MTD_END; i++) |
| 485 | if (type->fetch[i] == orig_fn) | 280 | if (type->fetch[i] == orig_fn) |
| 486 | return fetch_type_table[FETCH_TYPE_STRSIZE].fetch[i]; | 281 | return ftbl[FETCH_TYPE_STRSIZE].fetch[i]; |
| 487 | 282 | ||
| 488 | WARN_ON(1); /* This should not happen */ | 283 | WARN_ON(1); /* This should not happen */ |
| 489 | 284 | ||
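Because the table is now a pointer supplied by the caller, ARRAY_SIZE() can no longer bound the scan; the tables are instead terminated by an empty sentinel entry (ASSIGN_FETCH_TYPE_END in the header changes below). The same lookup shape as a standalone sketch, with illustrative fields:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct fetch_type_like {
        const char *name;           /* NULL name terminates the table */
        size_t size;
    };

    static const struct fetch_type_like table[] = {
        { "u8", 1 }, { "u16", 2 }, { "u32", 4 }, { "u64", 8 },
        { NULL, 0 }                 /* the ASSIGN_FETCH_TYPE_END slot */
    };

    static const struct fetch_type_like *
    find(const char *name, const struct fetch_type_like *tbl)
    {
        int i;

        for (i = 0; tbl[i].name; i++)   /* same loop shape as the patch */
            if (strcmp(name, tbl[i].name) == 0)
                return &tbl[i];
        return NULL;
    }

    int main(void)
    {
        const struct fetch_type_like *t = find("u32", table);

        printf("%s has size %zu\n", t->name, t->size);
        return 0;
    }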
| @@ -516,7 +311,8 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset) | |||
| 516 | #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) | 311 | #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) |
| 517 | 312 | ||
| 518 | static int parse_probe_vars(char *arg, const struct fetch_type *t, | 313 | static int parse_probe_vars(char *arg, const struct fetch_type *t, |
| 519 | struct fetch_param *f, bool is_return) | 314 | struct fetch_param *f, bool is_return, |
| 315 | bool is_kprobe) | ||
| 520 | { | 316 | { |
| 521 | int ret = 0; | 317 | int ret = 0; |
| 522 | unsigned long param; | 318 | unsigned long param; |
| @@ -528,13 +324,16 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, | |||
| 528 | ret = -EINVAL; | 324 | ret = -EINVAL; |
| 529 | } else if (strncmp(arg, "stack", 5) == 0) { | 325 | } else if (strncmp(arg, "stack", 5) == 0) { |
| 530 | if (arg[5] == '\0') { | 326 | if (arg[5] == '\0') { |
| 531 | if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR) == 0) | 327 | if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR)) |
| 532 | f->fn = fetch_stack_address; | 328 | return -EINVAL; |
| 329 | |||
| 330 | if (is_kprobe) | ||
| 331 | f->fn = fetch_kernel_stack_address; | ||
| 533 | else | 332 | else |
| 534 | ret = -EINVAL; | 333 | f->fn = fetch_user_stack_address; |
| 535 | } else if (isdigit(arg[5])) { | 334 | } else if (isdigit(arg[5])) { |
| 536 | ret = kstrtoul(arg + 5, 10, ¶m); | 335 | ret = kstrtoul(arg + 5, 10, ¶m); |
| 537 | if (ret || param > PARAM_MAX_STACK) | 336 | if (ret || (is_kprobe && param > PARAM_MAX_STACK)) |
| 538 | ret = -EINVAL; | 337 | ret = -EINVAL; |
| 539 | else { | 338 | else { |
| 540 | f->fn = t->fetch[FETCH_MTD_stack]; | 339 | f->fn = t->fetch[FETCH_MTD_stack]; |
| @@ -552,20 +351,18 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, | |||
| 552 | static int parse_probe_arg(char *arg, const struct fetch_type *t, | 351 | static int parse_probe_arg(char *arg, const struct fetch_type *t, |
| 553 | struct fetch_param *f, bool is_return, bool is_kprobe) | 352 | struct fetch_param *f, bool is_return, bool is_kprobe) |
| 554 | { | 353 | { |
| 354 | const struct fetch_type *ftbl; | ||
| 555 | unsigned long param; | 355 | unsigned long param; |
| 556 | long offset; | 356 | long offset; |
| 557 | char *tmp; | 357 | char *tmp; |
| 558 | int ret; | 358 | int ret = 0; |
| 559 | |||
| 560 | ret = 0; | ||
| 561 | 359 | ||
| 562 | /* Until uprobe_events supports only reg arguments */ | 360 | ftbl = is_kprobe ? kprobes_fetch_type_table : uprobes_fetch_type_table; |
| 563 | if (!is_kprobe && arg[0] != '%') | 361 | BUG_ON(ftbl == NULL); |
| 564 | return -EINVAL; | ||
| 565 | 362 | ||
| 566 | switch (arg[0]) { | 363 | switch (arg[0]) { |
| 567 | case '$': | 364 | case '$': |
| 568 | ret = parse_probe_vars(arg + 1, t, f, is_return); | 365 | ret = parse_probe_vars(arg + 1, t, f, is_return, is_kprobe); |
| 569 | break; | 366 | break; |
| 570 | 367 | ||
| 571 | case '%': /* named register */ | 368 | case '%': /* named register */ |
| @@ -577,7 +374,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, | |||
| 577 | } | 374 | } |
| 578 | break; | 375 | break; |
| 579 | 376 | ||
| 580 | case '@': /* memory or symbol */ | 377 | case '@': /* memory, file-offset or symbol */ |
| 581 | if (isdigit(arg[1])) { | 378 | if (isdigit(arg[1])) { |
| 582 | ret = kstrtoul(arg + 1, 0, ¶m); | 379 | ret = kstrtoul(arg + 1, 0, ¶m); |
| 583 | if (ret) | 380 | if (ret) |
| @@ -585,7 +382,22 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, | |||
| 585 | 382 | ||
| 586 | f->fn = t->fetch[FETCH_MTD_memory]; | 383 | f->fn = t->fetch[FETCH_MTD_memory]; |
| 587 | f->data = (void *)param; | 384 | f->data = (void *)param; |
| 385 | } else if (arg[1] == '+') { | ||
| 386 | /* kprobes don't support file offsets */ | ||
| 387 | if (is_kprobe) | ||
| 388 | return -EINVAL; | ||
| 389 | |||
| 390 | ret = kstrtol(arg + 2, 0, &offset); | ||
| 391 | if (ret) | ||
| 392 | break; | ||
| 393 | |||
| 394 | f->fn = t->fetch[FETCH_MTD_file_offset]; | ||
| 395 | f->data = (void *)offset; | ||
| 588 | } else { | 396 | } else { |
| 397 | /* uprobes don't support symbols */ | ||
| 398 | if (!is_kprobe) | ||
| 399 | return -EINVAL; | ||
| 400 | |||
| 589 | ret = traceprobe_split_symbol_offset(arg + 1, &offset); | 401 | ret = traceprobe_split_symbol_offset(arg + 1, &offset); |
| 590 | if (ret) | 402 | if (ret) |
| 591 | break; | 403 | break; |
| @@ -616,7 +428,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, | |||
| 616 | struct deref_fetch_param *dprm; | 428 | struct deref_fetch_param *dprm; |
| 617 | const struct fetch_type *t2; | 429 | const struct fetch_type *t2; |
| 618 | 430 | ||
| 619 | t2 = find_fetch_type(NULL); | 431 | t2 = find_fetch_type(NULL, ftbl); |
| 620 | *tmp = '\0'; | 432 | *tmp = '\0'; |
| 621 | dprm = kzalloc(sizeof(struct deref_fetch_param), GFP_KERNEL); | 433 | dprm = kzalloc(sizeof(struct deref_fetch_param), GFP_KERNEL); |
| 622 | 434 | ||
| @@ -624,6 +436,9 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, | |||
| 624 | return -ENOMEM; | 436 | return -ENOMEM; |
| 625 | 437 | ||
| 626 | dprm->offset = offset; | 438 | dprm->offset = offset; |
| 439 | dprm->fetch = t->fetch[FETCH_MTD_memory]; | ||
| 440 | dprm->fetch_size = get_fetch_size_function(t, | ||
| 441 | dprm->fetch, ftbl); | ||
| 627 | ret = parse_probe_arg(arg, t2, &dprm->orig, is_return, | 442 | ret = parse_probe_arg(arg, t2, &dprm->orig, is_return, |
| 628 | is_kprobe); | 443 | is_kprobe); |
| 629 | if (ret) | 444 | if (ret) |
| @@ -685,9 +500,13 @@ static int __parse_bitfield_probe_arg(const char *bf, | |||
| 685 | int traceprobe_parse_probe_arg(char *arg, ssize_t *size, | 500 | int traceprobe_parse_probe_arg(char *arg, ssize_t *size, |
| 686 | struct probe_arg *parg, bool is_return, bool is_kprobe) | 501 | struct probe_arg *parg, bool is_return, bool is_kprobe) |
| 687 | { | 502 | { |
| 503 | const struct fetch_type *ftbl; | ||
| 688 | const char *t; | 504 | const char *t; |
| 689 | int ret; | 505 | int ret; |
| 690 | 506 | ||
| 507 | ftbl = is_kprobe ? kprobes_fetch_type_table : uprobes_fetch_type_table; | ||
| 508 | BUG_ON(ftbl == NULL); | ||
| 509 | |||
| 691 | if (strlen(arg) > MAX_ARGSTR_LEN) { | 510 | if (strlen(arg) > MAX_ARGSTR_LEN) { |
| 692 | pr_info("Argument is too long.: %s\n", arg); | 511 | pr_info("Argument is too long.: %s\n", arg); |
| 693 | return -ENOSPC; | 512 | return -ENOSPC; |
| @@ -702,7 +521,7 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, | |||
| 702 | arg[t - parg->comm] = '\0'; | 521 | arg[t - parg->comm] = '\0'; |
| 703 | t++; | 522 | t++; |
| 704 | } | 523 | } |
| 705 | parg->type = find_fetch_type(t); | 524 | parg->type = find_fetch_type(t, ftbl); |
| 706 | if (!parg->type) { | 525 | if (!parg->type) { |
| 707 | pr_info("Unsupported type: %s\n", t); | 526 | pr_info("Unsupported type: %s\n", t); |
| 708 | return -EINVAL; | 527 | return -EINVAL; |
| @@ -716,7 +535,8 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, | |||
| 716 | 535 | ||
| 717 | if (ret >= 0) { | 536 | if (ret >= 0) { |
| 718 | parg->fetch_size.fn = get_fetch_size_function(parg->type, | 537 | parg->fetch_size.fn = get_fetch_size_function(parg->type, |
| 719 | parg->fetch.fn); | 538 | parg->fetch.fn, |
| 539 | ftbl); | ||
| 720 | parg->fetch_size.data = parg->fetch.data; | 540 | parg->fetch_size.data = parg->fetch.data; |
| 721 | } | 541 | } |
| 722 | 542 | ||
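For orientation, the grammar this function parses now differs by flavor: @symbol+offset and the $stackN depth check are kprobe-only, while the new @+file_offset form is uprobe-only. Some illustrative definitions (event names, symbols, and offsets are made up; see Documentation/trace/kprobetrace.txt and uprobetracer.txt for the authoritative grammar):

    static const char * const example_probes[] = {
        "p:myprobe do_sys_open dfd=%ax fname=+0(%si):string", /* reg + deref */
        "r:myretprobe do_sys_open ret=$retval",               /* return value */
        "p:mystack do_sys_open sp=$stack a0=$stack0",         /* stack slots */
        "p:mybits vfs_read mode=+0(%di):b4@8/32",             /* bitfield */
        /* uprobe_events only: '@+' fetches from a file offset */
        "p:myuprobe /bin/bash:0x4245c0 d=@+0x1000",
    };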
| @@ -837,3 +657,65 @@ out: | |||
| 837 | 657 | ||
| 838 | return ret; | 658 | return ret; |
| 839 | } | 659 | } |
| 660 | |||
| 661 | static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, | ||
| 662 | bool is_return) | ||
| 663 | { | ||
| 664 | int i; | ||
| 665 | int pos = 0; | ||
| 666 | |||
| 667 | const char *fmt, *arg; | ||
| 668 | |||
| 669 | if (!is_return) { | ||
| 670 | fmt = "(%lx)"; | ||
| 671 | arg = "REC->" FIELD_STRING_IP; | ||
| 672 | } else { | ||
| 673 | fmt = "(%lx <- %lx)"; | ||
| 674 | arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP; | ||
| 675 | } | ||
| 676 | |||
| 677 | /* When len=0, we just calculate the needed length */ | ||
| 678 | #define LEN_OR_ZERO (len ? len - pos : 0) | ||
| 679 | |||
| 680 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); | ||
| 681 | |||
| 682 | for (i = 0; i < tp->nr_args; i++) { | ||
| 683 | pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s", | ||
| 684 | tp->args[i].name, tp->args[i].type->fmt); | ||
| 685 | } | ||
| 686 | |||
| 687 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); | ||
| 688 | |||
| 689 | for (i = 0; i < tp->nr_args; i++) { | ||
| 690 | if (strcmp(tp->args[i].type->name, "string") == 0) | ||
| 691 | pos += snprintf(buf + pos, LEN_OR_ZERO, | ||
| 692 | ", __get_str(%s)", | ||
| 693 | tp->args[i].name); | ||
| 694 | else | ||
| 695 | pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", | ||
| 696 | tp->args[i].name); | ||
| 697 | } | ||
| 698 | |||
| 699 | #undef LEN_OR_ZERO | ||
| 700 | |||
| 701 | /* return the length of print_fmt */ | ||
| 702 | return pos; | ||
| 703 | } | ||
| 704 | |||
| 705 | int set_print_fmt(struct trace_probe *tp, bool is_return) | ||
| 706 | { | ||
| 707 | int len; | ||
| 708 | char *print_fmt; | ||
| 709 | |||
| 710 | /* First: called with 0 length to calculate the needed length */ | ||
| 711 | len = __set_print_fmt(tp, NULL, 0, is_return); | ||
| 712 | print_fmt = kmalloc(len + 1, GFP_KERNEL); | ||
| 713 | if (!print_fmt) | ||
| 714 | return -ENOMEM; | ||
| 715 | |||
| 716 | /* Second: actually write the @print_fmt */ | ||
| 717 | __set_print_fmt(tp, print_fmt, len + 1, is_return); | ||
| 718 | tp->call.print_fmt = print_fmt; | ||
| 719 | |||
| 720 | return 0; | ||
| 721 | } | ||
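__set_print_fmt() is the classic two-pass snprintf() idiom: call once with len == 0 to measure, allocate, then call again to fill. A standalone version of the same idiom (glibc, like the kernel's snprintf, accepts a zero size; the buf + pos arithmetic on the NULL first-pass buffer is tolerated in practice, exactly as in the kernel original):

    #include <stdio.h>
    #include <stdlib.h>

    static int build_fmt(char *buf, int len, const char *name)
    {
        int pos = 0;

    #define LEN_OR_ZERO (len ? len - pos : 0)
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"(%%lx)");
        pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%%d", name);
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\", REC->%s", name);
    #undef LEN_OR_ZERO

        return pos;     /* length needed, excluding the trailing NUL */
    }

    int main(void)
    {
        int len = build_fmt(NULL, 0, "myarg");
        char *fmt = malloc(len + 1);

        if (!fmt)
            return 1;
        build_fmt(fmt, len + 1, "myarg");
        puts(fmt);      /* "(%lx) myarg=%d", REC->myarg */
        free(fmt);
        return 0;
    }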
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 5c7e09d10d74..b73574a5f429 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h | |||
| @@ -81,6 +81,17 @@ | |||
| 81 | */ | 81 | */ |
| 82 | #define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) | 82 | #define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) |
| 83 | 83 | ||
| 84 | static inline void *get_rloc_data(u32 *dl) | ||
| 85 | { | ||
| 86 | return (u8 *)dl + get_rloc_offs(*dl); | ||
| 87 | } | ||
| 88 | |||
| 89 | /* For data_loc conversion */ | ||
| 90 | static inline void *get_loc_data(u32 *dl, void *ent) | ||
| 91 | { | ||
| 92 | return (u8 *)ent + get_rloc_offs(*dl); | ||
| 93 | } | ||
| 94 | |||
| 84 | /* Data fetch function type */ | 95 | /* Data fetch function type */ |
| 85 | typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); | 96 | typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); |
| 86 | /* Printing function type */ | 97 | /* Printing function type */ |
| @@ -95,6 +106,7 @@ enum { | |||
| 95 | FETCH_MTD_symbol, | 106 | FETCH_MTD_symbol, |
| 96 | FETCH_MTD_deref, | 107 | FETCH_MTD_deref, |
| 97 | FETCH_MTD_bitfield, | 108 | FETCH_MTD_bitfield, |
| 109 | FETCH_MTD_file_offset, | ||
| 98 | FETCH_MTD_END, | 110 | FETCH_MTD_END, |
| 99 | }; | 111 | }; |
| 100 | 112 | ||
| @@ -115,6 +127,148 @@ struct fetch_param { | |||
| 115 | void *data; | 127 | void *data; |
| 116 | }; | 128 | }; |
| 117 | 129 | ||
| 130 | /* For defining macros, define string/string_size types */ | ||
| 131 | typedef u32 string; | ||
| 132 | typedef u32 string_size; | ||
| 133 | |||
| 134 | #define PRINT_TYPE_FUNC_NAME(type) print_type_##type | ||
| 135 | #define PRINT_TYPE_FMT_NAME(type) print_type_format_##type | ||
| 136 | |||
| 137 | /* Printing in basic type function template */ | ||
| 138 | #define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \ | ||
| 139 | __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ | ||
| 140 | const char *name, \ | ||
| 141 | void *data, void *ent); \ | ||
| 142 | extern const char PRINT_TYPE_FMT_NAME(type)[] | ||
| 143 | |||
| 144 | DECLARE_BASIC_PRINT_TYPE_FUNC(u8); | ||
| 145 | DECLARE_BASIC_PRINT_TYPE_FUNC(u16); | ||
| 146 | DECLARE_BASIC_PRINT_TYPE_FUNC(u32); | ||
| 147 | DECLARE_BASIC_PRINT_TYPE_FUNC(u64); | ||
| 148 | DECLARE_BASIC_PRINT_TYPE_FUNC(s8); | ||
| 149 | DECLARE_BASIC_PRINT_TYPE_FUNC(s16); | ||
| 150 | DECLARE_BASIC_PRINT_TYPE_FUNC(s32); | ||
| 151 | DECLARE_BASIC_PRINT_TYPE_FUNC(s64); | ||
| 152 | DECLARE_BASIC_PRINT_TYPE_FUNC(string); | ||
| 153 | |||
| 154 | #define FETCH_FUNC_NAME(method, type) fetch_##method##_##type | ||
| 155 | |||
| 156 | /* Declare macro for basic types */ | ||
| 157 | #define DECLARE_FETCH_FUNC(method, type) \ | ||
| 158 | extern void FETCH_FUNC_NAME(method, type)(struct pt_regs *regs, \ | ||
| 159 | void *data, void *dest) | ||
| 160 | |||
| 161 | #define DECLARE_BASIC_FETCH_FUNCS(method) \ | ||
| 162 | DECLARE_FETCH_FUNC(method, u8); \ | ||
| 163 | DECLARE_FETCH_FUNC(method, u16); \ | ||
| 164 | DECLARE_FETCH_FUNC(method, u32); \ | ||
| 165 | DECLARE_FETCH_FUNC(method, u64) | ||
| 166 | |||
| 167 | DECLARE_BASIC_FETCH_FUNCS(reg); | ||
| 168 | #define fetch_reg_string NULL | ||
| 169 | #define fetch_reg_string_size NULL | ||
| 170 | |||
| 171 | DECLARE_BASIC_FETCH_FUNCS(retval); | ||
| 172 | #define fetch_retval_string NULL | ||
| 173 | #define fetch_retval_string_size NULL | ||
| 174 | |||
| 175 | DECLARE_BASIC_FETCH_FUNCS(symbol); | ||
| 176 | DECLARE_FETCH_FUNC(symbol, string); | ||
| 177 | DECLARE_FETCH_FUNC(symbol, string_size); | ||
| 178 | |||
| 179 | DECLARE_BASIC_FETCH_FUNCS(deref); | ||
| 180 | DECLARE_FETCH_FUNC(deref, string); | ||
| 181 | DECLARE_FETCH_FUNC(deref, string_size); | ||
| 182 | |||
| 183 | DECLARE_BASIC_FETCH_FUNCS(bitfield); | ||
| 184 | #define fetch_bitfield_string NULL | ||
| 185 | #define fetch_bitfield_string_size NULL | ||
| 186 | |||
| 187 | /* | ||
| 188 | * Define macro for basic types - we don't need to define s* types, because | ||
| 189 | * we have to care only about bitwidth at recording time. | ||
| 190 | */ | ||
| 191 | #define DEFINE_BASIC_FETCH_FUNCS(method) \ | ||
| 192 | DEFINE_FETCH_##method(u8) \ | ||
| 193 | DEFINE_FETCH_##method(u16) \ | ||
| 194 | DEFINE_FETCH_##method(u32) \ | ||
| 195 | DEFINE_FETCH_##method(u64) | ||
| 196 | |||
| 197 | /* Default (unsigned long) fetch type */ | ||
| 198 | #define __DEFAULT_FETCH_TYPE(t) u##t | ||
| 199 | #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t) | ||
| 200 | #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) | ||
| 201 | #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) | ||
| 202 | |||
| 203 | #define ASSIGN_FETCH_FUNC(method, type) \ | ||
| 204 | [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type) | ||
| 205 | |||
| 206 | #define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ | ||
| 207 | {.name = _name, \ | ||
| 208 | .size = _size, \ | ||
| 209 | .is_signed = sign, \ | ||
| 210 | .print = PRINT_TYPE_FUNC_NAME(ptype), \ | ||
| 211 | .fmt = PRINT_TYPE_FMT_NAME(ptype), \ | ||
| 212 | .fmttype = _fmttype, \ | ||
| 213 | .fetch = { \ | ||
| 214 | ASSIGN_FETCH_FUNC(reg, ftype), \ | ||
| 215 | ASSIGN_FETCH_FUNC(stack, ftype), \ | ||
| 216 | ASSIGN_FETCH_FUNC(retval, ftype), \ | ||
| 217 | ASSIGN_FETCH_FUNC(memory, ftype), \ | ||
| 218 | ASSIGN_FETCH_FUNC(symbol, ftype), \ | ||
| 219 | ASSIGN_FETCH_FUNC(deref, ftype), \ | ||
| 220 | ASSIGN_FETCH_FUNC(bitfield, ftype), \ | ||
| 221 | ASSIGN_FETCH_FUNC(file_offset, ftype), \ | ||
| 222 | } \ | ||
| 223 | } | ||
| 224 | |||
| 225 | #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ | ||
| 226 | __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype) | ||
| 227 | |||
| 228 | #define ASSIGN_FETCH_TYPE_END {} | ||
| 229 | |||
| 230 | #define FETCH_TYPE_STRING 0 | ||
| 231 | #define FETCH_TYPE_STRSIZE 1 | ||
| 232 | |||
| 233 | /* | ||
| 234 | * Fetch type information table. | ||
| 235 | * It's declared as a weak symbol due to conditional compilation. | ||
| 236 | */ | ||
| 237 | extern __weak const struct fetch_type kprobes_fetch_type_table[]; | ||
| 238 | extern __weak const struct fetch_type uprobes_fetch_type_table[]; | ||
| 239 | |||
| 240 | #ifdef CONFIG_KPROBE_EVENT | ||
| 241 | struct symbol_cache; | ||
| 242 | unsigned long update_symbol_cache(struct symbol_cache *sc); | ||
| 243 | void free_symbol_cache(struct symbol_cache *sc); | ||
| 244 | struct symbol_cache *alloc_symbol_cache(const char *sym, long offset); | ||
| 245 | #else | ||
| 246 | /* uprobes do not support symbol fetch methods */ | ||
| 247 | #define fetch_symbol_u8 NULL | ||
| 248 | #define fetch_symbol_u16 NULL | ||
| 249 | #define fetch_symbol_u32 NULL | ||
| 250 | #define fetch_symbol_u64 NULL | ||
| 251 | #define fetch_symbol_string NULL | ||
| 252 | #define fetch_symbol_string_size NULL | ||
| 253 | |||
| 254 | struct symbol_cache { | ||
| 255 | }; | ||
| 256 | static inline unsigned long __used update_symbol_cache(struct symbol_cache *sc) | ||
| 257 | { | ||
| 258 | return 0; | ||
| 259 | } | ||
| 260 | |||
| 261 | static inline void __used free_symbol_cache(struct symbol_cache *sc) | ||
| 262 | { | ||
| 263 | } | ||
| 264 | |||
| 265 | static inline struct symbol_cache * __used | ||
| 266 | alloc_symbol_cache(const char *sym, long offset) | ||
| 267 | { | ||
| 268 | return NULL; | ||
| 269 | } | ||
| 270 | #endif /* CONFIG_KPROBE_EVENT */ | ||
| 271 | |||
| 118 | struct probe_arg { | 272 | struct probe_arg { |
| 119 | struct fetch_param fetch; | 273 | struct fetch_param fetch; |
| 120 | struct fetch_param fetch_size; | 274 | struct fetch_param fetch_size; |
| @@ -124,6 +278,26 @@ struct probe_arg { | |||
| 124 | const struct fetch_type *type; /* Type of this argument */ | 278 | const struct fetch_type *type; /* Type of this argument */ |
| 125 | }; | 279 | }; |
| 126 | 280 | ||
| 281 | struct trace_probe { | ||
| 282 | unsigned int flags; /* For TP_FLAG_* */ | ||
| 283 | struct ftrace_event_class class; | ||
| 284 | struct ftrace_event_call call; | ||
| 285 | struct list_head files; | ||
| 286 | ssize_t size; /* trace entry size */ | ||
| 287 | unsigned int nr_args; | ||
| 288 | struct probe_arg args[]; | ||
| 289 | }; | ||
| 290 | |||
| 291 | static inline bool trace_probe_is_enabled(struct trace_probe *tp) | ||
| 292 | { | ||
| 293 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); | ||
| 294 | } | ||
| 295 | |||
| 296 | static inline bool trace_probe_is_registered(struct trace_probe *tp) | ||
| 297 | { | ||
| 298 | return !!(tp->flags & TP_FLAG_REGISTERED); | ||
| 299 | } | ||
| 300 | |||
| 127 | static inline __kprobes void call_fetch(struct fetch_param *fprm, | 301 | static inline __kprobes void call_fetch(struct fetch_param *fprm, |
| 128 | struct pt_regs *regs, void *dest) | 302 | struct pt_regs *regs, void *dest) |
| 129 | { | 303 | { |
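struct trace_probe now holds the state common to both probe flavors; trace_kprobe and trace_uprobe embed it and get back to the outer object with container_of(). A reduced sketch of that embedding (field sets abbreviated, names illustrative):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct trace_probe_like {
        unsigned int flags;
    };

    struct trace_uprobe_like {
        unsigned long offset;
        struct trace_probe_like tp;     /* shared state lives here */
    };

    int main(void)
    {
        struct trace_uprobe_like tu = { .offset = 0x1000 };
        struct trace_probe_like *tp = &tu.tp;
        struct trace_uprobe_like *back =
            container_of(tp, struct trace_uprobe_like, tp);

        printf("%lx\n", back->offset);  /* prints 1000 */
        return 0;
    }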
| @@ -158,3 +332,53 @@ extern ssize_t traceprobe_probes_write(struct file *file, | |||
| 158 | int (*createfn)(int, char**)); | 332 | int (*createfn)(int, char**)); |
| 159 | 333 | ||
| 160 | extern int traceprobe_command(const char *buf, int (*createfn)(int, char**)); | 334 | extern int traceprobe_command(const char *buf, int (*createfn)(int, char**)); |
| 335 | |||
| 336 | /* Sum up total data length for dynamic arrays (strings) */ | ||
| 337 | static inline __kprobes int | ||
| 338 | __get_data_size(struct trace_probe *tp, struct pt_regs *regs) | ||
| 339 | { | ||
| 340 | int i, ret = 0; | ||
| 341 | u32 len; | ||
| 342 | |||
| 343 | for (i = 0; i < tp->nr_args; i++) | ||
| 344 | if (unlikely(tp->args[i].fetch_size.fn)) { | ||
| 345 | call_fetch(&tp->args[i].fetch_size, regs, &len); | ||
| 346 | ret += len; | ||
| 347 | } | ||
| 348 | |||
| 349 | return ret; | ||
| 350 | } | ||
| 351 | |||
| 352 | /* Store the value of each argument */ | ||
| 353 | static inline __kprobes void | ||
| 354 | store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs, | ||
| 355 | u8 *data, int maxlen) | ||
| 356 | { | ||
| 357 | int i; | ||
| 358 | u32 end = tp->size; | ||
| 359 | u32 *dl; /* Data (relative) location */ | ||
| 360 | |||
| 361 | for (i = 0; i < tp->nr_args; i++) { | ||
| 362 | if (unlikely(tp->args[i].fetch_size.fn)) { | ||
| 363 | /* | ||
| 364 | * First, we set the relative location and | ||
| 365 | * maximum data length to *dl | ||
| 366 | */ | ||
| 367 | dl = (u32 *)(data + tp->args[i].offset); | ||
| 368 | *dl = make_data_rloc(maxlen, end - tp->args[i].offset); | ||
| 369 | /* Then try to fetch string or dynamic array data */ | ||
| 370 | call_fetch(&tp->args[i].fetch, regs, dl); | ||
| 371 | /* Reduce maximum length */ | ||
| 372 | end += get_rloc_len(*dl); | ||
| 373 | maxlen -= get_rloc_len(*dl); | ||
| 374 | /* Trick here, convert data_rloc to data_loc */ | ||
| 375 | *dl = convert_rloc_to_loc(*dl, | ||
| 376 | ent_size + tp->args[i].offset); | ||
| 377 | } else | ||
| 378 | /* Just fetching data normally */ | ||
| 379 | call_fetch(&tp->args[i].fetch, regs, | ||
| 380 | data + tp->args[i].offset); | ||
| 381 | } | ||
| 382 | } | ||
| 383 | |||
| 384 | extern int set_print_fmt(struct trace_probe *tp, bool is_return); | ||
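__get_data_size() and store_trace_args() rely on the data-location encoding from the macros earlier in this header: a u32 with the data length in the high 16 bits and the (relative) offset in the low 16 bits. A standalone sketch of that packing (the macro bodies are restated from trace_probe.h, so treat the exact shifts and masks as an assumption to verify against the header):

    #include <stdio.h>

    typedef unsigned int u32;

    #define make_data_rloc(len, roffs) \
        (((u32)(len) << 16) | ((u32)(roffs) & 0xffff))
    #define get_rloc_len(dl)    ((u32)(dl) >> 16)
    #define get_rloc_offs(dl)   ((u32)(dl) & 0xffff)

    int main(void)
    {
        u32 dl = make_data_rloc(5, 24); /* 5-byte string, 24 bytes in */

        printf("len=%u offs=%u\n", get_rloc_len(dl), get_rloc_offs(dl));
        return 0;
    }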
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index fee77e15d815..6e32635e5e57 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
| 17 | #include <linux/ftrace.h> | 17 | #include <linux/ftrace.h> |
| 18 | #include <linux/sched/rt.h> | 18 | #include <linux/sched/rt.h> |
| 19 | #include <linux/sched/deadline.h> | ||
| 19 | #include <trace/events/sched.h> | 20 | #include <trace/events/sched.h> |
| 20 | #include "trace.h" | 21 | #include "trace.h" |
| 21 | 22 | ||
| @@ -27,6 +28,8 @@ static int wakeup_cpu; | |||
| 27 | static int wakeup_current_cpu; | 28 | static int wakeup_current_cpu; |
| 28 | static unsigned wakeup_prio = -1; | 29 | static unsigned wakeup_prio = -1; |
| 29 | static int wakeup_rt; | 30 | static int wakeup_rt; |
| 31 | static int wakeup_dl; | ||
| 32 | static int tracing_dl = 0; | ||
| 30 | 33 | ||
| 31 | static arch_spinlock_t wakeup_lock = | 34 | static arch_spinlock_t wakeup_lock = |
| 32 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 35 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| @@ -437,6 +440,7 @@ static void __wakeup_reset(struct trace_array *tr) | |||
| 437 | { | 440 | { |
| 438 | wakeup_cpu = -1; | 441 | wakeup_cpu = -1; |
| 439 | wakeup_prio = -1; | 442 | wakeup_prio = -1; |
| 443 | tracing_dl = 0; | ||
| 440 | 444 | ||
| 441 | if (wakeup_task) | 445 | if (wakeup_task) |
| 442 | put_task_struct(wakeup_task); | 446 | put_task_struct(wakeup_task); |
| @@ -472,9 +476,17 @@ probe_wakeup(void *ignore, struct task_struct *p, int success) | |||
| 472 | tracing_record_cmdline(p); | 476 | tracing_record_cmdline(p); |
| 473 | tracing_record_cmdline(current); | 477 | tracing_record_cmdline(current); |
| 474 | 478 | ||
| 475 | if ((wakeup_rt && !rt_task(p)) || | 479 | /* |
| 476 | p->prio >= wakeup_prio || | 480 | * The semantics are as follows: |
| 477 | p->prio >= current->prio) | 481 | * - wakeup tracer handles all tasks in the system, independently |
| 482 | * of their scheduling class; | ||
| 483 | * - wakeup_rt tracer handles tasks belonging to sched_dl and | ||
| 484 | * sched_rt class; | ||
| 485 | * - wakeup_dl handles tasks belonging to sched_dl class only. | ||
| 486 | */ | ||
| 487 | if (tracing_dl || (wakeup_dl && !dl_task(p)) || | ||
| 488 | (wakeup_rt && !dl_task(p) && !rt_task(p)) || | ||
| 489 | (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio))) | ||
| 478 | return; | 490 | return; |
| 479 | 491 | ||
| 480 | pc = preempt_count(); | 492 | pc = preempt_count(); |
| @@ -486,7 +498,8 @@ probe_wakeup(void *ignore, struct task_struct *p, int success) | |||
| 486 | arch_spin_lock(&wakeup_lock); | 498 | arch_spin_lock(&wakeup_lock); |
| 487 | 499 | ||
| 488 | /* check for races. */ | 500 | /* check for races. */ |
| 489 | if (!tracer_enabled || p->prio >= wakeup_prio) | 501 | if (!tracer_enabled || tracing_dl || |
| 502 | (!dl_task(p) && p->prio >= wakeup_prio)) | ||
| 490 | goto out_locked; | 503 | goto out_locked; |
| 491 | 504 | ||
| 492 | /* reset the trace */ | 505 | /* reset the trace */ |
| @@ -496,6 +509,15 @@ probe_wakeup(void *ignore, struct task_struct *p, int success) | |||
| 496 | wakeup_current_cpu = wakeup_cpu; | 509 | wakeup_current_cpu = wakeup_cpu; |
| 497 | wakeup_prio = p->prio; | 510 | wakeup_prio = p->prio; |
| 498 | 511 | ||
| 512 | /* | ||
| 513 | * Once you start tracing a -deadline task, don't bother tracing | ||
| 514 | * another task until the first one wakes up. | ||
| 515 | */ | ||
| 516 | if (dl_task(p)) | ||
| 517 | tracing_dl = 1; | ||
| 518 | else | ||
| 519 | tracing_dl = 0; | ||
| 520 | |||
| 499 | wakeup_task = p; | 521 | wakeup_task = p; |
| 500 | get_task_struct(wakeup_task); | 522 | get_task_struct(wakeup_task); |
| 501 | 523 | ||
| @@ -597,16 +619,25 @@ static int __wakeup_tracer_init(struct trace_array *tr) | |||
| 597 | 619 | ||
| 598 | static int wakeup_tracer_init(struct trace_array *tr) | 620 | static int wakeup_tracer_init(struct trace_array *tr) |
| 599 | { | 621 | { |
| 622 | wakeup_dl = 0; | ||
| 600 | wakeup_rt = 0; | 623 | wakeup_rt = 0; |
| 601 | return __wakeup_tracer_init(tr); | 624 | return __wakeup_tracer_init(tr); |
| 602 | } | 625 | } |
| 603 | 626 | ||
| 604 | static int wakeup_rt_tracer_init(struct trace_array *tr) | 627 | static int wakeup_rt_tracer_init(struct trace_array *tr) |
| 605 | { | 628 | { |
| 629 | wakeup_dl = 0; | ||
| 606 | wakeup_rt = 1; | 630 | wakeup_rt = 1; |
| 607 | return __wakeup_tracer_init(tr); | 631 | return __wakeup_tracer_init(tr); |
| 608 | } | 632 | } |
| 609 | 633 | ||
| 634 | static int wakeup_dl_tracer_init(struct trace_array *tr) | ||
| 635 | { | ||
| 636 | wakeup_dl = 1; | ||
| 637 | wakeup_rt = 0; | ||
| 638 | return __wakeup_tracer_init(tr); | ||
| 639 | } | ||
| 640 | |||
| 610 | static void wakeup_tracer_reset(struct trace_array *tr) | 641 | static void wakeup_tracer_reset(struct trace_array *tr) |
| 611 | { | 642 | { |
| 612 | int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; | 643 | int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; |
| @@ -674,6 +705,28 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
| 674 | .use_max_tr = true, | 705 | .use_max_tr = true, |
| 675 | }; | 706 | }; |
| 676 | 707 | ||
| 708 | static struct tracer wakeup_dl_tracer __read_mostly = | ||
| 709 | { | ||
| 710 | .name = "wakeup_dl", | ||
| 711 | .init = wakeup_dl_tracer_init, | ||
| 712 | .reset = wakeup_tracer_reset, | ||
| 713 | .start = wakeup_tracer_start, | ||
| 714 | .stop = wakeup_tracer_stop, | ||
| 715 | .wait_pipe = poll_wait_pipe, | ||
| 716 | .print_max = true, | ||
| 717 | .print_header = wakeup_print_header, | ||
| 718 | .print_line = wakeup_print_line, | ||
| 719 | .flags = &tracer_flags, | ||
| 720 | .set_flag = wakeup_set_flag, | ||
| 721 | .flag_changed = wakeup_flag_changed, | ||
| 722 | #ifdef CONFIG_FTRACE_SELFTEST | ||
| 723 | .selftest = trace_selftest_startup_wakeup, | ||
| 724 | #endif | ||
| 725 | .open = wakeup_trace_open, | ||
| 726 | .close = wakeup_trace_close, | ||
| 727 | .use_max_tr = true, | ||
| 728 | }; | ||
| 729 | |||
| 677 | __init static int init_wakeup_tracer(void) | 730 | __init static int init_wakeup_tracer(void) |
| 678 | { | 731 | { |
| 679 | int ret; | 732 | int ret; |
| @@ -686,6 +739,10 @@ __init static int init_wakeup_tracer(void) | |||
| 686 | if (ret) | 739 | if (ret) |
| 687 | return ret; | 740 | return ret; |
| 688 | 741 | ||
| 742 | ret = register_tracer(&wakeup_dl_tracer); | ||
| 743 | if (ret) | ||
| 744 | return ret; | ||
| 745 | |||
| 689 | return 0; | 746 | return 0; |
| 690 | } | 747 | } |
| 691 | core_initcall(init_wakeup_tracer); | 748 | core_initcall(init_wakeup_tracer); |
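Once registered, wakeup_dl is selected like any other tracer. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug (on tracefs-only systems the directory is /sys/kernel/tracing):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *tracer = "wakeup_dl";
        int fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, tracer, strlen(tracer)) < 0)
            perror("write");
        close(fd);
        return 0;
    }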
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index a7329b7902f8..e98fca60974f 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
| @@ -1022,11 +1022,16 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr) | |||
| 1022 | #ifdef CONFIG_SCHED_TRACER | 1022 | #ifdef CONFIG_SCHED_TRACER |
| 1023 | static int trace_wakeup_test_thread(void *data) | 1023 | static int trace_wakeup_test_thread(void *data) |
| 1024 | { | 1024 | { |
| 1025 | /* Make this a RT thread, doesn't need to be too high */ | 1025 | /* Make this a -deadline thread */ |
| 1026 | static const struct sched_param param = { .sched_priority = 5 }; | 1026 | static const struct sched_attr attr = { |
| 1027 | .sched_policy = SCHED_DEADLINE, | ||
| 1028 | .sched_runtime = 100000ULL, | ||
| 1029 | .sched_deadline = 10000000ULL, | ||
| 1030 | .sched_period = 10000000ULL | ||
| 1031 | }; | ||
| 1027 | struct completion *x = data; | 1032 | struct completion *x = data; |
| 1028 | 1033 | ||
| 1029 | sched_setscheduler(current, SCHED_FIFO, ¶m); | 1034 | sched_setattr(current, &attr); |
| 1030 | 1035 | ||
| 1031 | /* Let the test know we have a new prio */ | 1036 | /* Let the test know we have a new prio */ |
| 1032 | complete(x); | 1037 | complete(x); |
| @@ -1040,8 +1045,8 @@ static int trace_wakeup_test_thread(void *data) | |||
| 1040 | /* we are awake, now wait to disappear */ | 1045 | /* we are awake, now wait to disappear */ |
| 1041 | while (!kthread_should_stop()) { | 1046 | while (!kthread_should_stop()) { |
| 1042 | /* | 1047 | /* |
| 1043 | * This is an RT task, do short sleeps to let | 1048 | * This will likely be the system top priority |
| 1044 | * others run. | 1049 | * task, do short sleeps to let others run. |
| 1045 | */ | 1050 | */ |
| 1046 | msleep(100); | 1051 | msleep(100); |
| 1047 | } | 1052 | } |
| @@ -1054,21 +1059,21 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
| 1054 | { | 1059 | { |
| 1055 | unsigned long save_max = tracing_max_latency; | 1060 | unsigned long save_max = tracing_max_latency; |
| 1056 | struct task_struct *p; | 1061 | struct task_struct *p; |
| 1057 | struct completion isrt; | 1062 | struct completion is_ready; |
| 1058 | unsigned long count; | 1063 | unsigned long count; |
| 1059 | int ret; | 1064 | int ret; |
| 1060 | 1065 | ||
| 1061 | init_completion(&isrt); | 1066 | init_completion(&is_ready); |
| 1062 | 1067 | ||
| 1063 | /* create a high prio thread */ | 1068 | /* create a -deadline thread */ |
| 1064 | p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test"); | 1069 | p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test"); |
| 1065 | if (IS_ERR(p)) { | 1070 | if (IS_ERR(p)) { |
| 1066 | printk(KERN_CONT "Failed to create ftrace wakeup test thread "); | 1071 | printk(KERN_CONT "Failed to create ftrace wakeup test thread "); |
| 1067 | return -1; | 1072 | return -1; |
| 1068 | } | 1073 | } |
| 1069 | 1074 | ||
| 1070 | /* make sure the thread is running at an RT prio */ | 1075 | /* make sure the thread is running at -deadline policy */ |
| 1071 | wait_for_completion(&isrt); | 1076 | wait_for_completion(&is_ready); |
| 1072 | 1077 | ||
| 1073 | /* start the tracing */ | 1078 | /* start the tracing */ |
| 1074 | ret = tracer_init(trace, tr); | 1079 | ret = tracer_init(trace, tr); |
| @@ -1082,19 +1087,19 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
| 1082 | 1087 | ||
| 1083 | while (p->on_rq) { | 1088 | while (p->on_rq) { |
| 1084 | /* | 1089 | /* |
| 1085 | * Sleep to make sure the RT thread is asleep too. | 1090 | * Sleep to make sure the -deadline thread is asleep too. |
| 1086 | * On virtual machines we can't rely on timings, | 1091 | * On virtual machines we can't rely on timings, |
| 1087 | * but we want to make sure this test still works. | 1092 | * but we want to make sure this test still works. |
| 1088 | */ | 1093 | */ |
| 1089 | msleep(100); | 1094 | msleep(100); |
| 1090 | } | 1095 | } |
| 1091 | 1096 | ||
| 1092 | init_completion(&isrt); | 1097 | init_completion(&is_ready); |
| 1093 | 1098 | ||
| 1094 | wake_up_process(p); | 1099 | wake_up_process(p); |
| 1095 | 1100 | ||
| 1096 | /* Wait for the task to wake up */ | 1101 | /* Wait for the task to wake up */ |
| 1097 | wait_for_completion(&isrt); | 1102 | wait_for_completion(&is_ready); |
| 1098 | 1103 | ||
| 1099 | /* stop the tracing. */ | 1104 | /* stop the tracing. */ |
| 1100 | tracing_stop(); | 1105 | tracing_stop(); |
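The selftest's sched_setattr() call has a userspace counterpart, handy for generating -deadline wakeups for the new tracer to latch onto. glibc had no wrapper at the time, so the syscall is invoked directly; the struct layout restates include/uapi/linux/sched.h of this era, so verify it against your headers:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
    };

    #define SCHED_DEADLINE 6

    int main(void)
    {
        struct sched_attr attr = {
            .size           = sizeof(attr),
            .sched_policy   = SCHED_DEADLINE,
            .sched_runtime  = 100000ULL,    /* same budget as the selftest */
            .sched_deadline = 10000000ULL,
            .sched_period   = 10000000ULL,
        };

        /* SYS_sched_setattr needs headers new enough to define it */
        if (syscall(SYS_sched_setattr, 0, &attr, 0))
            perror("sched_setattr");
        return 0;
    }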
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index b20428c5efe2..e6be585cf06a 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -382,7 +382,7 @@ static const struct file_operations stack_trace_filter_fops = { | |||
| 382 | .open = stack_trace_filter_open, | 382 | .open = stack_trace_filter_open, |
| 383 | .read = seq_read, | 383 | .read = seq_read, |
| 384 | .write = ftrace_filter_write, | 384 | .write = ftrace_filter_write, |
| 385 | .llseek = ftrace_filter_lseek, | 385 | .llseek = tracing_lseek, |
| 386 | .release = ftrace_regex_release, | 386 | .release = ftrace_regex_release, |
| 387 | }; | 387 | }; |
| 388 | 388 | ||
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index ea90eb5f6f17..759d5e004517 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
| @@ -321,7 +321,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 321 | if (!ftrace_file) | 321 | if (!ftrace_file) |
| 322 | return; | 322 | return; |
| 323 | 323 | ||
| 324 | if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) | 324 | if (ftrace_trigger_soft_disabled(ftrace_file)) |
| 325 | return; | 325 | return; |
| 326 | 326 | ||
| 327 | sys_data = syscall_nr_to_meta(syscall_nr); | 327 | sys_data = syscall_nr_to_meta(syscall_nr); |
| @@ -343,9 +343,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 343 | entry->nr = syscall_nr; | 343 | entry->nr = syscall_nr; |
| 344 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); | 344 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); |
| 345 | 345 | ||
| 346 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) | 346 | event_trigger_unlock_commit(ftrace_file, buffer, event, entry, |
| 347 | trace_current_buffer_unlock_commit(buffer, event, | 347 | irq_flags, pc); |
| 348 | irq_flags, pc); | ||
| 349 | } | 348 | } |
| 350 | 349 | ||
| 351 | static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | 350 | static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) |
| @@ -369,7 +368,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | |||
| 369 | if (!ftrace_file) | 368 | if (!ftrace_file) |
| 370 | return; | 369 | return; |
| 371 | 370 | ||
| 372 | if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) | 371 | if (ftrace_trigger_soft_disabled(ftrace_file)) |
| 373 | return; | 372 | return; |
| 374 | 373 | ||
| 375 | sys_data = syscall_nr_to_meta(syscall_nr); | 374 | sys_data = syscall_nr_to_meta(syscall_nr); |
| @@ -390,9 +389,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | |||
| 390 | entry->nr = syscall_nr; | 389 | entry->nr = syscall_nr; |
| 391 | entry->ret = syscall_get_return_value(current, regs); | 390 | entry->ret = syscall_get_return_value(current, regs); |
| 392 | 391 | ||
| 393 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) | 392 | event_trigger_unlock_commit(ftrace_file, buffer, event, entry, |
| 394 | trace_current_buffer_unlock_commit(buffer, event, | 393 | irq_flags, pc); |
| 395 | irq_flags, pc); | ||
| 396 | } | 394 | } |
| 397 | 395 | ||
| 398 | static int reg_event_syscall_enter(struct ftrace_event_file *file, | 396 | static int reg_event_syscall_enter(struct ftrace_event_file *file, |
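
Both syscall paths now funnel through event_trigger_unlock_commit(), which folds the old filter_check_discard()/unlock_commit pair together with trigger handling from the new trace_events_trigger.c. A sketch of the expected shape (helper names here are assumptions based on the trigger series):

	static inline void
	event_trigger_unlock_commit(struct ftrace_event_file *file,
				    struct ring_buffer *buffer,
				    struct ring_buffer_event *event,
				    void *entry, unsigned long irq_flags, int pc)
	{
		enum event_trigger_type tt = ETT_NONE;

		/* Filters and pre-commit triggers may discard the event */
		if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
			trace_buffer_unlock_commit(buffer, event, irq_flags, pc);

		/* Post-commit triggers (stacktrace, snapshot, ...) fire here */
		if (tt)
			event_triggers_post_call(file, tt);
	}
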
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index b6dcc42ef7f5..79e52d93860b 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
| @@ -51,22 +51,17 @@ struct trace_uprobe_filter { | |||
| 51 | */ | 51 | */ |
| 52 | struct trace_uprobe { | 52 | struct trace_uprobe { |
| 53 | struct list_head list; | 53 | struct list_head list; |
| 54 | struct ftrace_event_class class; | ||
| 55 | struct ftrace_event_call call; | ||
| 56 | struct trace_uprobe_filter filter; | 54 | struct trace_uprobe_filter filter; |
| 57 | struct uprobe_consumer consumer; | 55 | struct uprobe_consumer consumer; |
| 58 | struct inode *inode; | 56 | struct inode *inode; |
| 59 | char *filename; | 57 | char *filename; |
| 60 | unsigned long offset; | 58 | unsigned long offset; |
| 61 | unsigned long nhit; | 59 | unsigned long nhit; |
| 62 | unsigned int flags; /* For TP_FLAG_* */ | 60 | struct trace_probe tp; |
| 63 | ssize_t size; /* trace entry size */ | ||
| 64 | unsigned int nr_args; | ||
| 65 | struct probe_arg args[]; | ||
| 66 | }; | 61 | }; |
| 67 | 62 | ||
| 68 | #define SIZEOF_TRACE_UPROBE(n) \ | 63 | #define SIZEOF_TRACE_UPROBE(n) \ |
| 69 | (offsetof(struct trace_uprobe, args) + \ | 64 | (offsetof(struct trace_uprobe, tp.args) + \ |
| 70 | (sizeof(struct probe_arg) * (n))) | 65 | (sizeof(struct probe_arg) * (n))) |
| 71 | 66 | ||
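
The per-uprobe event/class/args bookkeeping collapses into an embedded struct trace_probe so it can be shared with trace_kprobe.c. Note the constraint this creates: tp.args[] is a flexible array, so the embedded tp must remain the last member of struct trace_uprobe, and one allocation covers header plus argument slots, e.g. (sketch):

	/* nargs probe_arg slots tail the embedded trace_probe */
	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
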
| 72 | static int register_uprobe_event(struct trace_uprobe *tu); | 67 | static int register_uprobe_event(struct trace_uprobe *tu); |
| @@ -75,10 +70,151 @@ static int unregister_uprobe_event(struct trace_uprobe *tu); | |||
| 75 | static DEFINE_MUTEX(uprobe_lock); | 70 | static DEFINE_MUTEX(uprobe_lock); |
| 76 | static LIST_HEAD(uprobe_list); | 71 | static LIST_HEAD(uprobe_list); |
| 77 | 72 | ||
| 73 | struct uprobe_dispatch_data { | ||
| 74 | struct trace_uprobe *tu; | ||
| 75 | unsigned long bp_addr; | ||
| 76 | }; | ||
| 77 | |||
| 78 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); | 78 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); |
| 79 | static int uretprobe_dispatcher(struct uprobe_consumer *con, | 79 | static int uretprobe_dispatcher(struct uprobe_consumer *con, |
| 80 | unsigned long func, struct pt_regs *regs); | 80 | unsigned long func, struct pt_regs *regs); |
| 81 | 81 | ||
| 82 | #ifdef CONFIG_STACK_GROWSUP | ||
| 83 | static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n) | ||
| 84 | { | ||
| 85 | return addr - (n * sizeof(long)); | ||
| 86 | } | ||
| 87 | #else | ||
| 88 | static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n) | ||
| 89 | { | ||
| 90 | return addr + (n * sizeof(long)); | ||
| 91 | } | ||
| 92 | #endif | ||
| 93 | |||
| 94 | static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n) | ||
| 95 | { | ||
| 96 | unsigned long ret; | ||
| 97 | unsigned long addr = user_stack_pointer(regs); | ||
| 98 | |||
| 99 | addr = adjust_stack_addr(addr, n); | ||
| 100 | |||
| 101 | if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret))) | ||
| 102 | return 0; | ||
| 103 | |||
| 104 | return ret; | ||
| 105 | } | ||
| 106 | |||
| 107 | /* | ||
| 108 | * Uprobes-specific fetch functions | ||
| 109 | */ | ||
| 110 | #define DEFINE_FETCH_stack(type) \ | ||
| 111 | static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ | ||
| 112 | void *offset, void *dest) \ | ||
| 113 | { \ | ||
| 114 | *(type *)dest = (type)get_user_stack_nth(regs, \ | ||
| 115 | ((unsigned long)offset)); \ | ||
| 116 | } | ||
| 117 | DEFINE_BASIC_FETCH_FUNCS(stack) | ||
| 118 | /* No string on the stack entry */ | ||
| 119 | #define fetch_stack_string NULL | ||
| 120 | #define fetch_stack_string_size NULL | ||
| 121 | |||
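
DEFINE_BASIC_FETCH_FUNCS(stack) stamps out u8/u16/u32/u64 variants of the macro above; for u32, FETCH_FUNC_NAME(stack, u32) expands to roughly the following (a sketch, assuming the trace_probe.h naming convention):

	static __kprobes void fetch_stack_u32(struct pt_regs *regs,
					      void *offset, void *dest)
	{
		/* "offset" is really the word index n, smuggled as a pointer */
		*(u32 *)dest = (u32)get_user_stack_nth(regs,
						       (unsigned long)offset);
	}
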
| 122 | #define DEFINE_FETCH_memory(type) \ | ||
| 123 | static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ | ||
| 124 | void *addr, void *dest) \ | ||
| 125 | { \ | ||
| 126 | type retval; \ | ||
| 127 | void __user *vaddr = (void __force __user *) addr; \ | ||
| 128 | \ | ||
| 129 | if (copy_from_user(&retval, vaddr, sizeof(type))) \ | ||
| 130 | *(type *)dest = 0; \ | ||
| 131 | else \ | ||
| 132 | *(type *) dest = retval; \ | ||
| 133 | } | ||
| 134 | DEFINE_BASIC_FETCH_FUNCS(memory) | ||
| 135 | /* | ||
| 136 | * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max | ||
| 137 | * length and relative data location. | ||
| 138 | */ | ||
| 139 | static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, | ||
| 140 | void *addr, void *dest) | ||
| 141 | { | ||
| 142 | long ret; | ||
| 143 | u32 rloc = *(u32 *)dest; | ||
| 144 | int maxlen = get_rloc_len(rloc); | ||
| 145 | u8 *dst = get_rloc_data(dest); | ||
| 146 | void __user *src = (void __force __user *) addr; | ||
| 147 | |||
| 148 | if (!maxlen) | ||
| 149 | return; | ||
| 150 | |||
| 151 | ret = strncpy_from_user(dst, src, maxlen); | ||
| 152 | |||
| 153 | if (ret < 0) { /* Failed to fetch string */ | ||
| 154 | ((u8 *)get_rloc_data(dest))[0] = '\0'; | ||
| 155 | *(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc)); | ||
| 156 | } else { | ||
| 157 | *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc)); | ||
| 158 | } | ||
| 159 | } | ||
| 160 | |||
| 161 | static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, | ||
| 162 | void *addr, void *dest) | ||
| 163 | { | ||
| 164 | int len; | ||
| 165 | void __user *vaddr = (void __force __user *) addr; | ||
| 166 | |||
| 167 | len = strnlen_user(vaddr, MAX_STRING_SIZE); | ||
| 168 | |||
| 169 | if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */ | ||
| 170 | *(u32 *)dest = 0; | ||
| 171 | else | ||
| 172 | *(u32 *)dest = len; | ||
| 173 | } | ||
| 174 | |||
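
The u32 "data location" words used above pack a length and a buffer-relative offset into a single cell. For reference, the trace_probe.h helpers look like this (assuming the usual 16/16 bit split):

	#define make_data_rloc(len, roffs)	\
		(((u32)(len) << 16) | ((u32)(roffs) & 0xffff))
	#define get_rloc_len(dl)	((u32)(dl) >> 16)
	#define get_rloc_offs(dl)	((u32)(dl) & 0xffff)
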
| 175 | static unsigned long translate_user_vaddr(void *file_offset) | ||
| 176 | { | ||
| 177 | unsigned long base_addr; | ||
| 178 | struct uprobe_dispatch_data *udd; | ||
| 179 | |||
| 180 | udd = (void *) current->utask->vaddr; | ||
| 181 | |||
| 182 | base_addr = udd->bp_addr - udd->tu->offset; | ||
| 183 | return base_addr + (unsigned long)file_offset; | ||
| 184 | } | ||
| 185 | |||
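
The translation works because bp_addr is the runtime address of a probe whose file offset (tu->offset) is known, which pins down the mapping base. A worked example with hypothetical numbers:

	/* probe at file offset 0x4f0 fires at bp_addr 0x7f12340004f0:
	 *   base_addr = 0x7f12340004f0 - 0x4f0 = 0x7f1234000000
	 * an argument "@+0x1000" then reads vaddr 0x7f1234001000 */
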
| 186 | #define DEFINE_FETCH_file_offset(type) \ | ||
| 187 | static __kprobes void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,\ | ||
| 188 | void *offset, void *dest) \ | ||
| 189 | { \ | ||
| 190 | void *vaddr = (void *)translate_user_vaddr(offset); \ | ||
| 191 | \ | ||
| 192 | FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \ | ||
| 193 | } | ||
| 194 | DEFINE_BASIC_FETCH_FUNCS(file_offset) | ||
| 195 | DEFINE_FETCH_file_offset(string) | ||
| 196 | DEFINE_FETCH_file_offset(string_size) | ||
| 197 | |||
| 198 | /* Fetch type information table */ | ||
| 199 | const struct fetch_type uprobes_fetch_type_table[] = { | ||
| 200 | /* Special types */ | ||
| 201 | [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, | ||
| 202 | sizeof(u32), 1, "__data_loc char[]"), | ||
| 203 | [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, | ||
| 204 | string_size, sizeof(u32), 0, "u32"), | ||
| 205 | /* Basic types */ | ||
| 206 | ASSIGN_FETCH_TYPE(u8, u8, 0), | ||
| 207 | ASSIGN_FETCH_TYPE(u16, u16, 0), | ||
| 208 | ASSIGN_FETCH_TYPE(u32, u32, 0), | ||
| 209 | ASSIGN_FETCH_TYPE(u64, u64, 0), | ||
| 210 | ASSIGN_FETCH_TYPE(s8, u8, 1), | ||
| 211 | ASSIGN_FETCH_TYPE(s16, u16, 1), | ||
| 212 | ASSIGN_FETCH_TYPE(s32, u32, 1), | ||
| 213 | ASSIGN_FETCH_TYPE(s64, u64, 1), | ||
| 214 | |||
| 215 | ASSIGN_FETCH_TYPE_END | ||
| 216 | }; | ||
| 217 | |||
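
With the table in place, @+OFFSET fetch arguments become usable from uprobe_events. A hypothetical usage (binary path and offsets invented for illustration):

	# probe bash at file offset 0x4245c0, dumping a string located
	# at file offset 0x6e0a80 within the same mapping
	echo 'p:bash_probe /bin/bash:0x4245c0 msg=@+0x6e0a80:string' \
		>> /sys/kernel/debug/tracing/uprobe_events
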
| 82 | static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter) | 218 | static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter) |
| 83 | { | 219 | { |
| 84 | rwlock_init(&filter->rwlock); | 220 | rwlock_init(&filter->rwlock); |
| @@ -114,13 +250,13 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) | |||
| 114 | if (!tu) | 250 | if (!tu) |
| 115 | return ERR_PTR(-ENOMEM); | 251 | return ERR_PTR(-ENOMEM); |
| 116 | 252 | ||
| 117 | tu->call.class = &tu->class; | 253 | tu->tp.call.class = &tu->tp.class; |
| 118 | tu->call.name = kstrdup(event, GFP_KERNEL); | 254 | tu->tp.call.name = kstrdup(event, GFP_KERNEL); |
| 119 | if (!tu->call.name) | 255 | if (!tu->tp.call.name) |
| 120 | goto error; | 256 | goto error; |
| 121 | 257 | ||
| 122 | tu->class.system = kstrdup(group, GFP_KERNEL); | 258 | tu->tp.class.system = kstrdup(group, GFP_KERNEL); |
| 123 | if (!tu->class.system) | 259 | if (!tu->tp.class.system) |
| 124 | goto error; | 260 | goto error; |
| 125 | 261 | ||
| 126 | INIT_LIST_HEAD(&tu->list); | 262 | INIT_LIST_HEAD(&tu->list); |
| @@ -128,11 +264,11 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) | |||
| 128 | if (is_ret) | 264 | if (is_ret) |
| 129 | tu->consumer.ret_handler = uretprobe_dispatcher; | 265 | tu->consumer.ret_handler = uretprobe_dispatcher; |
| 130 | init_trace_uprobe_filter(&tu->filter); | 266 | init_trace_uprobe_filter(&tu->filter); |
| 131 | tu->call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER; | 267 | tu->tp.call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER; |
| 132 | return tu; | 268 | return tu; |
| 133 | 269 | ||
| 134 | error: | 270 | error: |
| 135 | kfree(tu->call.name); | 271 | kfree(tu->tp.call.name); |
| 136 | kfree(tu); | 272 | kfree(tu); |
| 137 | 273 | ||
| 138 | return ERR_PTR(-ENOMEM); | 274 | return ERR_PTR(-ENOMEM); |
| @@ -142,12 +278,12 @@ static void free_trace_uprobe(struct trace_uprobe *tu) | |||
| 142 | { | 278 | { |
| 143 | int i; | 279 | int i; |
| 144 | 280 | ||
| 145 | for (i = 0; i < tu->nr_args; i++) | 281 | for (i = 0; i < tu->tp.nr_args; i++) |
| 146 | traceprobe_free_probe_arg(&tu->args[i]); | 282 | traceprobe_free_probe_arg(&tu->tp.args[i]); |
| 147 | 283 | ||
| 148 | iput(tu->inode); | 284 | iput(tu->inode); |
| 149 | kfree(tu->call.class->system); | 285 | kfree(tu->tp.call.class->system); |
| 150 | kfree(tu->call.name); | 286 | kfree(tu->tp.call.name); |
| 151 | kfree(tu->filename); | 287 | kfree(tu->filename); |
| 152 | kfree(tu); | 288 | kfree(tu); |
| 153 | } | 289 | } |
| @@ -157,8 +293,8 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou | |||
| 157 | struct trace_uprobe *tu; | 293 | struct trace_uprobe *tu; |
| 158 | 294 | ||
| 159 | list_for_each_entry(tu, &uprobe_list, list) | 295 | list_for_each_entry(tu, &uprobe_list, list) |
| 160 | if (strcmp(tu->call.name, event) == 0 && | 296 | if (strcmp(tu->tp.call.name, event) == 0 && |
| 161 | strcmp(tu->call.class->system, group) == 0) | 297 | strcmp(tu->tp.call.class->system, group) == 0) |
| 162 | return tu; | 298 | return tu; |
| 163 | 299 | ||
| 164 | return NULL; | 300 | return NULL; |
| @@ -181,16 +317,16 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu) | |||
| 181 | /* Register a trace_uprobe and probe_event */ | 317 | /* Register a trace_uprobe and probe_event */ |
| 182 | static int register_trace_uprobe(struct trace_uprobe *tu) | 318 | static int register_trace_uprobe(struct trace_uprobe *tu) |
| 183 | { | 319 | { |
| 184 | struct trace_uprobe *old_tp; | 320 | struct trace_uprobe *old_tu; |
| 185 | int ret; | 321 | int ret; |
| 186 | 322 | ||
| 187 | mutex_lock(&uprobe_lock); | 323 | mutex_lock(&uprobe_lock); |
| 188 | 324 | ||
| 189 | /* register as an event */ | 325 | /* register as an event */ |
| 190 | old_tp = find_probe_event(tu->call.name, tu->call.class->system); | 326 | old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system); |
| 191 | if (old_tp) { | 327 | if (old_tu) { |
| 192 | /* delete old event */ | 328 | /* delete old event */ |
| 193 | ret = unregister_trace_uprobe(old_tp); | 329 | ret = unregister_trace_uprobe(old_tu); |
| 194 | if (ret) | 330 | if (ret) |
| 195 | goto end; | 331 | goto end; |
| 196 | } | 332 | } |
| @@ -211,7 +347,7 @@ end: | |||
| 211 | 347 | ||
| 212 | /* | 348 | /* |
| 213 | * Argument syntax: | 349 | * Argument syntax: |
| 214 | * - Add uprobe: p|r[:[GRP/]EVENT] PATH:SYMBOL [FETCHARGS] | 350 | * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] |
| 215 | * | 351 | * |
| 216 | * - Remove uprobe: -:[GRP/]EVENT | 352 | * - Remove uprobe: -:[GRP/]EVENT |
| 217 | */ | 353 | */ |
| @@ -360,34 +496,36 @@ static int create_trace_uprobe(int argc, char **argv) | |||
| 360 | /* parse arguments */ | 496 | /* parse arguments */ |
| 361 | ret = 0; | 497 | ret = 0; |
| 362 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { | 498 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { |
| 499 | struct probe_arg *parg = &tu->tp.args[i]; | ||
| 500 | |||
| 363 | /* Increment count for freeing args in error case */ | 501 | /* Increment count for freeing args in error case */ |
| 364 | tu->nr_args++; | 502 | tu->tp.nr_args++; |
| 365 | 503 | ||
| 366 | /* Parse argument name */ | 504 | /* Parse argument name */ |
| 367 | arg = strchr(argv[i], '='); | 505 | arg = strchr(argv[i], '='); |
| 368 | if (arg) { | 506 | if (arg) { |
| 369 | *arg++ = '\0'; | 507 | *arg++ = '\0'; |
| 370 | tu->args[i].name = kstrdup(argv[i], GFP_KERNEL); | 508 | parg->name = kstrdup(argv[i], GFP_KERNEL); |
| 371 | } else { | 509 | } else { |
| 372 | arg = argv[i]; | 510 | arg = argv[i]; |
| 373 | /* If argument name is omitted, set "argN" */ | 511 | /* If argument name is omitted, set "argN" */ |
| 374 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); | 512 | snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); |
| 375 | tu->args[i].name = kstrdup(buf, GFP_KERNEL); | 513 | parg->name = kstrdup(buf, GFP_KERNEL); |
| 376 | } | 514 | } |
| 377 | 515 | ||
| 378 | if (!tu->args[i].name) { | 516 | if (!parg->name) { |
| 379 | pr_info("Failed to allocate argument[%d] name.\n", i); | 517 | pr_info("Failed to allocate argument[%d] name.\n", i); |
| 380 | ret = -ENOMEM; | 518 | ret = -ENOMEM; |
| 381 | goto error; | 519 | goto error; |
| 382 | } | 520 | } |
| 383 | 521 | ||
| 384 | if (!is_good_name(tu->args[i].name)) { | 522 | if (!is_good_name(parg->name)) { |
| 385 | pr_info("Invalid argument[%d] name: %s\n", i, tu->args[i].name); | 523 | pr_info("Invalid argument[%d] name: %s\n", i, parg->name); |
| 386 | ret = -EINVAL; | 524 | ret = -EINVAL; |
| 387 | goto error; | 525 | goto error; |
| 388 | } | 526 | } |
| 389 | 527 | ||
| 390 | if (traceprobe_conflict_field_name(tu->args[i].name, tu->args, i)) { | 528 | if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) { |
| 391 | pr_info("Argument[%d] name '%s' conflicts with " | 529 | pr_info("Argument[%d] name '%s' conflicts with " |
| 392 | "another field.\n", i, argv[i]); | 530 | "another field.\n", i, argv[i]); |
| 393 | ret = -EINVAL; | 531 | ret = -EINVAL; |
| @@ -395,7 +533,8 @@ static int create_trace_uprobe(int argc, char **argv) | |||
| 395 | } | 533 | } |
| 396 | 534 | ||
| 397 | /* Parse fetch argument */ | 535 | /* Parse fetch argument */ |
| 398 | ret = traceprobe_parse_probe_arg(arg, &tu->size, &tu->args[i], false, false); | 536 | ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg, |
| 537 | is_return, false); | ||
| 399 | if (ret) { | 538 | if (ret) { |
| 400 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); | 539 | pr_info("Parse error at argument[%d]. (%d)\n", i, ret); |
| 401 | goto error; | 540 | goto error; |
| @@ -459,11 +598,11 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 459 | char c = is_ret_probe(tu) ? 'r' : 'p'; | 598 | char c = is_ret_probe(tu) ? 'r' : 'p'; |
| 460 | int i; | 599 | int i; |
| 461 | 600 | ||
| 462 | seq_printf(m, "%c:%s/%s", c, tu->call.class->system, tu->call.name); | 601 | seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name); |
| 463 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); | 602 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); |
| 464 | 603 | ||
| 465 | for (i = 0; i < tu->nr_args; i++) | 604 | for (i = 0; i < tu->tp.nr_args; i++) |
| 466 | seq_printf(m, " %s=%s", tu->args[i].name, tu->args[i].comm); | 605 | seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); |
| 467 | 606 | ||
| 468 | seq_printf(m, "\n"); | 607 | seq_printf(m, "\n"); |
| 469 | return 0; | 608 | return 0; |
| @@ -509,7 +648,7 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
| 509 | { | 648 | { |
| 510 | struct trace_uprobe *tu = v; | 649 | struct trace_uprobe *tu = v; |
| 511 | 650 | ||
| 512 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->call.name, tu->nhit); | 651 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit); |
| 513 | return 0; | 652 | return 0; |
| 514 | } | 653 | } |
| 515 | 654 | ||
| @@ -533,21 +672,117 @@ static const struct file_operations uprobe_profile_ops = { | |||
| 533 | .release = seq_release, | 672 | .release = seq_release, |
| 534 | }; | 673 | }; |
| 535 | 674 | ||
| 675 | struct uprobe_cpu_buffer { | ||
| 676 | struct mutex mutex; | ||
| 677 | void *buf; | ||
| 678 | }; | ||
| 679 | static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer; | ||
| 680 | static int uprobe_buffer_refcnt; | ||
| 681 | |||
| 682 | static int uprobe_buffer_init(void) | ||
| 683 | { | ||
| 684 | int cpu, err_cpu; | ||
| 685 | |||
| 686 | uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer); | ||
| 687 | if (uprobe_cpu_buffer == NULL) | ||
| 688 | return -ENOMEM; | ||
| 689 | |||
| 690 | for_each_possible_cpu(cpu) { | ||
| 691 | struct page *p = alloc_pages_node(cpu_to_node(cpu), | ||
| 692 | GFP_KERNEL, 0); | ||
| 693 | if (p == NULL) { | ||
| 694 | err_cpu = cpu; | ||
| 695 | goto err; | ||
| 696 | } | ||
| 697 | per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p); | ||
| 698 | mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex); | ||
| 699 | } | ||
| 700 | |||
| 701 | return 0; | ||
| 702 | |||
| 703 | err: | ||
| 704 | for_each_possible_cpu(cpu) { | ||
| 705 | if (cpu == err_cpu) | ||
| 706 | break; | ||
| 707 | free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf); | ||
| 708 | } | ||
| 709 | |||
| 710 | free_percpu(uprobe_cpu_buffer); | ||
| 711 | return -ENOMEM; | ||
| 712 | } | ||
| 713 | |||
| 714 | static int uprobe_buffer_enable(void) | ||
| 715 | { | ||
| 716 | int ret = 0; | ||
| 717 | |||
| 718 | BUG_ON(!mutex_is_locked(&event_mutex)); | ||
| 719 | |||
| 720 | if (uprobe_buffer_refcnt++ == 0) { | ||
| 721 | ret = uprobe_buffer_init(); | ||
| 722 | if (ret < 0) | ||
| 723 | uprobe_buffer_refcnt--; | ||
| 724 | } | ||
| 725 | |||
| 726 | return ret; | ||
| 727 | } | ||
| 728 | |||
| 729 | static void uprobe_buffer_disable(void) | ||
| 730 | { | ||
| 731 | BUG_ON(!mutex_is_locked(&event_mutex)); | ||
| 732 | |||
| 733 | if (--uprobe_buffer_refcnt == 0) { | ||
| 734 | free_percpu(uprobe_cpu_buffer); | ||
| 735 | uprobe_cpu_buffer = NULL; | ||
| 736 | } | ||
| 737 | } | ||
| 738 | |||
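
As written, uprobe_buffer_disable() frees the percpu array but not the per-CPU pages handed out by uprobe_buffer_init(), leaking one page per possible CPU on every last-disable; later kernels tightened this up. A leak-free variant would be:

	static void uprobe_buffer_disable(void)
	{
		int cpu;

		BUG_ON(!mutex_is_locked(&event_mutex));

		if (--uprobe_buffer_refcnt == 0) {
			for_each_possible_cpu(cpu)
				free_page((unsigned long)per_cpu_ptr(
						uprobe_cpu_buffer, cpu)->buf);

			free_percpu(uprobe_cpu_buffer);
			uprobe_cpu_buffer = NULL;
		}
	}
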
| 739 | static struct uprobe_cpu_buffer *uprobe_buffer_get(void) | ||
| 740 | { | ||
| 741 | struct uprobe_cpu_buffer *ucb; | ||
| 742 | int cpu; | ||
| 743 | |||
| 744 | cpu = raw_smp_processor_id(); | ||
| 745 | ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu); | ||
| 746 | |||
| 747 | /* | ||
| 748 | * Use per-cpu buffers for fastest access, but we might migrate | ||
| 749 | * so the mutex makes sure we have sole access to it. | ||
| 750 | */ | ||
| 751 | mutex_lock(&ucb->mutex); | ||
| 752 | |||
| 753 | return ucb; | ||
| 754 | } | ||
| 755 | |||
| 756 | static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) | ||
| 757 | { | ||
| 758 | mutex_unlock(&ucb->mutex); | ||
| 759 | } | ||
| 760 | |||
| 536 | static void uprobe_trace_print(struct trace_uprobe *tu, | 761 | static void uprobe_trace_print(struct trace_uprobe *tu, |
| 537 | unsigned long func, struct pt_regs *regs) | 762 | unsigned long func, struct pt_regs *regs) |
| 538 | { | 763 | { |
| 539 | struct uprobe_trace_entry_head *entry; | 764 | struct uprobe_trace_entry_head *entry; |
| 540 | struct ring_buffer_event *event; | 765 | struct ring_buffer_event *event; |
| 541 | struct ring_buffer *buffer; | 766 | struct ring_buffer *buffer; |
| 767 | struct uprobe_cpu_buffer *ucb; | ||
| 542 | void *data; | 768 | void *data; |
| 543 | int size, i; | 769 | int size, dsize, esize; |
| 544 | struct ftrace_event_call *call = &tu->call; | 770 | struct ftrace_event_call *call = &tu->tp.call; |
| 771 | |||
| 772 | dsize = __get_data_size(&tu->tp, regs); | ||
| 773 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 545 | 774 | ||
| 546 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | 775 | if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE)) |
| 776 | return; | ||
| 777 | |||
| 778 | ucb = uprobe_buffer_get(); | ||
| 779 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 780 | |||
| 781 | size = esize + tu->tp.size + dsize; | ||
| 547 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, | 782 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, |
| 548 | size + tu->size, 0, 0); | 783 | size, 0, 0); |
| 549 | if (!event) | 784 | if (!event) |
| 550 | return; | 785 | goto out; |
| 551 | 786 | ||
| 552 | entry = ring_buffer_event_data(event); | 787 | entry = ring_buffer_event_data(event); |
| 553 | if (is_ret_probe(tu)) { | 788 | if (is_ret_probe(tu)) { |
| @@ -559,11 +794,13 @@ static void uprobe_trace_print(struct trace_uprobe *tu, | |||
| 559 | data = DATAOF_TRACE_ENTRY(entry, false); | 794 | data = DATAOF_TRACE_ENTRY(entry, false); |
| 560 | } | 795 | } |
| 561 | 796 | ||
| 562 | for (i = 0; i < tu->nr_args; i++) | 797 | memcpy(data, ucb->buf, tu->tp.size + dsize); |
| 563 | call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); | ||
| 564 | 798 | ||
| 565 | if (!call_filter_check_discard(call, entry, buffer, event)) | 799 | if (!call_filter_check_discard(call, entry, buffer, event)) |
| 566 | trace_buffer_unlock_commit(buffer, event, 0, 0); | 800 | trace_buffer_unlock_commit(buffer, event, 0, 0); |
| 801 | |||
| 802 | out: | ||
| 803 | uprobe_buffer_put(ucb); | ||
| 567 | } | 804 | } |
| 568 | 805 | ||
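
Fetching into the per-cpu scratch buffer before reserving the ring-buffer event lets the reservation be exact: __get_data_size() walks the arguments and sums only the dynamic (string) payloads. A sketch of that helper as shared via trace_probe.h (the fetch_size hook is how string arguments report their runtime length):

	static inline int __get_data_size(struct trace_probe *tp,
					  struct pt_regs *regs)
	{
		int i, ret = 0;
		u32 len;

		for (i = 0; i < tp->nr_args; i++)
			if (unlikely(tp->args[i].fetch_size.fn)) {
				call_fetch(&tp->args[i].fetch_size, regs, &len);
				ret += len;
			}

		return ret;
	}
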
| 569 | /* uprobe handler */ | 806 | /* uprobe handler */ |
| @@ -591,23 +828,24 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e | |||
| 591 | int i; | 828 | int i; |
| 592 | 829 | ||
| 593 | entry = (struct uprobe_trace_entry_head *)iter->ent; | 830 | entry = (struct uprobe_trace_entry_head *)iter->ent; |
| 594 | tu = container_of(event, struct trace_uprobe, call.event); | 831 | tu = container_of(event, struct trace_uprobe, tp.call.event); |
| 595 | 832 | ||
| 596 | if (is_ret_probe(tu)) { | 833 | if (is_ret_probe(tu)) { |
| 597 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->call.name, | 834 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name, |
| 598 | entry->vaddr[1], entry->vaddr[0])) | 835 | entry->vaddr[1], entry->vaddr[0])) |
| 599 | goto partial; | 836 | goto partial; |
| 600 | data = DATAOF_TRACE_ENTRY(entry, true); | 837 | data = DATAOF_TRACE_ENTRY(entry, true); |
| 601 | } else { | 838 | } else { |
| 602 | if (!trace_seq_printf(s, "%s: (0x%lx)", tu->call.name, | 839 | if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name, |
| 603 | entry->vaddr[0])) | 840 | entry->vaddr[0])) |
| 604 | goto partial; | 841 | goto partial; |
| 605 | data = DATAOF_TRACE_ENTRY(entry, false); | 842 | data = DATAOF_TRACE_ENTRY(entry, false); |
| 606 | } | 843 | } |
| 607 | 844 | ||
| 608 | for (i = 0; i < tu->nr_args; i++) { | 845 | for (i = 0; i < tu->tp.nr_args; i++) { |
| 609 | if (!tu->args[i].type->print(s, tu->args[i].name, | 846 | struct probe_arg *parg = &tu->tp.args[i]; |
| 610 | data + tu->args[i].offset, entry)) | 847 | |
| 848 | if (!parg->type->print(s, parg->name, data + parg->offset, entry)) | ||
| 611 | goto partial; | 849 | goto partial; |
| 612 | } | 850 | } |
| 613 | 851 | ||
| @@ -618,11 +856,6 @@ partial: | |||
| 618 | return TRACE_TYPE_PARTIAL_LINE; | 856 | return TRACE_TYPE_PARTIAL_LINE; |
| 619 | } | 857 | } |
| 620 | 858 | ||
| 621 | static inline bool is_trace_uprobe_enabled(struct trace_uprobe *tu) | ||
| 622 | { | ||
| 623 | return tu->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE); | ||
| 624 | } | ||
| 625 | |||
| 626 | typedef bool (*filter_func_t)(struct uprobe_consumer *self, | 859 | typedef bool (*filter_func_t)(struct uprobe_consumer *self, |
| 627 | enum uprobe_filter_ctx ctx, | 860 | enum uprobe_filter_ctx ctx, |
| 628 | struct mm_struct *mm); | 861 | struct mm_struct *mm); |
| @@ -632,29 +865,35 @@ probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) | |||
| 632 | { | 865 | { |
| 633 | int ret = 0; | 866 | int ret = 0; |
| 634 | 867 | ||
| 635 | if (is_trace_uprobe_enabled(tu)) | 868 | if (trace_probe_is_enabled(&tu->tp)) |
| 636 | return -EINTR; | 869 | return -EINTR; |
| 637 | 870 | ||
| 871 | ret = uprobe_buffer_enable(); | ||
| 872 | if (ret < 0) | ||
| 873 | return ret; | ||
| 874 | |||
| 638 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); | 875 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); |
| 639 | 876 | ||
| 640 | tu->flags |= flag; | 877 | tu->tp.flags |= flag; |
| 641 | tu->consumer.filter = filter; | 878 | tu->consumer.filter = filter; |
| 642 | ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); | 879 | ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); |
| 643 | if (ret) | 880 | if (ret) |
| 644 | tu->flags &= ~flag; | 881 | tu->tp.flags &= ~flag; |
| 645 | 882 | ||
| 646 | return ret; | 883 | return ret; |
| 647 | } | 884 | } |
| 648 | 885 | ||
| 649 | static void probe_event_disable(struct trace_uprobe *tu, int flag) | 886 | static void probe_event_disable(struct trace_uprobe *tu, int flag) |
| 650 | { | 887 | { |
| 651 | if (!is_trace_uprobe_enabled(tu)) | 888 | if (!trace_probe_is_enabled(&tu->tp)) |
| 652 | return; | 889 | return; |
| 653 | 890 | ||
| 654 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); | 891 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); |
| 655 | 892 | ||
| 656 | uprobe_unregister(tu->inode, tu->offset, &tu->consumer); | 893 | uprobe_unregister(tu->inode, tu->offset, &tu->consumer); |
| 657 | tu->flags &= ~flag; | 894 | tu->tp.flags &= ~flag; |
| 895 | |||
| 896 | uprobe_buffer_disable(); | ||
| 658 | } | 897 | } |
| 659 | 898 | ||
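
One imbalance worth noting in probe_event_enable(): if uprobe_register() fails, the flag is cleared but the function returns without undoing the uprobe_buffer_enable() refcount, stranding the per-cpu buffers. A symmetric error path would look like:

	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret) {
		tu->tp.flags &= ~flag;
		uprobe_buffer_disable();	/* drop the reference taken above */
	}
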
| 660 | static int uprobe_event_define_fields(struct ftrace_event_call *event_call) | 899 | static int uprobe_event_define_fields(struct ftrace_event_call *event_call) |
| @@ -672,12 +911,12 @@ static int uprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 672 | size = SIZEOF_TRACE_ENTRY(false); | 911 | size = SIZEOF_TRACE_ENTRY(false); |
| 673 | } | 912 | } |
| 674 | /* Set argument names as fields */ | 913 | /* Set argument names as fields */ |
| 675 | for (i = 0; i < tu->nr_args; i++) { | 914 | for (i = 0; i < tu->tp.nr_args; i++) { |
| 676 | ret = trace_define_field(event_call, tu->args[i].type->fmttype, | 915 | struct probe_arg *parg = &tu->tp.args[i]; |
| 677 | tu->args[i].name, | 916 | |
| 678 | size + tu->args[i].offset, | 917 | ret = trace_define_field(event_call, parg->type->fmttype, |
| 679 | tu->args[i].type->size, | 918 | parg->name, size + parg->offset, |
| 680 | tu->args[i].type->is_signed, | 919 | parg->type->size, parg->type->is_signed, |
| 681 | FILTER_OTHER); | 920 | FILTER_OTHER); |
| 682 | 921 | ||
| 683 | if (ret) | 922 | if (ret) |
| @@ -686,59 +925,6 @@ static int uprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 686 | return 0; | 925 | return 0; |
| 687 | } | 926 | } |
| 688 | 927 | ||
| 689 | #define LEN_OR_ZERO (len ? len - pos : 0) | ||
| 690 | static int __set_print_fmt(struct trace_uprobe *tu, char *buf, int len) | ||
| 691 | { | ||
| 692 | const char *fmt, *arg; | ||
| 693 | int i; | ||
| 694 | int pos = 0; | ||
| 695 | |||
| 696 | if (is_ret_probe(tu)) { | ||
| 697 | fmt = "(%lx <- %lx)"; | ||
| 698 | arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP; | ||
| 699 | } else { | ||
| 700 | fmt = "(%lx)"; | ||
| 701 | arg = "REC->" FIELD_STRING_IP; | ||
| 702 | } | ||
| 703 | |||
| 704 | /* When len=0, we just calculate the needed length */ | ||
| 705 | |||
| 706 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); | ||
| 707 | |||
| 708 | for (i = 0; i < tu->nr_args; i++) { | ||
| 709 | pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s", | ||
| 710 | tu->args[i].name, tu->args[i].type->fmt); | ||
| 711 | } | ||
| 712 | |||
| 713 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); | ||
| 714 | |||
| 715 | for (i = 0; i < tu->nr_args; i++) { | ||
| 716 | pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", | ||
| 717 | tu->args[i].name); | ||
| 718 | } | ||
| 719 | |||
| 720 | return pos; /* return the length of print_fmt */ | ||
| 721 | } | ||
| 722 | #undef LEN_OR_ZERO | ||
| 723 | |||
| 724 | static int set_print_fmt(struct trace_uprobe *tu) | ||
| 725 | { | ||
| 726 | char *print_fmt; | ||
| 727 | int len; | ||
| 728 | |||
| 729 | /* First: called with 0 length to calculate the needed length */ | ||
| 730 | len = __set_print_fmt(tu, NULL, 0); | ||
| 731 | print_fmt = kmalloc(len + 1, GFP_KERNEL); | ||
| 732 | if (!print_fmt) | ||
| 733 | return -ENOMEM; | ||
| 734 | |||
| 735 | /* Second: actually write the @print_fmt */ | ||
| 736 | __set_print_fmt(tu, print_fmt, len + 1); | ||
| 737 | tu->call.print_fmt = print_fmt; | ||
| 738 | |||
| 739 | return 0; | ||
| 740 | } | ||
| 741 | |||
| 742 | #ifdef CONFIG_PERF_EVENTS | 928 | #ifdef CONFIG_PERF_EVENTS |
| 743 | static bool | 929 | static bool |
| 744 | __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) | 930 | __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) |
| @@ -831,14 +1017,27 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc, | |||
| 831 | static void uprobe_perf_print(struct trace_uprobe *tu, | 1017 | static void uprobe_perf_print(struct trace_uprobe *tu, |
| 832 | unsigned long func, struct pt_regs *regs) | 1018 | unsigned long func, struct pt_regs *regs) |
| 833 | { | 1019 | { |
| 834 | struct ftrace_event_call *call = &tu->call; | 1020 | struct ftrace_event_call *call = &tu->tp.call; |
| 835 | struct uprobe_trace_entry_head *entry; | 1021 | struct uprobe_trace_entry_head *entry; |
| 836 | struct hlist_head *head; | 1022 | struct hlist_head *head; |
| 1023 | struct uprobe_cpu_buffer *ucb; | ||
| 837 | void *data; | 1024 | void *data; |
| 838 | int size, rctx, i; | 1025 | int size, dsize, esize; |
| 1026 | int rctx; | ||
| 839 | 1027 | ||
| 840 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | 1028 | dsize = __get_data_size(&tu->tp, regs); |
| 841 | size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); | 1029 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
| 1030 | |||
| 1031 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1032 | return; | ||
| 1033 | |||
| 1034 | size = esize + tu->tp.size + dsize; | ||
| 1035 | size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); | ||
| 1036 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) | ||
| 1037 | return; | ||
| 1038 | |||
| 1039 | ucb = uprobe_buffer_get(); | ||
| 1040 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 842 | 1041 | ||
| 843 | preempt_disable(); | 1042 | preempt_disable(); |
| 844 | head = this_cpu_ptr(call->perf_events); | 1043 | head = this_cpu_ptr(call->perf_events); |
| @@ -858,12 +1057,18 @@ static void uprobe_perf_print(struct trace_uprobe *tu, | |||
| 858 | data = DATAOF_TRACE_ENTRY(entry, false); | 1057 | data = DATAOF_TRACE_ENTRY(entry, false); |
| 859 | } | 1058 | } |
| 860 | 1059 | ||
| 861 | for (i = 0; i < tu->nr_args; i++) | 1060 | memcpy(data, ucb->buf, tu->tp.size + dsize); |
| 862 | call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); | 1061 | |
| 1062 | if (size - esize > tu->tp.size + dsize) { | ||
| 1063 | int len = tu->tp.size + dsize; | ||
| 1064 | |||
| 1065 | memset(data + len, 0, size - esize - len); | ||
| 1066 | } | ||
| 863 | 1067 | ||
| 864 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | 1068 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); |
| 865 | out: | 1069 | out: |
| 866 | preempt_enable(); | 1070 | preempt_enable(); |
| 1071 | uprobe_buffer_put(ucb); | ||
| 867 | } | 1072 | } |
| 868 | 1073 | ||
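
The memset() pads the u64-aligned perf record out past the copied arguments so no uninitialized kernel bytes reach userspace. A worked example with made-up sizes:

	/* esize = 8, tu->tp.size + dsize = 13:
	 *   size = ALIGN(8 + 13 + 4, 8) - 4 = 28
	 *   copied: 13 bytes of args; padded: 28 - 8 - 13 = 7 zero bytes */
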
| 869 | /* uprobe profile handler */ | 1074 | /* uprobe profile handler */ |
| @@ -921,16 +1126,22 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, | |||
| 921 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) | 1126 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) |
| 922 | { | 1127 | { |
| 923 | struct trace_uprobe *tu; | 1128 | struct trace_uprobe *tu; |
| 1129 | struct uprobe_dispatch_data udd; | ||
| 924 | int ret = 0; | 1130 | int ret = 0; |
| 925 | 1131 | ||
| 926 | tu = container_of(con, struct trace_uprobe, consumer); | 1132 | tu = container_of(con, struct trace_uprobe, consumer); |
| 927 | tu->nhit++; | 1133 | tu->nhit++; |
| 928 | 1134 | ||
| 929 | if (tu->flags & TP_FLAG_TRACE) | 1135 | udd.tu = tu; |
| 1136 | udd.bp_addr = instruction_pointer(regs); | ||
| 1137 | |||
| 1138 | current->utask->vaddr = (unsigned long) &udd; | ||
| 1139 | |||
| 1140 | if (tu->tp.flags & TP_FLAG_TRACE) | ||
| 930 | ret |= uprobe_trace_func(tu, regs); | 1141 | ret |= uprobe_trace_func(tu, regs); |
| 931 | 1142 | ||
| 932 | #ifdef CONFIG_PERF_EVENTS | 1143 | #ifdef CONFIG_PERF_EVENTS |
| 933 | if (tu->flags & TP_FLAG_PROFILE) | 1144 | if (tu->tp.flags & TP_FLAG_PROFILE) |
| 934 | ret |= uprobe_perf_func(tu, regs); | 1145 | ret |= uprobe_perf_func(tu, regs); |
| 935 | #endif | 1146 | #endif |
| 936 | return ret; | 1147 | return ret; |
| @@ -940,14 +1151,20 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, | |||
| 940 | unsigned long func, struct pt_regs *regs) | 1151 | unsigned long func, struct pt_regs *regs) |
| 941 | { | 1152 | { |
| 942 | struct trace_uprobe *tu; | 1153 | struct trace_uprobe *tu; |
| 1154 | struct uprobe_dispatch_data udd; | ||
| 943 | 1155 | ||
| 944 | tu = container_of(con, struct trace_uprobe, consumer); | 1156 | tu = container_of(con, struct trace_uprobe, consumer); |
| 945 | 1157 | ||
| 946 | if (tu->flags & TP_FLAG_TRACE) | 1158 | udd.tu = tu; |
| 1159 | udd.bp_addr = func; | ||
| 1160 | |||
| 1161 | current->utask->vaddr = (unsigned long) &udd; | ||
| 1162 | |||
| 1163 | if (tu->tp.flags & TP_FLAG_TRACE) | ||
| 947 | uretprobe_trace_func(tu, func, regs); | 1164 | uretprobe_trace_func(tu, func, regs); |
| 948 | 1165 | ||
| 949 | #ifdef CONFIG_PERF_EVENTS | 1166 | #ifdef CONFIG_PERF_EVENTS |
| 950 | if (tu->flags & TP_FLAG_PROFILE) | 1167 | if (tu->tp.flags & TP_FLAG_PROFILE) |
| 951 | uretprobe_perf_func(tu, func, regs); | 1168 | uretprobe_perf_func(tu, func, regs); |
| 952 | #endif | 1169 | #endif |
| 953 | return 0; | 1170 | return 0; |
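
Both dispatchers stash a pointer to an on-stack uprobe_dispatch_data in current->utask->vaddr so the @+OFFSET fetch path (see translate_user_vaddr() above) can recover the hit address. This reuse rests on the assumption that utask->vaddr is otherwise only used while single-stepping, which never overlaps with consumer callbacks; the pattern is:

	struct uprobe_dispatch_data udd = {
		.tu	 = tu,
		.bp_addr = instruction_pointer(regs),	/* or func, for uretprobes */
	};

	current->utask->vaddr = (unsigned long) &udd;
	/* ... trace/perf handlers run here and may fetch @+OFFSET args ... */
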
| @@ -959,7 +1176,7 @@ static struct trace_event_functions uprobe_funcs = { | |||
| 959 | 1176 | ||
| 960 | static int register_uprobe_event(struct trace_uprobe *tu) | 1177 | static int register_uprobe_event(struct trace_uprobe *tu) |
| 961 | { | 1178 | { |
| 962 | struct ftrace_event_call *call = &tu->call; | 1179 | struct ftrace_event_call *call = &tu->tp.call; |
| 963 | int ret; | 1180 | int ret; |
| 964 | 1181 | ||
| 965 | /* Initialize ftrace_event_call */ | 1182 | /* Initialize ftrace_event_call */ |
| @@ -967,7 +1184,7 @@ static int register_uprobe_event(struct trace_uprobe *tu) | |||
| 967 | call->event.funcs = &uprobe_funcs; | 1184 | call->event.funcs = &uprobe_funcs; |
| 968 | call->class->define_fields = uprobe_event_define_fields; | 1185 | call->class->define_fields = uprobe_event_define_fields; |
| 969 | 1186 | ||
| 970 | if (set_print_fmt(tu) < 0) | 1187 | if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) |
| 971 | return -ENOMEM; | 1188 | return -ENOMEM; |
| 972 | 1189 | ||
| 973 | ret = register_ftrace_event(&call->event); | 1190 | ret = register_ftrace_event(&call->event); |
| @@ -994,11 +1211,11 @@ static int unregister_uprobe_event(struct trace_uprobe *tu) | |||
| 994 | int ret; | 1211 | int ret; |
| 995 | 1212 | ||
| 996 | /* tu->event is unregistered in trace_remove_event_call() */ | 1213 | /* tu->event is unregistered in trace_remove_event_call() */ |
| 997 | ret = trace_remove_event_call(&tu->call); | 1214 | ret = trace_remove_event_call(&tu->tp.call); |
| 998 | if (ret) | 1215 | if (ret) |
| 999 | return ret; | 1216 | return ret; |
| 1000 | kfree(tu->call.print_fmt); | 1217 | kfree(tu->tp.call.print_fmt); |
| 1001 | tu->call.print_fmt = NULL; | 1218 | tu->tp.call.print_fmt = NULL; |
| 1002 | return 0; | 1219 | return 0; |
| 1003 | } | 1220 | } |
| 1004 | 1221 | ||