author		Mauro Carvalho Chehab <m.chehab@samsung.com>	2014-04-14 11:00:36 -0400
committer	Mauro Carvalho Chehab <m.chehab@samsung.com>	2014-04-14 11:00:36 -0400
commit		277a163c83d7ba93fba1e8980d29a9f8bfcfba6c (patch)
tree		ccfd357d152292958957b6b8a993892e7a8cc95f /kernel/trace
parent		a83b93a7480441a47856dc9104bea970e84cda87 (diff)
parent		c9eaa447e77efe77b7fa4c953bd62de8297fd6c5 (diff)
Merge tag 'v3.15-rc1' into patchwork
Linux 3.15-rc1
* tag 'v3.15-rc1': (12180 commits)
Linux 3.15-rc1
mm: Initialize error in shmem_file_aio_read()
cifs: Use min_t() when comparing "size_t" and "unsigned long"
sym53c8xx_2: Set DID_REQUEUE return code when aborting squeue
powerpc: Don't try to set LPCR unless we're in hypervisor mode
futex: update documentation for ordering guarantees
ceph: fix pr_fmt() redefinition
vti: don't allow to add the same tunnel twice
gre: don't allow to add the same tunnel twice
drivers: net: xen-netfront: fix array initialization bug
missing bits of "splice: fix racy pipe->buffers uses"
cifs: fix the race in cifs_writev()
ceph_sync_{,direct_}write: fix an oops on ceph_osdc_new_request() failure
pktgen: be friendly to LLTX devices
r8152: check RTL8152_UNPLUG
net: sun4i-emac: add promiscuous support
net/apne: replace IS_ERR and PTR_ERR with PTR_ERR_OR_ZERO
blackfin: cleanup board files
bf609: clock: drop unused clock bit set/clear functions
Blackfin: bf537: rename "CONFIG_ADT75"
...
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                  |   1
-rw-r--r--  kernel/trace/blktrace.c               |  23
-rw-r--r--  kernel/trace/ftrace.c                 | 162
-rw-r--r--  kernel/trace/ring_buffer.c            |  19
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c  |   6
-rw-r--r--  kernel/trace/trace.c                  | 224
-rw-r--r--  kernel/trace/trace.h                  |  41
-rw-r--r--  kernel/trace/trace_event_perf.c       |  22
-rw-r--r--  kernel/trace/trace_events.c           | 101
-rw-r--r--  kernel/trace/trace_events_trigger.c   |   2
-rw-r--r--  kernel/trace/trace_export.c           |  13
-rw-r--r--  kernel/trace/trace_functions.c        | 143
-rw-r--r--  kernel/trace/trace_functions_graph.c  |   3
-rw-r--r--  kernel/trace/trace_irqsoff.c          |  14
-rw-r--r--  kernel/trace/trace_kprobe.c           |  38
-rw-r--r--  kernel/trace/trace_nop.c              |   5
-rw-r--r--  kernel/trace/trace_output.c           |  33
-rw-r--r--  kernel/trace/trace_probe.h            |  17
-rw-r--r--  kernel/trace/trace_sched_wakeup.c     |  10
-rw-r--r--  kernel/trace/trace_stack.c            |   3
-rw-r--r--  kernel/trace/trace_uprobe.c           | 211
21 files changed, 758 insertions, 333 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 015f85aaca08..8639819f6cef 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -424,6 +424,7 @@ config UPROBE_EVENT
 	bool "Enable uprobes-based dynamic events"
 	depends on ARCH_SUPPORTS_UPROBES
 	depends on MMU
+	depends on PERF_EVENTS
 	select UPROBES
 	select PROBE_EVENTS
 	select TRACING
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b418cb0d7242..c1bd4ada2a04 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -702,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q)
  * blk_add_trace_rq - Add a trace for a request oriented action
  * @q:		queue the io is for
  * @rq:		the source request
+ * @nr_bytes:	number of completed bytes
  * @what:	the action
  *
  * Description:
@@ -709,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q)
  *
  **/
 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-			     u32 what)
+			     unsigned int nr_bytes, u32 what)
 {
 	struct blk_trace *bt = q->blk_trace;
 
@@ -718,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
-		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
+		__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
 				what, rq->errors, rq->cmd_len, rq->cmd);
 	} else {
 		what |= BLK_TC_ACT(BLK_TC_FS);
-		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+		__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
 				rq->cmd_flags, what, rq->errors, 0, NULL);
 	}
 }
@@ -730,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 static void blk_add_trace_rq_abort(void *ignore,
 				   struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
 }
 
 static void blk_add_trace_rq_insert(void *ignore,
 				    struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
 }
 
 static void blk_add_trace_rq_issue(void *ignore,
 				   struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
 }
 
 static void blk_add_trace_rq_requeue(void *ignore,
 				     struct request_queue *q,
 				     struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
 }
 
 static void blk_add_trace_rq_complete(void *ignore,
 				      struct request_queue *q,
-				      struct request *rq)
+				      struct request *rq,
+				      unsigned int nr_bytes)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+	blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
 }
 
 /**
@@ -1427,7 +1429,8 @@ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
 	return print_one_line(iter, true);
 }
 
-static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
+static int
+blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	/* don't output context-info for blk_classic output */
 	if (bit == TRACE_BLK_OPT_CLASSIC) {
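The blktrace change above threads the number of completed bytes through blk_add_trace_rq(): every hook except completion keeps the old behavior by passing blk_rq_bytes(rq) explicitly, while the completion hook now takes the count from its caller, so partially completed requests are traced with the bytes that actually finished. A rough sketch of the intended kind of call site (the function name and body here are illustrative, not part of this diff):

	/*
	 * A completion path fires the tracepoint with only the bytes
	 * that actually completed; nr_bytes may be smaller than
	 * blk_rq_bytes(req) when a request completes in pieces.
	 */
	static bool example_update_request(struct request *req, int error,
					   unsigned int nr_bytes)
	{
		trace_block_rq_complete(req->q, req, nr_bytes);
		/* ... bio completion and residual accounting elided ... */
		return blk_rq_bytes(req) > nr_bytes;	/* more left to do? */
	}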
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index cd7f76d1eb86..1fd4b9479210 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -237,14 +237,13 @@ static int control_ops_alloc(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void control_ops_free(struct ftrace_ops *ops)
-{
-	free_percpu(ops->disabled);
-}
-
 static void update_global_ops(void)
 {
-	ftrace_func_t func;
+	ftrace_func_t func = ftrace_global_list_func;
+	void *private = NULL;
+
+	/* The list has its own recursion protection. */
+	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
 
 	/*
 	 * If there's only one function registered, then call that
@@ -254,23 +253,17 @@ static void update_global_ops(void)
 	if (ftrace_global_list == &ftrace_list_end ||
 	    ftrace_global_list->next == &ftrace_list_end) {
 		func = ftrace_global_list->func;
+		private = ftrace_global_list->private;
 		/*
 		 * As we are calling the function directly.
 		 * If it does not have recursion protection,
 		 * the function_trace_op needs to be updated
 		 * accordingly.
 		 */
-		if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
-			global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-		else
+		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
 			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
-	} else {
-		func = ftrace_global_list_func;
-		/* The list has its own recursion protection. */
-		global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
 	}
 
-
 	/* If we filter on pids, update to use the pid function */
 	if (!list_empty(&ftrace_pids)) {
 		set_ftrace_pid_function(func);
@@ -278,6 +271,7 @@ static void update_global_ops(void)
 	}
 
 	global_ops.func = func;
+	global_ops.private = private;
 }
 
 static void ftrace_sync(struct work_struct *work)
@@ -437,6 +431,9 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
+	if (ops->flags & FTRACE_OPS_FL_DELETED)
+		return -EINVAL;
+
 	if (FTRACE_WARN_ON(ops == &global_ops))
 		return -EINVAL;
 
@@ -1172,8 +1169,6 @@ struct ftrace_page {
 	int			size;
 };
 
-static struct ftrace_page	*ftrace_new_pgs;
-
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
@@ -1560,7 +1555,7 @@ unsigned long ftrace_location(unsigned long ip)
  * the function tracer. It checks the ftrace internal tables to
  * determine if the address belongs or not.
 */
-int ftrace_text_reserved(void *start, void *end)
+int ftrace_text_reserved(const void *start, const void *end)
 {
 	unsigned long ret;
 
@@ -1994,6 +1989,7 @@ int __weak ftrace_arch_code_modify_post_process(void)
 void ftrace_modify_all_code(int command)
 {
 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
+	int err = 0;
 
 	/*
 	 * If the ftrace_caller calls a ftrace_ops func directly,
@@ -2005,8 +2001,11 @@ void ftrace_modify_all_code(int command)
 	 * to make sure the ops are having the right functions
 	 * traced.
 	 */
-	if (update)
-		ftrace_update_ftrace_func(ftrace_ops_list_func);
+	if (update) {
+		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+		if (FTRACE_WARN_ON(err))
+			return;
+	}
 
 	if (command & FTRACE_UPDATE_CALLS)
 		ftrace_replace_code(1);
@@ -2019,13 +2018,16 @@ void ftrace_modify_all_code(int command)
 		/* If irqs are disabled, we are in stop machine */
 		if (!irqs_disabled())
 			smp_call_function(ftrace_sync_ipi, NULL, 1);
-		ftrace_update_ftrace_func(ftrace_trace_function);
+		err = ftrace_update_ftrace_func(ftrace_trace_function);
+		if (FTRACE_WARN_ON(err))
+			return;
 	}
 
 	if (command & FTRACE_START_FUNC_RET)
-		ftrace_enable_ftrace_graph_caller();
+		err = ftrace_enable_ftrace_graph_caller();
 	else if (command & FTRACE_STOP_FUNC_RET)
-		ftrace_disable_ftrace_graph_caller();
+		err = ftrace_disable_ftrace_graph_caller();
+	FTRACE_WARN_ON(err);
 }
 
 static int __ftrace_modify_code(void *data)
@@ -2093,6 +2095,11 @@ static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 static int global_start_up;
 
+static void control_ops_free(struct ftrace_ops *ops)
+{
+	free_percpu(ops->disabled);
+}
+
 static void ftrace_startup_enable(int command)
 {
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -2244,7 +2251,6 @@ static void ftrace_shutdown_sysctl(void)
 }
 
 static cycle_t		ftrace_update_time;
-static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
 static inline int ops_traces_mod(struct ftrace_ops *ops)
@@ -2300,11 +2306,12 @@ static int referenced_filters(struct dyn_ftrace *rec)
 	return cnt;
 }
 
-static int ftrace_update_code(struct module *mod)
+static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
 	struct ftrace_page *pg;
 	struct dyn_ftrace *p;
 	cycle_t start, stop;
+	unsigned long update_cnt = 0;
 	unsigned long ref = 0;
 	bool test = false;
 	int i;
@@ -2330,9 +2337,8 @@ static int ftrace_update_code(struct module *mod)
 	}
 
 	start = ftrace_now(raw_smp_processor_id());
-	ftrace_update_cnt = 0;
 
-	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
+	for (pg = new_pgs; pg; pg = pg->next) {
 
 		for (i = 0; i < pg->index; i++) {
 			int cnt = ref;
@@ -2353,7 +2359,7 @@ static int ftrace_update_code(struct module *mod)
 			if (!ftrace_code_disable(mod, p))
 				break;
 
-			ftrace_update_cnt++;
+			update_cnt++;
 
 			/*
 			 * If the tracing is enabled, go ahead and enable the record.
@@ -2372,11 +2378,9 @@ static int ftrace_update_code(struct module *mod)
 		}
 	}
 
-	ftrace_new_pgs = NULL;
-
 	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
-	ftrace_update_tot_cnt += ftrace_update_cnt;
+	ftrace_update_tot_cnt += update_cnt;
 
 	return 0;
 }
@@ -2468,22 +2472,6 @@ ftrace_allocate_pages(unsigned long num_to_init)
 	return NULL;
 }
 
-static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
-{
-	int cnt;
-
-	if (!num_to_init) {
-		pr_info("ftrace: No functions to be traced?\n");
-		return -1;
-	}
-
-	cnt = num_to_init / ENTRIES_PER_PAGE;
-	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt + 1);
-
-	return 0;
-}
-
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4)	/* room for wildcards */
 
 struct ftrace_iterator {
@@ -2871,7 +2859,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 static int
 ftrace_filter_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&global_ops,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops,
 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
 			inode, file);
 }
@@ -2879,7 +2869,9 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 static int
 ftrace_notrace_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
 				 inode, file);
 }
 
@@ -4109,6 +4101,36 @@ static const struct file_operations ftrace_graph_notrace_fops = {
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+				struct dentry *parent)
+{
+
+	trace_create_file("set_ftrace_filter", 0644, parent,
+			  ops, &ftrace_filter_fops);
+
+	trace_create_file("set_ftrace_notrace", 0644, parent,
+			  ops, &ftrace_notrace_fops);
+}
+
+/*
+ * The name "destroy_filter_files" is really a misnomer. Although
+ * in the future, it may actually delete the files, but this is
+ * really intended to make sure the ops passed in are disabled
+ * and that when this function returns, the caller is free to
+ * free the ops.
+ *
+ * The "destroy" name is only to match the "create" name that this
+ * should be paired with.
+ */
+void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+{
+	mutex_lock(&ftrace_lock);
+	if (ops->flags & FTRACE_OPS_FL_ENABLED)
+		ftrace_shutdown(ops, 0);
+	ops->flags |= FTRACE_OPS_FL_DELETED;
+	mutex_unlock(&ftrace_lock);
+}
+
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
 
@@ -4118,11 +4140,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 	trace_create_file("enabled_functions", 0444,
 			d_tracer, NULL, &ftrace_enabled_fops);
 
-	trace_create_file("set_ftrace_filter", 0644, d_tracer,
-			NULL, &ftrace_filter_fops);
-
-	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
-				    NULL, &ftrace_notrace_fops);
+	ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	trace_create_file("set_graph_function", 0444, d_tracer,
@@ -4238,9 +4256,6 @@ static int ftrace_process_locs(struct module *mod,
 	/* Assign the last page to ftrace_pages */
 	ftrace_pages = pg;
 
-	/* These new locations need to be initialized */
-	ftrace_new_pgs = start_pg;
-
 	/*
 	 * We only need to disable interrupts on start up
 	 * because we are modifying code that an interrupt
@@ -4251,7 +4266,7 @@ static int ftrace_process_locs(struct module *mod,
 	 */
 	if (!mod)
 		local_irq_save(flags);
-	ftrace_update_code(mod);
+	ftrace_update_code(mod, start_pg);
 	if (!mod)
 		local_irq_restore(flags);
 	ret = 0;
@@ -4360,30 +4375,27 @@ struct notifier_block ftrace_module_exit_nb = {
 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
 };
 
-extern unsigned long __start_mcount_loc[];
-extern unsigned long __stop_mcount_loc[];
-
 void __init ftrace_init(void)
 {
-	unsigned long count, addr, flags;
+	extern unsigned long __start_mcount_loc[];
+	extern unsigned long __stop_mcount_loc[];
+	unsigned long count, flags;
 	int ret;
 
-	/* Keep the ftrace pointer to the stub */
-	addr = (unsigned long)ftrace_stub;
-
 	local_irq_save(flags);
-	ftrace_dyn_arch_init(&addr);
+	ret = ftrace_dyn_arch_init();
 	local_irq_restore(flags);
-
-	/* ftrace_dyn_arch_init places the return code in addr */
-	if (addr)
+	if (ret)
 		goto failed;
 
 	count = __stop_mcount_loc - __start_mcount_loc;
-
-	ret = ftrace_dyn_table_alloc(count);
-	if (ret)
+	if (!count) {
+		pr_info("ftrace: No functions to be traced?\n");
 		goto failed;
+	}
+
+	pr_info("ftrace: allocating %ld entries in %ld pages\n",
+		count, count / ENTRIES_PER_PAGE + 1);
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
@@ -4431,7 +4443,13 @@ static inline void ftrace_startup_enable(int command) { }
 		(ops)->flags |= FTRACE_OPS_FL_ENABLED;			\
 		___ret;							\
 	})
-# define ftrace_shutdown(ops, command)	__unregister_ftrace_function(ops)
+# define ftrace_shutdown(ops, command)					\
+	({								\
+		int ___ret = __unregister_ftrace_function(ops);		\
+		if (!___ret)						\
+			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
+		___ret;							\
+	})
 
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
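Two themes run through the ftrace.c hunks above: the set_ftrace_filter and set_ftrace_notrace files now resolve their ftrace_ops from inode->i_private instead of hardcoding global_ops, and an ops can be retired with the new FTRACE_OPS_FL_DELETED flag, which __register_ftrace_function() rejects so a stale file handle can never re-arm freed ops. A sketch of the intended pairing for an instance-private ops (everything here except the two helpers is hypothetical):

	static struct ftrace_ops instance_ops = {
		/* .func would point at the instance's callback */
		.flags = FTRACE_OPS_FL_RECURSION_SAFE,
	};

	/* instance creation: per-ops filter control files */
	ftrace_create_filter_files(&instance_ops, instance_dentry);

	/*
	 * instance teardown: disable the ops and mark it DELETED;
	 * once this returns, the caller is free to free the ops.
	 */
	ftrace_destroy_filter_files(&instance_ops);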
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fc4da2d97f9b..c634868c2921 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1301,7 +1301,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	 * In that off case, we need to allocate for all possible cpus.
 	 */
 #ifdef CONFIG_HOTPLUG_CPU
-	get_online_cpus();
+	cpu_notifier_register_begin();
 	cpumask_copy(buffer->cpumask, cpu_online_mask);
 #else
 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
@@ -1324,10 +1324,10 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 #ifdef CONFIG_HOTPLUG_CPU
 	buffer->cpu_notify.notifier_call = rb_cpu_notify;
 	buffer->cpu_notify.priority = 0;
-	register_cpu_notifier(&buffer->cpu_notify);
+	__register_cpu_notifier(&buffer->cpu_notify);
+	cpu_notifier_register_done();
 #endif
 
-	put_online_cpus();
 	mutex_init(&buffer->mutex);
 
 	return buffer;
@@ -1341,7 +1341,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 
  fail_free_cpumask:
 	free_cpumask_var(buffer->cpumask);
-	put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+	cpu_notifier_register_done();
+#endif
 
  fail_free_buffer:
 	kfree(buffer);
@@ -1358,16 +1360,17 @@ ring_buffer_free(struct ring_buffer *buffer)
 {
 	int cpu;
 
-	get_online_cpus();
-
 #ifdef CONFIG_HOTPLUG_CPU
-	unregister_cpu_notifier(&buffer->cpu_notify);
+	cpu_notifier_register_begin();
+	__unregister_cpu_notifier(&buffer->cpu_notify);
 #endif
 
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);
 
-	put_online_cpus();
+#ifdef CONFIG_HOTPLUG_CPU
+	cpu_notifier_register_done();
+#endif
 
 	kfree(buffer->buffers);
 	free_cpumask_var(buffer->cpumask);
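The ring buffer conversion above follows the CPU notifier registration protocol introduced in this release: cpu_notifier_register_begin()/cpu_notifier_register_done() replace get_online_cpus()/put_online_cpus() so that sampling cpu_online_mask and registering the notifier happen atomically with respect to hotplug. The general pattern, as a hedged sketch (the per-CPU setup and notifier names are placeholders):

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		setup_per_cpu_state(cpu);	/* placeholder per-CPU init */

	/* the double-underscore variant must be used inside the section */
	__register_cpu_notifier(&my_notifier);

	cpu_notifier_register_done();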
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index a5457d577b98..0434ff1b808e 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -40,8 +40,8 @@ static int write_iteration = 50;
 module_param(write_iteration, uint, 0644);
 MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
 
-static int producer_nice = 19;
-static int consumer_nice = 19;
+static int producer_nice = MAX_NICE;
+static int consumer_nice = MAX_NICE;
 
 static int producer_fifo = -1;
 static int consumer_fifo = -1;
@@ -308,7 +308,7 @@ static void ring_buffer_producer(void)
 
 	/* Let the user know that the test is running at low priority */
 	if (producer_fifo < 0 && consumer_fifo < 0 &&
-	    producer_nice == 19 && consumer_nice == 19)
+	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
 		trace_printk("WARNING!!! This test is running at lowest priority.\n");
 
 	trace_printk("Time: %lld (usecs)\n", time);
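The benchmark now spells the magic nice value 19 as MAX_NICE, part of a tree-wide cleanup in this merge. For reference, the nice-range macros as defined in include/linux/sched/prio.h around this release:

	#define MAX_NICE	19
	#define MIN_NICE	-20
	#define NICE_WIDTH	(MAX_NICE - MIN_NICE + 1)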
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 815c878f409b..737b0efa1a62 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -73,7 +73,8 @@ static struct tracer_flags dummy_tracer_flags = { | |||
| 73 | .opts = dummy_tracer_opt | 73 | .opts = dummy_tracer_opt |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | static int dummy_set_flag(u32 old_flags, u32 bit, int set) | 76 | static int |
| 77 | dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 77 | { | 78 | { |
| 78 | return 0; | 79 | return 0; |
| 79 | } | 80 | } |
| @@ -118,7 +119,7 @@ enum ftrace_dump_mode ftrace_dump_on_oops; | |||
| 118 | /* When set, tracing will stop when a WARN*() is hit */ | 119 | /* When set, tracing will stop when a WARN*() is hit */ |
| 119 | int __disable_trace_on_warning; | 120 | int __disable_trace_on_warning; |
| 120 | 121 | ||
| 121 | static int tracing_set_tracer(const char *buf); | 122 | static int tracing_set_tracer(struct trace_array *tr, const char *buf); |
| 122 | 123 | ||
| 123 | #define MAX_TRACER_SIZE 100 | 124 | #define MAX_TRACER_SIZE 100 |
| 124 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; | 125 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
| @@ -180,6 +181,17 @@ static int __init set_trace_boot_options(char *str) | |||
| 180 | } | 181 | } |
| 181 | __setup("trace_options=", set_trace_boot_options); | 182 | __setup("trace_options=", set_trace_boot_options); |
| 182 | 183 | ||
| 184 | static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; | ||
| 185 | static char *trace_boot_clock __initdata; | ||
| 186 | |||
| 187 | static int __init set_trace_boot_clock(char *str) | ||
| 188 | { | ||
| 189 | strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); | ||
| 190 | trace_boot_clock = trace_boot_clock_buf; | ||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | __setup("trace_clock=", set_trace_boot_clock); | ||
| 194 | |||
| 183 | 195 | ||
| 184 | unsigned long long ns2usecs(cycle_t nsec) | 196 | unsigned long long ns2usecs(cycle_t nsec) |
| 185 | { | 197 | { |
| @@ -1230,7 +1242,7 @@ int register_tracer(struct tracer *type) | |||
| 1230 | 1242 | ||
| 1231 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); | 1243 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); |
| 1232 | /* Do we want this tracer to start on bootup? */ | 1244 | /* Do we want this tracer to start on bootup? */ |
| 1233 | tracing_set_tracer(type->name); | 1245 | tracing_set_tracer(&global_trace, type->name); |
| 1234 | default_bootup_tracer = NULL; | 1246 | default_bootup_tracer = NULL; |
| 1235 | /* disable other selftests, since this will break it. */ | 1247 | /* disable other selftests, since this will break it. */ |
| 1236 | tracing_selftest_disabled = true; | 1248 | tracing_selftest_disabled = true; |
| @@ -1600,15 +1612,31 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer, | |||
| 1600 | } | 1612 | } |
| 1601 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); | 1613 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); |
| 1602 | 1614 | ||
| 1615 | static struct ring_buffer *temp_buffer; | ||
| 1616 | |||
| 1603 | struct ring_buffer_event * | 1617 | struct ring_buffer_event * |
| 1604 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, | 1618 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, |
| 1605 | struct ftrace_event_file *ftrace_file, | 1619 | struct ftrace_event_file *ftrace_file, |
| 1606 | int type, unsigned long len, | 1620 | int type, unsigned long len, |
| 1607 | unsigned long flags, int pc) | 1621 | unsigned long flags, int pc) |
| 1608 | { | 1622 | { |
| 1623 | struct ring_buffer_event *entry; | ||
| 1624 | |||
| 1609 | *current_rb = ftrace_file->tr->trace_buffer.buffer; | 1625 | *current_rb = ftrace_file->tr->trace_buffer.buffer; |
| 1610 | return trace_buffer_lock_reserve(*current_rb, | 1626 | entry = trace_buffer_lock_reserve(*current_rb, |
| 1611 | type, len, flags, pc); | 1627 | type, len, flags, pc); |
| 1628 | /* | ||
| 1629 | * If tracing is off, but we have triggers enabled | ||
| 1630 | * we still need to look at the event data. Use the temp_buffer | ||
| 1631 | * to store the trace event for the tigger to use. It's recusive | ||
| 1632 | * safe and will not be recorded anywhere. | ||
| 1633 | */ | ||
| 1634 | if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) { | ||
| 1635 | *current_rb = temp_buffer; | ||
| 1636 | entry = trace_buffer_lock_reserve(*current_rb, | ||
| 1637 | type, len, flags, pc); | ||
| 1638 | } | ||
| 1639 | return entry; | ||
| 1612 | } | 1640 | } |
| 1613 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); | 1641 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); |
| 1614 | 1642 | ||
| @@ -3121,27 +3149,52 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
| 3121 | return ret; | 3149 | return ret; |
| 3122 | } | 3150 | } |
| 3123 | 3151 | ||
| 3152 | /* | ||
| 3153 | * Some tracers are not suitable for instance buffers. | ||
| 3154 | * A tracer is always available for the global array (toplevel) | ||
| 3155 | * or if it explicitly states that it is. | ||
| 3156 | */ | ||
| 3157 | static bool | ||
| 3158 | trace_ok_for_array(struct tracer *t, struct trace_array *tr) | ||
| 3159 | { | ||
| 3160 | return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; | ||
| 3161 | } | ||
| 3162 | |||
| 3163 | /* Find the next tracer that this trace array may use */ | ||
| 3164 | static struct tracer * | ||
| 3165 | get_tracer_for_array(struct trace_array *tr, struct tracer *t) | ||
| 3166 | { | ||
| 3167 | while (t && !trace_ok_for_array(t, tr)) | ||
| 3168 | t = t->next; | ||
| 3169 | |||
| 3170 | return t; | ||
| 3171 | } | ||
| 3172 | |||
| 3124 | static void * | 3173 | static void * |
| 3125 | t_next(struct seq_file *m, void *v, loff_t *pos) | 3174 | t_next(struct seq_file *m, void *v, loff_t *pos) |
| 3126 | { | 3175 | { |
| 3176 | struct trace_array *tr = m->private; | ||
| 3127 | struct tracer *t = v; | 3177 | struct tracer *t = v; |
| 3128 | 3178 | ||
| 3129 | (*pos)++; | 3179 | (*pos)++; |
| 3130 | 3180 | ||
| 3131 | if (t) | 3181 | if (t) |
| 3132 | t = t->next; | 3182 | t = get_tracer_for_array(tr, t->next); |
| 3133 | 3183 | ||
| 3134 | return t; | 3184 | return t; |
| 3135 | } | 3185 | } |
| 3136 | 3186 | ||
| 3137 | static void *t_start(struct seq_file *m, loff_t *pos) | 3187 | static void *t_start(struct seq_file *m, loff_t *pos) |
| 3138 | { | 3188 | { |
| 3189 | struct trace_array *tr = m->private; | ||
| 3139 | struct tracer *t; | 3190 | struct tracer *t; |
| 3140 | loff_t l = 0; | 3191 | loff_t l = 0; |
| 3141 | 3192 | ||
| 3142 | mutex_lock(&trace_types_lock); | 3193 | mutex_lock(&trace_types_lock); |
| 3143 | for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) | 3194 | |
| 3144 | ; | 3195 | t = get_tracer_for_array(tr, trace_types); |
| 3196 | for (; t && l < *pos; t = t_next(m, t, &l)) | ||
| 3197 | ; | ||
| 3145 | 3198 | ||
| 3146 | return t; | 3199 | return t; |
| 3147 | } | 3200 | } |
| @@ -3176,10 +3229,21 @@ static const struct seq_operations show_traces_seq_ops = { | |||
| 3176 | 3229 | ||
| 3177 | static int show_traces_open(struct inode *inode, struct file *file) | 3230 | static int show_traces_open(struct inode *inode, struct file *file) |
| 3178 | { | 3231 | { |
| 3232 | struct trace_array *tr = inode->i_private; | ||
| 3233 | struct seq_file *m; | ||
| 3234 | int ret; | ||
| 3235 | |||
| 3179 | if (tracing_disabled) | 3236 | if (tracing_disabled) |
| 3180 | return -ENODEV; | 3237 | return -ENODEV; |
| 3181 | 3238 | ||
| 3182 | return seq_open(file, &show_traces_seq_ops); | 3239 | ret = seq_open(file, &show_traces_seq_ops); |
| 3240 | if (ret) | ||
| 3241 | return ret; | ||
| 3242 | |||
| 3243 | m = file->private_data; | ||
| 3244 | m->private = tr; | ||
| 3245 | |||
| 3246 | return 0; | ||
| 3183 | } | 3247 | } |
| 3184 | 3248 | ||
| 3185 | static ssize_t | 3249 | static ssize_t |
| @@ -3339,13 +3403,14 @@ static int tracing_trace_options_show(struct seq_file *m, void *v) | |||
| 3339 | return 0; | 3403 | return 0; |
| 3340 | } | 3404 | } |
| 3341 | 3405 | ||
| 3342 | static int __set_tracer_option(struct tracer *trace, | 3406 | static int __set_tracer_option(struct trace_array *tr, |
| 3343 | struct tracer_flags *tracer_flags, | 3407 | struct tracer_flags *tracer_flags, |
| 3344 | struct tracer_opt *opts, int neg) | 3408 | struct tracer_opt *opts, int neg) |
| 3345 | { | 3409 | { |
| 3410 | struct tracer *trace = tr->current_trace; | ||
| 3346 | int ret; | 3411 | int ret; |
| 3347 | 3412 | ||
| 3348 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); | 3413 | ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); |
| 3349 | if (ret) | 3414 | if (ret) |
| 3350 | return ret; | 3415 | return ret; |
| 3351 | 3416 | ||
| @@ -3357,8 +3422,9 @@ static int __set_tracer_option(struct tracer *trace, | |||
| 3357 | } | 3422 | } |
| 3358 | 3423 | ||
| 3359 | /* Try to assign a tracer specific option */ | 3424 | /* Try to assign a tracer specific option */ |
| 3360 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | 3425 | static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) |
| 3361 | { | 3426 | { |
| 3427 | struct tracer *trace = tr->current_trace; | ||
| 3362 | struct tracer_flags *tracer_flags = trace->flags; | 3428 | struct tracer_flags *tracer_flags = trace->flags; |
| 3363 | struct tracer_opt *opts = NULL; | 3429 | struct tracer_opt *opts = NULL; |
| 3364 | int i; | 3430 | int i; |
| @@ -3367,8 +3433,7 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
| 3367 | opts = &tracer_flags->opts[i]; | 3433 | opts = &tracer_flags->opts[i]; |
| 3368 | 3434 | ||
| 3369 | if (strcmp(cmp, opts->name) == 0) | 3435 | if (strcmp(cmp, opts->name) == 0) |
| 3370 | return __set_tracer_option(trace, trace->flags, | 3436 | return __set_tracer_option(tr, trace->flags, opts, neg); |
| 3371 | opts, neg); | ||
| 3372 | } | 3437 | } |
| 3373 | 3438 | ||
| 3374 | return -EINVAL; | 3439 | return -EINVAL; |
| @@ -3391,7 +3456,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) | |||
| 3391 | 3456 | ||
| 3392 | /* Give the tracer a chance to approve the change */ | 3457 | /* Give the tracer a chance to approve the change */ |
| 3393 | if (tr->current_trace->flag_changed) | 3458 | if (tr->current_trace->flag_changed) |
| 3394 | if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled)) | 3459 | if (tr->current_trace->flag_changed(tr, mask, !!enabled)) |
| 3395 | return -EINVAL; | 3460 | return -EINVAL; |
| 3396 | 3461 | ||
| 3397 | if (enabled) | 3462 | if (enabled) |
| @@ -3440,7 +3505,7 @@ static int trace_set_options(struct trace_array *tr, char *option) | |||
| 3440 | 3505 | ||
| 3441 | /* If no option could be set, test the specific tracer options */ | 3506 | /* If no option could be set, test the specific tracer options */ |
| 3442 | if (!trace_options[i]) | 3507 | if (!trace_options[i]) |
| 3443 | ret = set_tracer_option(tr->current_trace, cmp, neg); | 3508 | ret = set_tracer_option(tr, cmp, neg); |
| 3444 | 3509 | ||
| 3445 | mutex_unlock(&trace_types_lock); | 3510 | mutex_unlock(&trace_types_lock); |
| 3446 | 3511 | ||
| @@ -3546,6 +3611,8 @@ static const char readme_msg[] = | |||
| 3546 | #ifdef CONFIG_TRACER_SNAPSHOT | 3611 | #ifdef CONFIG_TRACER_SNAPSHOT |
| 3547 | "\t\t snapshot\n" | 3612 | "\t\t snapshot\n" |
| 3548 | #endif | 3613 | #endif |
| 3614 | "\t\t dump\n" | ||
| 3615 | "\t\t cpudump\n" | ||
| 3549 | "\t example: echo do_fault:traceoff > set_ftrace_filter\n" | 3616 | "\t example: echo do_fault:traceoff > set_ftrace_filter\n" |
| 3550 | "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" | 3617 | "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" |
| 3551 | "\t The first one will disable tracing every time do_fault is hit\n" | 3618 | "\t The first one will disable tracing every time do_fault is hit\n" |
| @@ -3869,10 +3936,26 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer); | |||
| 3869 | static void | 3936 | static void |
| 3870 | destroy_trace_option_files(struct trace_option_dentry *topts); | 3937 | destroy_trace_option_files(struct trace_option_dentry *topts); |
| 3871 | 3938 | ||
| 3872 | static int tracing_set_tracer(const char *buf) | 3939 | /* |
| 3940 | * Used to clear out the tracer before deletion of an instance. | ||
| 3941 | * Must have trace_types_lock held. | ||
| 3942 | */ | ||
| 3943 | static void tracing_set_nop(struct trace_array *tr) | ||
| 3944 | { | ||
| 3945 | if (tr->current_trace == &nop_trace) | ||
| 3946 | return; | ||
| 3947 | |||
| 3948 | tr->current_trace->enabled--; | ||
| 3949 | |||
| 3950 | if (tr->current_trace->reset) | ||
| 3951 | tr->current_trace->reset(tr); | ||
| 3952 | |||
| 3953 | tr->current_trace = &nop_trace; | ||
| 3954 | } | ||
| 3955 | |||
| 3956 | static int tracing_set_tracer(struct trace_array *tr, const char *buf) | ||
| 3873 | { | 3957 | { |
| 3874 | static struct trace_option_dentry *topts; | 3958 | static struct trace_option_dentry *topts; |
| 3875 | struct trace_array *tr = &global_trace; | ||
| 3876 | struct tracer *t; | 3959 | struct tracer *t; |
| 3877 | #ifdef CONFIG_TRACER_MAX_TRACE | 3960 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 3878 | bool had_max_tr; | 3961 | bool had_max_tr; |
| @@ -3900,9 +3983,15 @@ static int tracing_set_tracer(const char *buf) | |||
| 3900 | if (t == tr->current_trace) | 3983 | if (t == tr->current_trace) |
| 3901 | goto out; | 3984 | goto out; |
| 3902 | 3985 | ||
| 3986 | /* Some tracers are only allowed for the top level buffer */ | ||
| 3987 | if (!trace_ok_for_array(t, tr)) { | ||
| 3988 | ret = -EINVAL; | ||
| 3989 | goto out; | ||
| 3990 | } | ||
| 3991 | |||
| 3903 | trace_branch_disable(); | 3992 | trace_branch_disable(); |
| 3904 | 3993 | ||
| 3905 | tr->current_trace->enabled = false; | 3994 | tr->current_trace->enabled--; |
| 3906 | 3995 | ||
| 3907 | if (tr->current_trace->reset) | 3996 | if (tr->current_trace->reset) |
| 3908 | tr->current_trace->reset(tr); | 3997 | tr->current_trace->reset(tr); |
| @@ -3925,9 +4014,11 @@ static int tracing_set_tracer(const char *buf) | |||
| 3925 | free_snapshot(tr); | 4014 | free_snapshot(tr); |
| 3926 | } | 4015 | } |
| 3927 | #endif | 4016 | #endif |
| 3928 | destroy_trace_option_files(topts); | 4017 | /* Currently, only the top instance has options */ |
| 3929 | 4018 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | |
| 3930 | topts = create_trace_option_files(tr, t); | 4019 | destroy_trace_option_files(topts); |
| 4020 | topts = create_trace_option_files(tr, t); | ||
| 4021 | } | ||
| 3931 | 4022 | ||
| 3932 | #ifdef CONFIG_TRACER_MAX_TRACE | 4023 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 3933 | if (t->use_max_tr && !had_max_tr) { | 4024 | if (t->use_max_tr && !had_max_tr) { |
| @@ -3944,7 +4035,7 @@ static int tracing_set_tracer(const char *buf) | |||
| 3944 | } | 4035 | } |
| 3945 | 4036 | ||
| 3946 | tr->current_trace = t; | 4037 | tr->current_trace = t; |
| 3947 | tr->current_trace->enabled = true; | 4038 | tr->current_trace->enabled++; |
| 3948 | trace_branch_enable(tr); | 4039 | trace_branch_enable(tr); |
| 3949 | out: | 4040 | out: |
| 3950 | mutex_unlock(&trace_types_lock); | 4041 | mutex_unlock(&trace_types_lock); |
| @@ -3956,6 +4047,7 @@ static ssize_t | |||
| 3956 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | 4047 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, |
| 3957 | size_t cnt, loff_t *ppos) | 4048 | size_t cnt, loff_t *ppos) |
| 3958 | { | 4049 | { |
| 4050 | struct trace_array *tr = filp->private_data; | ||
| 3959 | char buf[MAX_TRACER_SIZE+1]; | 4051 | char buf[MAX_TRACER_SIZE+1]; |
| 3960 | int i; | 4052 | int i; |
| 3961 | size_t ret; | 4053 | size_t ret; |
| @@ -3975,7 +4067,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
| 3975 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | 4067 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) |
| 3976 | buf[i] = 0; | 4068 | buf[i] = 0; |
| 3977 | 4069 | ||
| 3978 | err = tracing_set_tracer(buf); | 4070 | err = tracing_set_tracer(tr, buf); |
| 3979 | if (err) | 4071 | if (err) |
| 3980 | return err; | 4072 | return err; |
| 3981 | 4073 | ||
| @@ -4300,8 +4392,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | |||
| 4300 | 4392 | ||
| 4301 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { | 4393 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { |
| 4302 | .can_merge = 0, | 4394 | .can_merge = 0, |
| 4303 | .map = generic_pipe_buf_map, | ||
| 4304 | .unmap = generic_pipe_buf_unmap, | ||
| 4305 | .confirm = generic_pipe_buf_confirm, | 4395 | .confirm = generic_pipe_buf_confirm, |
| 4306 | .release = generic_pipe_buf_release, | 4396 | .release = generic_pipe_buf_release, |
| 4307 | .steal = generic_pipe_buf_steal, | 4397 | .steal = generic_pipe_buf_steal, |
| @@ -4396,7 +4486,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
| 4396 | trace_access_lock(iter->cpu_file); | 4486 | trace_access_lock(iter->cpu_file); |
| 4397 | 4487 | ||
| 4398 | /* Fill as many pages as possible. */ | 4488 | /* Fill as many pages as possible. */ |
| 4399 | for (i = 0, rem = len; i < pipe->buffers && rem; i++) { | 4489 | for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { |
| 4400 | spd.pages[i] = alloc_page(GFP_KERNEL); | 4490 | spd.pages[i] = alloc_page(GFP_KERNEL); |
| 4401 | if (!spd.pages[i]) | 4491 | if (!spd.pages[i]) |
| 4402 | break; | 4492 | break; |
| @@ -4683,25 +4773,10 @@ static int tracing_clock_show(struct seq_file *m, void *v) | |||
| 4683 | return 0; | 4773 | return 0; |
| 4684 | } | 4774 | } |
| 4685 | 4775 | ||
| 4686 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 4776 | static int tracing_set_clock(struct trace_array *tr, const char *clockstr) |
| 4687 | size_t cnt, loff_t *fpos) | ||
| 4688 | { | 4777 | { |
| 4689 | struct seq_file *m = filp->private_data; | ||
| 4690 | struct trace_array *tr = m->private; | ||
| 4691 | char buf[64]; | ||
| 4692 | const char *clockstr; | ||
| 4693 | int i; | 4778 | int i; |
| 4694 | 4779 | ||
| 4695 | if (cnt >= sizeof(buf)) | ||
| 4696 | return -EINVAL; | ||
| 4697 | |||
| 4698 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 4699 | return -EFAULT; | ||
| 4700 | |||
| 4701 | buf[cnt] = 0; | ||
| 4702 | |||
| 4703 | clockstr = strstrip(buf); | ||
| 4704 | |||
| 4705 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { | 4780 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { |
| 4706 | if (strcmp(trace_clocks[i].name, clockstr) == 0) | 4781 | if (strcmp(trace_clocks[i].name, clockstr) == 0) |
| 4707 | break; | 4782 | break; |
| @@ -4729,6 +4804,32 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
| 4729 | 4804 | ||
| 4730 | mutex_unlock(&trace_types_lock); | 4805 | mutex_unlock(&trace_types_lock); |
| 4731 | 4806 | ||
| 4807 | return 0; | ||
| 4808 | } | ||
| 4809 | |||
| 4810 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | ||
| 4811 | size_t cnt, loff_t *fpos) | ||
| 4812 | { | ||
| 4813 | struct seq_file *m = filp->private_data; | ||
| 4814 | struct trace_array *tr = m->private; | ||
| 4815 | char buf[64]; | ||
| 4816 | const char *clockstr; | ||
| 4817 | int ret; | ||
| 4818 | |||
| 4819 | if (cnt >= sizeof(buf)) | ||
| 4820 | return -EINVAL; | ||
| 4821 | |||
| 4822 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 4823 | return -EFAULT; | ||
| 4824 | |||
| 4825 | buf[cnt] = 0; | ||
| 4826 | |||
| 4827 | clockstr = strstrip(buf); | ||
| 4828 | |||
| 4829 | ret = tracing_set_clock(tr, clockstr); | ||
| 4830 | if (ret) | ||
| 4831 | return ret; | ||
| 4832 | |||
| 4732 | *fpos += cnt; | 4833 | *fpos += cnt; |
| 4733 | 4834 | ||
| 4734 | return cnt; | 4835 | return cnt; |
| @@ -5178,8 +5279,6 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | |||
| 5178 | /* Pipe buffer operations for a buffer. */ | 5279 | /* Pipe buffer operations for a buffer. */ |
| 5179 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { | 5280 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { |
| 5180 | .can_merge = 0, | 5281 | .can_merge = 0, |
| 5181 | .map = generic_pipe_buf_map, | ||
| 5182 | .unmap = generic_pipe_buf_unmap, | ||
| 5183 | .confirm = generic_pipe_buf_confirm, | 5282 | .confirm = generic_pipe_buf_confirm, |
| 5184 | .release = buffer_pipe_buf_release, | 5283 | .release = buffer_pipe_buf_release, |
| 5185 | .steal = generic_pipe_buf_steal, | 5284 | .steal = generic_pipe_buf_steal, |
| @@ -5255,7 +5354,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
| 5255 | trace_access_lock(iter->cpu_file); | 5354 | trace_access_lock(iter->cpu_file); |
| 5256 | entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); | 5355 | entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); |
| 5257 | 5356 | ||
| 5258 | for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { | 5357 | for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { |
| 5259 | struct page *page; | 5358 | struct page *page; |
| 5260 | int r; | 5359 | int r; |
| 5261 | 5360 | ||
| @@ -5689,7 +5788,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 5689 | 5788 | ||
| 5690 | if (!!(topt->flags->val & topt->opt->bit) != val) { | 5789 | if (!!(topt->flags->val & topt->opt->bit) != val) { |
| 5691 | mutex_lock(&trace_types_lock); | 5790 | mutex_lock(&trace_types_lock); |
| 5692 | ret = __set_tracer_option(topt->tr->current_trace, topt->flags, | 5791 | ret = __set_tracer_option(topt->tr, topt->flags, |
| 5693 | topt->opt, !val); | 5792 | topt->opt, !val); |
| 5694 | mutex_unlock(&trace_types_lock); | 5793 | mutex_unlock(&trace_types_lock); |
| 5695 | if (ret) | 5794 | if (ret) |
| @@ -6096,7 +6195,9 @@ static int instance_delete(const char *name) | |||
| 6096 | 6195 | ||
| 6097 | list_del(&tr->list); | 6196 | list_del(&tr->list); |
| 6098 | 6197 | ||
| 6198 | tracing_set_nop(tr); | ||
| 6099 | event_trace_del_tracer(tr); | 6199 | event_trace_del_tracer(tr); |
| 6200 | ftrace_destroy_function_files(tr); | ||
| 6100 | debugfs_remove_recursive(tr->dir); | 6201 | debugfs_remove_recursive(tr->dir); |
| 6101 | free_percpu(tr->trace_buffer.data); | 6202 | free_percpu(tr->trace_buffer.data); |
| 6102 | ring_buffer_free(tr->trace_buffer.buffer); | 6203 | ring_buffer_free(tr->trace_buffer.buffer); |
| @@ -6191,6 +6292,12 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
| 6191 | { | 6292 | { |
| 6192 | int cpu; | 6293 | int cpu; |
| 6193 | 6294 | ||
| 6295 | trace_create_file("available_tracers", 0444, d_tracer, | ||
| 6296 | tr, &show_traces_fops); | ||
| 6297 | |||
| 6298 | trace_create_file("current_tracer", 0644, d_tracer, | ||
| 6299 | tr, &set_tracer_fops); | ||
| 6300 | |||
| 6194 | trace_create_file("tracing_cpumask", 0644, d_tracer, | 6301 | trace_create_file("tracing_cpumask", 0644, d_tracer, |
| 6195 | tr, &tracing_cpumask_fops); | 6302 | tr, &tracing_cpumask_fops); |
| 6196 | 6303 | ||
| @@ -6221,6 +6328,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
| 6221 | trace_create_file("tracing_on", 0644, d_tracer, | 6328 | trace_create_file("tracing_on", 0644, d_tracer, |
| 6222 | tr, &rb_simple_fops); | 6329 | tr, &rb_simple_fops); |
| 6223 | 6330 | ||
| 6331 | if (ftrace_create_function_files(tr, d_tracer)) | ||
| 6332 | WARN(1, "Could not allocate function filter files"); | ||
| 6333 | |||
| 6224 | #ifdef CONFIG_TRACER_SNAPSHOT | 6334 | #ifdef CONFIG_TRACER_SNAPSHOT |
| 6225 | trace_create_file("snapshot", 0644, d_tracer, | 6335 | trace_create_file("snapshot", 0644, d_tracer, |
| 6226 | tr, &snapshot_fops); | 6336 | tr, &snapshot_fops); |
| @@ -6243,12 +6353,6 @@ static __init int tracer_init_debugfs(void) | |||
| 6243 | 6353 | ||
| 6244 | init_tracer_debugfs(&global_trace, d_tracer); | 6354 | init_tracer_debugfs(&global_trace, d_tracer); |
| 6245 | 6355 | ||
| 6246 | trace_create_file("available_tracers", 0444, d_tracer, | ||
| 6247 | &global_trace, &show_traces_fops); | ||
| 6248 | |||
| 6249 | trace_create_file("current_tracer", 0644, d_tracer, | ||
| 6250 | &global_trace, &set_tracer_fops); | ||
| 6251 | |||
| 6252 | #ifdef CONFIG_TRACER_MAX_TRACE | 6356 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 6253 | trace_create_file("tracing_max_latency", 0644, d_tracer, | 6357 | trace_create_file("tracing_max_latency", 0644, d_tracer, |
| 6254 | &tracing_max_latency, &tracing_max_lat_fops); | 6358 | &tracing_max_latency, &tracing_max_lat_fops); |
| @@ -6494,11 +6598,16 @@ __init static int tracer_alloc_buffers(void) | |||
| 6494 | 6598 | ||
| 6495 | raw_spin_lock_init(&global_trace.start_lock); | 6599 | raw_spin_lock_init(&global_trace.start_lock); |
| 6496 | 6600 | ||
| 6601 | /* Used for event triggers */ | ||
| 6602 | temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); | ||
| 6603 | if (!temp_buffer) | ||
| 6604 | goto out_free_cpumask; | ||
| 6605 | |||
| 6497 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 6606 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
| 6498 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { | 6607 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { |
| 6499 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 6608 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
| 6500 | WARN_ON(1); | 6609 | WARN_ON(1); |
| 6501 | goto out_free_cpumask; | 6610 | goto out_free_temp_buffer; |
| 6502 | } | 6611 | } |
| 6503 | 6612 | ||
| 6504 | if (global_trace.buffer_disabled) | 6613 | if (global_trace.buffer_disabled) |
| @@ -6506,6 +6615,13 @@ __init static int tracer_alloc_buffers(void) | |||
| 6506 | 6615 | ||
| 6507 | trace_init_cmdlines(); | 6616 | trace_init_cmdlines(); |
| 6508 | 6617 | ||
| 6618 | if (trace_boot_clock) { | ||
| 6619 | ret = tracing_set_clock(&global_trace, trace_boot_clock); | ||
| 6620 | if (ret < 0) | ||
| 6621 | pr_warning("Trace clock %s not defined, going back to default\n", | ||
| 6622 | trace_boot_clock); | ||
| 6623 | } | ||
| 6624 | |||
| 6509 | /* | 6625 | /* |
| 6510 | * register_tracer() might reference current_trace, so it | 6626 | * register_tracer() might reference current_trace, so it |
| 6511 | * needs to be set before we register anything. This is | 6627 | * needs to be set before we register anything. This is |
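The trace_boot_clock string consumed here is presumably captured from a trace_clock= boot parameter by an __setup() handler elsewhere in trace.c; this hunk only applies it. A sketch of the usual capture side, with illustrative names:

```c
/* Illustrative __setup() handler; the real one lives elsewhere in
 * trace.c. The buffer is __initdata since it is only read at boot. */
static char trace_boot_clock_buf[64] __initdata;
static char *trace_boot_clock;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, sizeof(trace_boot_clock_buf));
	trace_boot_clock = trace_boot_clock_buf;
	return 1;	/* parameter consumed */
}
__setup("trace_clock=", set_trace_boot_clock);
```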
| @@ -6540,6 +6656,8 @@ __init static int tracer_alloc_buffers(void) | |||
| 6540 | 6656 | ||
| 6541 | return 0; | 6657 | return 0; |
| 6542 | 6658 | ||
| 6659 | out_free_temp_buffer: | ||
| 6660 | ring_buffer_free(temp_buffer); | ||
| 6543 | out_free_cpumask: | 6661 | out_free_cpumask: |
| 6544 | free_percpu(global_trace.trace_buffer.data); | 6662 | free_percpu(global_trace.trace_buffer.data); |
| 6545 | #ifdef CONFIG_TRACER_MAX_TRACE | 6663 | #ifdef CONFIG_TRACER_MAX_TRACE |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 02b592f2d4b7..2e29d7ba5a52 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/hw_breakpoint.h> | 13 | #include <linux/hw_breakpoint.h> |
| 14 | #include <linux/trace_seq.h> | 14 | #include <linux/trace_seq.h> |
| 15 | #include <linux/ftrace_event.h> | 15 | #include <linux/ftrace_event.h> |
| 16 | #include <linux/compiler.h> | ||
| 16 | 17 | ||
| 17 | #ifdef CONFIG_FTRACE_SYSCALLS | 18 | #ifdef CONFIG_FTRACE_SYSCALLS |
| 18 | #include <asm/unistd.h> /* For NR_SYSCALLS */ | 19 | #include <asm/unistd.h> /* For NR_SYSCALLS */ |
| @@ -210,6 +211,11 @@ struct trace_array { | |||
| 210 | struct list_head events; | 211 | struct list_head events; |
| 211 | cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ | 212 | cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ |
| 212 | int ref; | 213 | int ref; |
| 214 | #ifdef CONFIG_FUNCTION_TRACER | ||
| 215 | struct ftrace_ops *ops; | ||
| 216 | /* function tracing enabled */ | ||
| 217 | int function_enabled; | ||
| 218 | #endif | ||
| 213 | }; | 219 | }; |
| 214 | 220 | ||
| 215 | enum { | 221 | enum { |
| @@ -355,14 +361,16 @@ struct tracer { | |||
| 355 | void (*print_header)(struct seq_file *m); | 361 | void (*print_header)(struct seq_file *m); |
| 356 | enum print_line_t (*print_line)(struct trace_iterator *iter); | 362 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
| 357 | /* If you handled the flag setting, return 0 */ | 363 | /* If you handled the flag setting, return 0 */ |
| 358 | int (*set_flag)(u32 old_flags, u32 bit, int set); | 364 | int (*set_flag)(struct trace_array *tr, |
| 365 | u32 old_flags, u32 bit, int set); | ||
| 359 | /* Return 0 if OK with change, else return non-zero */ | 366 | /* Return 0 if OK with change, else return non-zero */ |
| 360 | int (*flag_changed)(struct tracer *tracer, | 367 | int (*flag_changed)(struct trace_array *tr, |
| 361 | u32 mask, int set); | 368 | u32 mask, int set); |
| 362 | struct tracer *next; | 369 | struct tracer *next; |
| 363 | struct tracer_flags *flags; | 370 | struct tracer_flags *flags; |
| 371 | int enabled; | ||
| 364 | bool print_max; | 372 | bool print_max; |
| 365 | bool enabled; | 373 | bool allow_instances; |
| 366 | #ifdef CONFIG_TRACER_MAX_TRACE | 374 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 367 | bool use_max_tr; | 375 | bool use_max_tr; |
| 368 | #endif | 376 | #endif |
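Since set_flag() and flag_changed() now receive the trace_array rather than a bare tracer, an implementation can reach per-instance state directly; the move of enabled out of the bool block and the new allow_instances flag serve the same per-instance model. A hedged sketch of a tracer filling the reworked hooks (the my_ names are illustrative):

```c
#include "trace.h"

static int my_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* Per-instance reaction is now possible, e.g. via tr->ops. */
	return 0;			/* 0: flag change handled */
}

static int my_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	return 0;			/* non-zero would veto the change */
}

static struct tracer my_tracer __read_mostly = {
	.name		 = "my_tracer",
	.set_flag	 = my_set_flag,
	.flag_changed	 = my_flag_changed,
	.allow_instances = true,	/* usable inside instances/ */
};
```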
| @@ -812,13 +820,36 @@ static inline int ftrace_trace_task(struct task_struct *task) | |||
| 812 | return test_tsk_trace_trace(task); | 820 | return test_tsk_trace_trace(task); |
| 813 | } | 821 | } |
| 814 | extern int ftrace_is_dead(void); | 822 | extern int ftrace_is_dead(void); |
| 823 | int ftrace_create_function_files(struct trace_array *tr, | ||
| 824 | struct dentry *parent); | ||
| 825 | void ftrace_destroy_function_files(struct trace_array *tr); | ||
| 815 | #else | 826 | #else |
| 816 | static inline int ftrace_trace_task(struct task_struct *task) | 827 | static inline int ftrace_trace_task(struct task_struct *task) |
| 817 | { | 828 | { |
| 818 | return 1; | 829 | return 1; |
| 819 | } | 830 | } |
| 820 | static inline int ftrace_is_dead(void) { return 0; } | 831 | static inline int ftrace_is_dead(void) { return 0; } |
| 821 | #endif | 832 | static inline int |
| 833 | ftrace_create_function_files(struct trace_array *tr, | ||
| 834 | struct dentry *parent) | ||
| 835 | { | ||
| 836 | return 0; | ||
| 837 | } | ||
| 838 | static inline void ftrace_destroy_function_files(struct trace_array *tr) { } | ||
| 839 | #endif /* CONFIG_FUNCTION_TRACER */ | ||
| 840 | |||
| 841 | #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) | ||
| 842 | void ftrace_create_filter_files(struct ftrace_ops *ops, | ||
| 843 | struct dentry *parent); | ||
| 844 | void ftrace_destroy_filter_files(struct ftrace_ops *ops); | ||
| 845 | #else | ||
| 846 | /* | ||
| 847 | * The ops parameter passed in is usually undefined. | ||
| 848 | * This must be a macro. | ||
| 849 | */ | ||
| 850 | #define ftrace_create_filter_files(ops, parent) do { } while (0) | ||
| 851 | #define ftrace_destroy_filter_files(ops) do { } while (0) | ||
| 852 | #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ | ||
| 822 | 853 | ||
| 823 | int ftrace_event_is_function(struct ftrace_event_call *call); | 854 | int ftrace_event_is_function(struct ftrace_event_call *call); |
| 824 | 855 | ||
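The reason the filter-file stubs are macros while the function-file stubs are static inlines is evaluation: an inline still compiles its argument expression, and without CONFIG_DYNAMIC_FTRACE the ops being passed may not exist as a symbol at all, exactly as the comment warns. Reduced illustration:

```c
#ifdef HAVE_FILTER_FILES
void create_filter_files(struct my_ops *ops);
#else
/*
 * static inline void create_filter_files(struct my_ops *ops) { }
 * would still force every call site's 'ops' expression to compile;
 * the macro discards the tokens unevaluated.
 */
#define create_filter_files(ops) do { } while (0)
#endif
```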
| @@ -1249,7 +1280,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); | |||
| 1249 | #undef FTRACE_ENTRY | 1280 | #undef FTRACE_ENTRY |
| 1250 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ | 1281 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |
| 1251 | extern struct ftrace_event_call \ | 1282 | extern struct ftrace_event_call \ |
| 1252 | __attribute__((__aligned__(4))) event_##call; | 1283 | __aligned(4) event_##call; |
| 1253 | #undef FTRACE_ENTRY_DUP | 1284 | #undef FTRACE_ENTRY_DUP |
| 1254 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ | 1285 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ |
| 1255 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ | 1286 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index e854f420e033..c894614de14d 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
| @@ -31,9 +31,25 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event, | |||
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | /* The ftrace function trace is allowed only for root. */ | 33 | /* The ftrace function trace is allowed only for root. */ |
| 34 | if (ftrace_event_is_function(tp_event) && | 34 | if (ftrace_event_is_function(tp_event)) { |
| 35 | perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) | 35 | if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) |
| 36 | return -EPERM; | 36 | return -EPERM; |
| 37 | |||
| 38 | /* | ||
| 39 | * We don't allow user space callchains for the function trace | ||
| 40 | * event, due to issues with page faults while tracing the page | ||
| 41 | * fault handler, and its overall tricky nature. | ||
| 42 | */ | ||
| 43 | if (!p_event->attr.exclude_callchain_user) | ||
| 44 | return -EINVAL; | ||
| 45 | |||
| 46 | /* | ||
| 47 | * Same reason to disable user stack dump as for user space | ||
| 48 | * callchains above. | ||
| 49 | */ | ||
| 50 | if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER) | ||
| 51 | return -EINVAL; | ||
| 52 | } | ||
| 37 | 53 | ||
| 38 | /* No tracing, just counting, so no obvious leak */ | 54 | /* No tracing, just counting, so no obvious leak */ |
| 39 | if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW)) | 55 | if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW)) |
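From userspace, a perf_event_open() caller attaching to the function trace event must now set exclude_callchain_user and keep PERF_SAMPLE_STACK_USER clear, or the open fails with -EINVAL. A hedged sketch of attributes that pass the new checks; the event id must really be read from the event's debugfs id file, so the parameter here is a placeholder:

```c
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* ftrace_function_id: read from .../events/ftrace/function/id */
static long open_function_event(int ftrace_function_id)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_TRACEPOINT;
	attr.config = ftrace_function_id;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	/* Mandatory now: user-space callchains are refused ... */
	attr.exclude_callchain_user = 1;
	/* ... and PERF_SAMPLE_STACK_USER must stay clear. */

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
```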
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e71ffd4eccb5..3ddfd8f62c05 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -27,12 +27,6 @@ | |||
| 27 | 27 | ||
| 28 | DEFINE_MUTEX(event_mutex); | 28 | DEFINE_MUTEX(event_mutex); |
| 29 | 29 | ||
| 30 | DEFINE_MUTEX(event_storage_mutex); | ||
| 31 | EXPORT_SYMBOL_GPL(event_storage_mutex); | ||
| 32 | |||
| 33 | char event_storage[EVENT_STORAGE_SIZE]; | ||
| 34 | EXPORT_SYMBOL_GPL(event_storage); | ||
| 35 | |||
| 36 | LIST_HEAD(ftrace_events); | 30 | LIST_HEAD(ftrace_events); |
| 37 | static LIST_HEAD(ftrace_common_fields); | 31 | static LIST_HEAD(ftrace_common_fields); |
| 38 | 32 | ||
| @@ -194,29 +188,60 @@ int trace_event_raw_init(struct ftrace_event_call *call) | |||
| 194 | } | 188 | } |
| 195 | EXPORT_SYMBOL_GPL(trace_event_raw_init); | 189 | EXPORT_SYMBOL_GPL(trace_event_raw_init); |
| 196 | 190 | ||
| 191 | void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, | ||
| 192 | struct ftrace_event_file *ftrace_file, | ||
| 193 | unsigned long len) | ||
| 194 | { | ||
| 195 | struct ftrace_event_call *event_call = ftrace_file->event_call; | ||
| 196 | |||
| 197 | local_save_flags(fbuffer->flags); | ||
| 198 | fbuffer->pc = preempt_count(); | ||
| 199 | fbuffer->ftrace_file = ftrace_file; | ||
| 200 | |||
| 201 | fbuffer->event = | ||
| 202 | trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, | ||
| 203 | event_call->event.type, len, | ||
| 204 | fbuffer->flags, fbuffer->pc); | ||
| 205 | if (!fbuffer->event) | ||
| 206 | return NULL; | ||
| 207 | |||
| 208 | fbuffer->entry = ring_buffer_event_data(fbuffer->event); | ||
| 209 | return fbuffer->entry; | ||
| 210 | } | ||
| 211 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); | ||
| 212 | |||
| 213 | void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) | ||
| 214 | { | ||
| 215 | event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, | ||
| 216 | fbuffer->event, fbuffer->entry, | ||
| 217 | fbuffer->flags, fbuffer->pc); | ||
| 218 | } | ||
| 219 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); | ||
| 220 | |||
| 197 | int ftrace_event_reg(struct ftrace_event_call *call, | 221 | int ftrace_event_reg(struct ftrace_event_call *call, |
| 198 | enum trace_reg type, void *data) | 222 | enum trace_reg type, void *data) |
| 199 | { | 223 | { |
| 200 | struct ftrace_event_file *file = data; | 224 | struct ftrace_event_file *file = data; |
| 201 | 225 | ||
| 226 | WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); | ||
| 202 | switch (type) { | 227 | switch (type) { |
| 203 | case TRACE_REG_REGISTER: | 228 | case TRACE_REG_REGISTER: |
| 204 | return tracepoint_probe_register(call->name, | 229 | return tracepoint_probe_register(call->tp, |
| 205 | call->class->probe, | 230 | call->class->probe, |
| 206 | file); | 231 | file); |
| 207 | case TRACE_REG_UNREGISTER: | 232 | case TRACE_REG_UNREGISTER: |
| 208 | tracepoint_probe_unregister(call->name, | 233 | tracepoint_probe_unregister(call->tp, |
| 209 | call->class->probe, | 234 | call->class->probe, |
| 210 | file); | 235 | file); |
| 211 | return 0; | 236 | return 0; |
| 212 | 237 | ||
| 213 | #ifdef CONFIG_PERF_EVENTS | 238 | #ifdef CONFIG_PERF_EVENTS |
| 214 | case TRACE_REG_PERF_REGISTER: | 239 | case TRACE_REG_PERF_REGISTER: |
| 215 | return tracepoint_probe_register(call->name, | 240 | return tracepoint_probe_register(call->tp, |
| 216 | call->class->perf_probe, | 241 | call->class->perf_probe, |
| 217 | call); | 242 | call); |
| 218 | case TRACE_REG_PERF_UNREGISTER: | 243 | case TRACE_REG_PERF_UNREGISTER: |
| 219 | tracepoint_probe_unregister(call->name, | 244 | tracepoint_probe_unregister(call->tp, |
| 220 | call->class->perf_probe, | 245 | call->class->perf_probe, |
| 221 | call); | 246 | call); |
| 222 | return 0; | 247 | return 0; |
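The new reserve/commit pair brackets the common sequence a generated tracepoint probe performs: save flags and preempt count, reserve an event, fill the payload, then commit through the trigger path. A hedged kernel-context sketch of a probe using them; my_entry and its field are illustrative stand-ins for what the TRACE_EVENT() macros emit:

```c
#include <linux/ftrace_event.h>

static void my_probe(void *data, unsigned long arg)
{
	struct ftrace_event_file *ftrace_file = data;
	struct ftrace_event_buffer fbuffer;
	struct my_entry {
		struct trace_entry ent;
		unsigned long	   arg;
	} *entry;

	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,
					    sizeof(*entry));
	if (!entry)
		return;		/* buffer full or event disabled */

	entry->arg = arg;	/* fill payload between the two calls */
	ftrace_event_buffer_commit(&fbuffer);
}
```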
| @@ -328,7 +353,7 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, | |||
| 328 | if (ret) { | 353 | if (ret) { |
| 329 | tracing_stop_cmdline_record(); | 354 | tracing_stop_cmdline_record(); |
| 330 | pr_info("event trace: Could not enable event " | 355 | pr_info("event trace: Could not enable event " |
| 331 | "%s\n", call->name); | 356 | "%s\n", ftrace_event_name(call)); |
| 332 | break; | 357 | break; |
| 333 | } | 358 | } |
| 334 | set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); | 359 | set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); |
| @@ -457,27 +482,29 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, | |||
| 457 | { | 482 | { |
| 458 | struct ftrace_event_file *file; | 483 | struct ftrace_event_file *file; |
| 459 | struct ftrace_event_call *call; | 484 | struct ftrace_event_call *call; |
| 485 | const char *name; | ||
| 460 | int ret = -EINVAL; | 486 | int ret = -EINVAL; |
| 461 | 487 | ||
| 462 | list_for_each_entry(file, &tr->events, list) { | 488 | list_for_each_entry(file, &tr->events, list) { |
| 463 | 489 | ||
| 464 | call = file->event_call; | 490 | call = file->event_call; |
| 491 | name = ftrace_event_name(call); | ||
| 465 | 492 | ||
| 466 | if (!call->name || !call->class || !call->class->reg) | 493 | if (!name || !call->class || !call->class->reg) |
| 467 | continue; | 494 | continue; |
| 468 | 495 | ||
| 469 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) | 496 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) |
| 470 | continue; | 497 | continue; |
| 471 | 498 | ||
| 472 | if (match && | 499 | if (match && |
| 473 | strcmp(match, call->name) != 0 && | 500 | strcmp(match, name) != 0 && |
| 474 | strcmp(match, call->class->system) != 0) | 501 | strcmp(match, call->class->system) != 0) |
| 475 | continue; | 502 | continue; |
| 476 | 503 | ||
| 477 | if (sub && strcmp(sub, call->class->system) != 0) | 504 | if (sub && strcmp(sub, call->class->system) != 0) |
| 478 | continue; | 505 | continue; |
| 479 | 506 | ||
| 480 | if (event && strcmp(event, call->name) != 0) | 507 | if (event && strcmp(event, name) != 0) |
| 481 | continue; | 508 | continue; |
| 482 | 509 | ||
| 483 | ftrace_event_enable_disable(file, set); | 510 | ftrace_event_enable_disable(file, set); |
| @@ -675,7 +702,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 675 | 702 | ||
| 676 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) | 703 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) |
| 677 | seq_printf(m, "%s:", call->class->system); | 704 | seq_printf(m, "%s:", call->class->system); |
| 678 | seq_printf(m, "%s\n", call->name); | 705 | seq_printf(m, "%s\n", ftrace_event_name(call)); |
| 679 | 706 | ||
| 680 | return 0; | 707 | return 0; |
| 681 | } | 708 | } |
| @@ -768,7 +795,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
| 768 | mutex_lock(&event_mutex); | 795 | mutex_lock(&event_mutex); |
| 769 | list_for_each_entry(file, &tr->events, list) { | 796 | list_for_each_entry(file, &tr->events, list) { |
| 770 | call = file->event_call; | 797 | call = file->event_call; |
| 771 | if (!call->name || !call->class || !call->class->reg) | 798 | if (!ftrace_event_name(call) || !call->class || !call->class->reg) |
| 772 | continue; | 799 | continue; |
| 773 | 800 | ||
| 774 | if (system && strcmp(call->class->system, system->name) != 0) | 801 | if (system && strcmp(call->class->system, system->name) != 0) |
| @@ -883,7 +910,7 @@ static int f_show(struct seq_file *m, void *v) | |||
| 883 | 910 | ||
| 884 | switch ((unsigned long)v) { | 911 | switch ((unsigned long)v) { |
| 885 | case FORMAT_HEADER: | 912 | case FORMAT_HEADER: |
| 886 | seq_printf(m, "name: %s\n", call->name); | 913 | seq_printf(m, "name: %s\n", ftrace_event_name(call)); |
| 887 | seq_printf(m, "ID: %d\n", call->event.type); | 914 | seq_printf(m, "ID: %d\n", call->event.type); |
| 888 | seq_printf(m, "format:\n"); | 915 | seq_printf(m, "format:\n"); |
| 889 | return 0; | 916 | return 0; |
| @@ -1503,6 +1530,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1503 | struct trace_array *tr = file->tr; | 1530 | struct trace_array *tr = file->tr; |
| 1504 | struct list_head *head; | 1531 | struct list_head *head; |
| 1505 | struct dentry *d_events; | 1532 | struct dentry *d_events; |
| 1533 | const char *name; | ||
| 1506 | int ret; | 1534 | int ret; |
| 1507 | 1535 | ||
| 1508 | /* | 1536 | /* |
| @@ -1516,10 +1544,11 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1516 | } else | 1544 | } else |
| 1517 | d_events = parent; | 1545 | d_events = parent; |
| 1518 | 1546 | ||
| 1519 | file->dir = debugfs_create_dir(call->name, d_events); | 1547 | name = ftrace_event_name(call); |
| 1548 | file->dir = debugfs_create_dir(name, d_events); | ||
| 1520 | if (!file->dir) { | 1549 | if (!file->dir) { |
| 1521 | pr_warning("Could not create debugfs '%s' directory\n", | 1550 | pr_warning("Could not create debugfs '%s' directory\n", |
| 1522 | call->name); | 1551 | name); |
| 1523 | return -1; | 1552 | return -1; |
| 1524 | } | 1553 | } |
| 1525 | 1554 | ||
| @@ -1543,7 +1572,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1543 | ret = call->class->define_fields(call); | 1572 | ret = call->class->define_fields(call); |
| 1544 | if (ret < 0) { | 1573 | if (ret < 0) { |
| 1545 | pr_warning("Could not initialize trace point" | 1574 | pr_warning("Could not initialize trace point" |
| 1546 | " events/%s\n", call->name); | 1575 | " events/%s\n", name); |
| 1547 | return -1; | 1576 | return -1; |
| 1548 | } | 1577 | } |
| 1549 | } | 1578 | } |
| @@ -1607,15 +1636,17 @@ static void event_remove(struct ftrace_event_call *call) | |||
| 1607 | static int event_init(struct ftrace_event_call *call) | 1636 | static int event_init(struct ftrace_event_call *call) |
| 1608 | { | 1637 | { |
| 1609 | int ret = 0; | 1638 | int ret = 0; |
| 1639 | const char *name; | ||
| 1610 | 1640 | ||
| 1611 | if (WARN_ON(!call->name)) | 1641 | name = ftrace_event_name(call); |
| 1642 | if (WARN_ON(!name)) | ||
| 1612 | return -EINVAL; | 1643 | return -EINVAL; |
| 1613 | 1644 | ||
| 1614 | if (call->class->raw_init) { | 1645 | if (call->class->raw_init) { |
| 1615 | ret = call->class->raw_init(call); | 1646 | ret = call->class->raw_init(call); |
| 1616 | if (ret < 0 && ret != -ENOSYS) | 1647 | if (ret < 0 && ret != -ENOSYS) |
| 1617 | pr_warn("Could not initialize trace events/%s\n", | 1648 | pr_warn("Could not initialize trace events/%s\n", |
| 1618 | call->name); | 1649 | name); |
| 1619 | } | 1650 | } |
| 1620 | 1651 | ||
| 1621 | return ret; | 1652 | return ret; |
| @@ -1777,6 +1808,16 @@ static void trace_module_add_events(struct module *mod) | |||
| 1777 | { | 1808 | { |
| 1778 | struct ftrace_event_call **call, **start, **end; | 1809 | struct ftrace_event_call **call, **start, **end; |
| 1779 | 1810 | ||
| 1811 | if (!mod->num_trace_events) | ||
| 1812 | return; | ||
| 1813 | |||
| 1814 | /* Don't add infrastructure for mods without tracepoints */ | ||
| 1815 | if (trace_module_has_bad_taint(mod)) { | ||
| 1816 | pr_err("%s: module has bad taint, not creating trace events\n", | ||
| 1817 | mod->name); | ||
| 1818 | return; | ||
| 1819 | } | ||
| 1820 | |||
| 1780 | start = mod->trace_events; | 1821 | start = mod->trace_events; |
| 1781 | end = mod->trace_events + mod->num_trace_events; | 1822 | end = mod->trace_events + mod->num_trace_events; |
| 1782 | 1823 | ||
| @@ -1851,7 +1892,7 @@ __trace_add_event_dirs(struct trace_array *tr) | |||
| 1851 | ret = __trace_add_new_event(call, tr); | 1892 | ret = __trace_add_new_event(call, tr); |
| 1852 | if (ret < 0) | 1893 | if (ret < 0) |
| 1853 | pr_warning("Could not create directory for event %s\n", | 1894 | pr_warning("Could not create directory for event %s\n", |
| 1854 | call->name); | 1895 | ftrace_event_name(call)); |
| 1855 | } | 1896 | } |
| 1856 | } | 1897 | } |
| 1857 | 1898 | ||
| @@ -1860,18 +1901,20 @@ find_event_file(struct trace_array *tr, const char *system, const char *event) | |||
| 1860 | { | 1901 | { |
| 1861 | struct ftrace_event_file *file; | 1902 | struct ftrace_event_file *file; |
| 1862 | struct ftrace_event_call *call; | 1903 | struct ftrace_event_call *call; |
| 1904 | const char *name; | ||
| 1863 | 1905 | ||
| 1864 | list_for_each_entry(file, &tr->events, list) { | 1906 | list_for_each_entry(file, &tr->events, list) { |
| 1865 | 1907 | ||
| 1866 | call = file->event_call; | 1908 | call = file->event_call; |
| 1909 | name = ftrace_event_name(call); | ||
| 1867 | 1910 | ||
| 1868 | if (!call->name || !call->class || !call->class->reg) | 1911 | if (!name || !call->class || !call->class->reg) |
| 1869 | continue; | 1912 | continue; |
| 1870 | 1913 | ||
| 1871 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) | 1914 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) |
| 1872 | continue; | 1915 | continue; |
| 1873 | 1916 | ||
| 1874 | if (strcmp(event, call->name) == 0 && | 1917 | if (strcmp(event, name) == 0 && |
| 1875 | strcmp(system, call->class->system) == 0) | 1918 | strcmp(system, call->class->system) == 0) |
| 1876 | return file; | 1919 | return file; |
| 1877 | } | 1920 | } |
| @@ -1939,7 +1982,7 @@ event_enable_print(struct seq_file *m, unsigned long ip, | |||
| 1939 | seq_printf(m, "%s:%s:%s", | 1982 | seq_printf(m, "%s:%s:%s", |
| 1940 | data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | 1983 | data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
| 1941 | data->file->event_call->class->system, | 1984 | data->file->event_call->class->system, |
| 1942 | data->file->event_call->name); | 1985 | ftrace_event_name(data->file->event_call)); |
| 1943 | 1986 | ||
| 1944 | if (data->count == -1) | 1987 | if (data->count == -1) |
| 1945 | seq_printf(m, ":unlimited\n"); | 1988 | seq_printf(m, ":unlimited\n"); |
| @@ -2159,7 +2202,7 @@ __trace_early_add_event_dirs(struct trace_array *tr) | |||
| 2159 | ret = event_create_dir(tr->event_dir, file); | 2202 | ret = event_create_dir(tr->event_dir, file); |
| 2160 | if (ret < 0) | 2203 | if (ret < 0) |
| 2161 | pr_warning("Could not create directory for event %s\n", | 2204 | pr_warning("Could not create directory for event %s\n", |
| 2162 | file->event_call->name); | 2205 | ftrace_event_name(file->event_call)); |
| 2163 | } | 2206 | } |
| 2164 | } | 2207 | } |
| 2165 | 2208 | ||
| @@ -2183,7 +2226,7 @@ __trace_early_add_events(struct trace_array *tr) | |||
| 2183 | ret = __trace_early_add_new_event(call, tr); | 2226 | ret = __trace_early_add_new_event(call, tr); |
| 2184 | if (ret < 0) | 2227 | if (ret < 0) |
| 2185 | pr_warning("Could not create early event %s\n", | 2228 | pr_warning("Could not create early event %s\n", |
| 2186 | call->name); | 2229 | ftrace_event_name(call)); |
| 2187 | } | 2230 | } |
| 2188 | } | 2231 | } |
| 2189 | 2232 | ||
| @@ -2515,7 +2558,7 @@ static __init void event_trace_self_tests(void) | |||
| 2515 | continue; | 2558 | continue; |
| 2516 | #endif | 2559 | #endif |
| 2517 | 2560 | ||
| 2518 | pr_info("Testing event %s: ", call->name); | 2561 | pr_info("Testing event %s: ", ftrace_event_name(call)); |
| 2519 | 2562 | ||
| 2520 | /* | 2563 | /* |
| 2521 | * If an event is already enabled, someone is using | 2564 | * If an event is already enabled, someone is using |
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 8efbb69b04f0..925f537f07d1 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
| @@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | |||
| 1095 | seq_printf(m, "%s:%s:%s", | 1095 | seq_printf(m, "%s:%s:%s", |
| 1096 | enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | 1096 | enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
| 1097 | enable_data->file->event_call->class->system, | 1097 | enable_data->file->event_call->class->system, |
| 1098 | enable_data->file->event_call->name); | 1098 | ftrace_event_name(enable_data->file->event_call)); |
| 1099 | 1099 | ||
| 1100 | if (data->count == -1) | 1100 | if (data->count == -1) |
| 1101 | seq_puts(m, ":unlimited"); | 1101 | seq_puts(m, ":unlimited"); |
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 7c3e3e72e2b6..d4ddde28a81a 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
| @@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \ | |||
| 95 | #undef __array | 95 | #undef __array |
| 96 | #define __array(type, item, len) \ | 96 | #define __array(type, item, len) \ |
| 97 | do { \ | 97 | do { \ |
| 98 | char *type_str = #type"["__stringify(len)"]"; \ | ||
| 98 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 99 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
| 99 | mutex_lock(&event_storage_mutex); \ | 100 | ret = trace_define_field(event_call, type_str, #item, \ |
| 100 | snprintf(event_storage, sizeof(event_storage), \ | ||
| 101 | "%s[%d]", #type, len); \ | ||
| 102 | ret = trace_define_field(event_call, event_storage, #item, \ | ||
| 103 | offsetof(typeof(field), item), \ | 101 | offsetof(typeof(field), item), \ |
| 104 | sizeof(field.item), \ | 102 | sizeof(field.item), \ |
| 105 | is_signed_type(type), filter_type); \ | 103 | is_signed_type(type), filter_type); \ |
| 106 | mutex_unlock(&event_storage_mutex); \ | ||
| 107 | if (ret) \ | 104 | if (ret) \ |
| 108 | return ret; \ | 105 | return ret; \ |
| 109 | } while (0); | 106 | } while (0); |
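The shared event_storage buffer and its mutex can go away because the "type[len]" string is now assembled at compile time: adjacent string literals merge, and __stringify() turns the length into one of them. Runnable illustration of the trick in isolation:

```c
#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)
#define TYPE_STR(type, len)	#type "[" __stringify(len) "]"

int main(void)
{
	/* Adjacent literals merge at compile time: "unsigned char[8]" */
	puts(TYPE_STR(unsigned char, 8));
	return 0;
}
```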
| @@ -176,9 +173,11 @@ struct ftrace_event_class __refdata event_class_ftrace_##call = { \ | |||
| 176 | }; \ | 173 | }; \ |
| 177 | \ | 174 | \ |
| 178 | struct ftrace_event_call __used event_##call = { \ | 175 | struct ftrace_event_call __used event_##call = { \ |
| 179 | .name = #call, \ | ||
| 180 | .event.type = etype, \ | ||
| 181 | .class = &event_class_ftrace_##call, \ | 176 | .class = &event_class_ftrace_##call, \ |
| 177 | { \ | ||
| 178 | .name = #call, \ | ||
| 179 | }, \ | ||
| 180 | .event.type = etype, \ | ||
| 182 | .print_fmt = print, \ | 181 | .print_fmt = print, \ |
| 183 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ | 182 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ |
| 184 | }; \ | 183 | }; \ |
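The .name field has evidently moved into an unnamed union inside struct ftrace_event_call, pairing with the tracepoint pointer that ftrace_event_reg now dereferences as call->tp, which is why the initializer gains a positional braced group at the union's position. A reduced, compilable illustration of that initializer shape (types are stand-ins):

```c
struct demo_call {
	const char *class;
	union {				/* unnamed: needs C11 or GNU C */
		const char *name;	/* ftrace-internal events */
		void	   *tp;		/* regular tracepoints */
	};
	int type;
};

/* After a designated initializer, a plain braced group continues with
 * the next member in declaration order -- here, the union. */
static struct demo_call event_demo = {
	.class = "ftrace",
	{
		.name = "demo",
	},
	.type = 1,
};
```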
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 38fe1483c508..5b781d2be383 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
| @@ -13,32 +13,106 @@ | |||
| 13 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
| 14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
| 15 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
| 16 | #include <linux/slab.h> | ||
| 16 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
| 17 | 18 | ||
| 18 | #include "trace.h" | 19 | #include "trace.h" |
| 19 | 20 | ||
| 20 | /* function tracing enabled */ | 21 | static void tracing_start_function_trace(struct trace_array *tr); |
| 21 | static int ftrace_function_enabled; | 22 | static void tracing_stop_function_trace(struct trace_array *tr); |
| 23 | static void | ||
| 24 | function_trace_call(unsigned long ip, unsigned long parent_ip, | ||
| 25 | struct ftrace_ops *op, struct pt_regs *pt_regs); | ||
| 26 | static void | ||
| 27 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | ||
| 28 | struct ftrace_ops *op, struct pt_regs *pt_regs); | ||
| 29 | static struct ftrace_ops trace_ops; | ||
| 30 | static struct ftrace_ops trace_stack_ops; | ||
| 31 | static struct tracer_flags func_flags; | ||
| 32 | |||
| 33 | /* Our option */ | ||
| 34 | enum { | ||
| 35 | TRACE_FUNC_OPT_STACK = 0x1, | ||
| 36 | }; | ||
| 37 | |||
| 38 | static int allocate_ftrace_ops(struct trace_array *tr) | ||
| 39 | { | ||
| 40 | struct ftrace_ops *ops; | ||
| 41 | |||
| 42 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); | ||
| 43 | if (!ops) | ||
| 44 | return -ENOMEM; | ||
| 22 | 45 | ||
| 23 | static struct trace_array *func_trace; | 46 | /* Currently only the non stack verision is supported */ |
| 47 | ops->func = function_trace_call; | ||
| 48 | ops->flags = FTRACE_OPS_FL_RECURSION_SAFE; | ||
| 49 | |||
| 50 | tr->ops = ops; | ||
| 51 | ops->private = tr; | ||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | |||
| 56 | int ftrace_create_function_files(struct trace_array *tr, | ||
| 57 | struct dentry *parent) | ||
| 58 | { | ||
| 59 | int ret; | ||
| 60 | |||
| 61 | /* The top level array uses the "global_ops". */ | ||
| 62 | if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) { | ||
| 63 | ret = allocate_ftrace_ops(tr); | ||
| 64 | if (ret) | ||
| 65 | return ret; | ||
| 66 | } | ||
| 67 | |||
| 68 | ftrace_create_filter_files(tr->ops, parent); | ||
| 69 | |||
| 70 | return 0; | ||
| 71 | } | ||
| 24 | 72 | ||
| 25 | static void tracing_start_function_trace(void); | 73 | void ftrace_destroy_function_files(struct trace_array *tr) |
| 26 | static void tracing_stop_function_trace(void); | 74 | { |
| 75 | ftrace_destroy_filter_files(tr->ops); | ||
| 76 | kfree(tr->ops); | ||
| 77 | tr->ops = NULL; | ||
| 78 | } | ||
| 27 | 79 | ||
| 28 | static int function_trace_init(struct trace_array *tr) | 80 | static int function_trace_init(struct trace_array *tr) |
| 29 | { | 81 | { |
| 30 | func_trace = tr; | 82 | struct ftrace_ops *ops; |
| 83 | |||
| 84 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | ||
| 85 | /* There's only one global tr */ | ||
| 86 | if (!trace_ops.private) { | ||
| 87 | trace_ops.private = tr; | ||
| 88 | trace_stack_ops.private = tr; | ||
| 89 | } | ||
| 90 | |||
| 91 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | ||
| 92 | ops = &trace_stack_ops; | ||
| 93 | else | ||
| 94 | ops = &trace_ops; | ||
| 95 | tr->ops = ops; | ||
| 96 | } else if (!tr->ops) { | ||
| 97 | /* | ||
| 98 | * Instance trace_arrays get their ops allocated | ||
| 99 | * at instance creation, unless the | ||
| 100 | * allocation failed. | ||
| 101 | */ | ||
| 102 | return -ENOMEM; | ||
| 103 | } | ||
| 104 | |||
| 31 | tr->trace_buffer.cpu = get_cpu(); | 105 | tr->trace_buffer.cpu = get_cpu(); |
| 32 | put_cpu(); | 106 | put_cpu(); |
| 33 | 107 | ||
| 34 | tracing_start_cmdline_record(); | 108 | tracing_start_cmdline_record(); |
| 35 | tracing_start_function_trace(); | 109 | tracing_start_function_trace(tr); |
| 36 | return 0; | 110 | return 0; |
| 37 | } | 111 | } |
| 38 | 112 | ||
| 39 | static void function_trace_reset(struct trace_array *tr) | 113 | static void function_trace_reset(struct trace_array *tr) |
| 40 | { | 114 | { |
| 41 | tracing_stop_function_trace(); | 115 | tracing_stop_function_trace(tr); |
| 42 | tracing_stop_cmdline_record(); | 116 | tracing_stop_cmdline_record(); |
| 43 | } | 117 | } |
| 44 | 118 | ||
| @@ -47,25 +121,18 @@ static void function_trace_start(struct trace_array *tr) | |||
| 47 | tracing_reset_online_cpus(&tr->trace_buffer); | 121 | tracing_reset_online_cpus(&tr->trace_buffer); |
| 48 | } | 122 | } |
| 49 | 123 | ||
| 50 | /* Our option */ | ||
| 51 | enum { | ||
| 52 | TRACE_FUNC_OPT_STACK = 0x1, | ||
| 53 | }; | ||
| 54 | |||
| 55 | static struct tracer_flags func_flags; | ||
| 56 | |||
| 57 | static void | 124 | static void |
| 58 | function_trace_call(unsigned long ip, unsigned long parent_ip, | 125 | function_trace_call(unsigned long ip, unsigned long parent_ip, |
| 59 | struct ftrace_ops *op, struct pt_regs *pt_regs) | 126 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
| 60 | { | 127 | { |
| 61 | struct trace_array *tr = func_trace; | 128 | struct trace_array *tr = op->private; |
| 62 | struct trace_array_cpu *data; | 129 | struct trace_array_cpu *data; |
| 63 | unsigned long flags; | 130 | unsigned long flags; |
| 64 | int bit; | 131 | int bit; |
| 65 | int cpu; | 132 | int cpu; |
| 66 | int pc; | 133 | int pc; |
| 67 | 134 | ||
| 68 | if (unlikely(!ftrace_function_enabled)) | 135 | if (unlikely(!tr->function_enabled)) |
| 69 | return; | 136 | return; |
| 70 | 137 | ||
| 71 | pc = preempt_count(); | 138 | pc = preempt_count(); |
| @@ -91,14 +158,14 @@ static void | |||
| 91 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | 158 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, |
| 92 | struct ftrace_ops *op, struct pt_regs *pt_regs) | 159 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
| 93 | { | 160 | { |
| 94 | struct trace_array *tr = func_trace; | 161 | struct trace_array *tr = op->private; |
| 95 | struct trace_array_cpu *data; | 162 | struct trace_array_cpu *data; |
| 96 | unsigned long flags; | 163 | unsigned long flags; |
| 97 | long disabled; | 164 | long disabled; |
| 98 | int cpu; | 165 | int cpu; |
| 99 | int pc; | 166 | int pc; |
| 100 | 167 | ||
| 101 | if (unlikely(!ftrace_function_enabled)) | 168 | if (unlikely(!tr->function_enabled)) |
| 102 | return; | 169 | return; |
| 103 | 170 | ||
| 104 | /* | 171 | /* |
| @@ -128,7 +195,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, | |||
| 128 | local_irq_restore(flags); | 195 | local_irq_restore(flags); |
| 129 | } | 196 | } |
| 130 | 197 | ||
| 131 | |||
| 132 | static struct ftrace_ops trace_ops __read_mostly = | 198 | static struct ftrace_ops trace_ops __read_mostly = |
| 133 | { | 199 | { |
| 134 | .func = function_trace_call, | 200 | .func = function_trace_call, |
| @@ -153,29 +219,21 @@ static struct tracer_flags func_flags = { | |||
| 153 | .opts = func_opts | 219 | .opts = func_opts |
| 154 | }; | 220 | }; |
| 155 | 221 | ||
| 156 | static void tracing_start_function_trace(void) | 222 | static void tracing_start_function_trace(struct trace_array *tr) |
| 157 | { | 223 | { |
| 158 | ftrace_function_enabled = 0; | 224 | tr->function_enabled = 0; |
| 159 | 225 | register_ftrace_function(tr->ops); | |
| 160 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | 226 | tr->function_enabled = 1; |
| 161 | register_ftrace_function(&trace_stack_ops); | ||
| 162 | else | ||
| 163 | register_ftrace_function(&trace_ops); | ||
| 164 | |||
| 165 | ftrace_function_enabled = 1; | ||
| 166 | } | 227 | } |
| 167 | 228 | ||
| 168 | static void tracing_stop_function_trace(void) | 229 | static void tracing_stop_function_trace(struct trace_array *tr) |
| 169 | { | 230 | { |
| 170 | ftrace_function_enabled = 0; | 231 | tr->function_enabled = 0; |
| 171 | 232 | unregister_ftrace_function(tr->ops); | |
| 172 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | ||
| 173 | unregister_ftrace_function(&trace_stack_ops); | ||
| 174 | else | ||
| 175 | unregister_ftrace_function(&trace_ops); | ||
| 176 | } | 233 | } |
| 177 | 234 | ||
| 178 | static int func_set_flag(u32 old_flags, u32 bit, int set) | 235 | static int |
| 236 | func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 179 | { | 237 | { |
| 180 | switch (bit) { | 238 | switch (bit) { |
| 181 | case TRACE_FUNC_OPT_STACK: | 239 | case TRACE_FUNC_OPT_STACK: |
| @@ -183,12 +241,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set) | |||
| 183 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) | 241 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) |
| 184 | break; | 242 | break; |
| 185 | 243 | ||
| 244 | unregister_ftrace_function(tr->ops); | ||
| 245 | |||
| 186 | if (set) { | 246 | if (set) { |
| 187 | unregister_ftrace_function(&trace_ops); | 247 | tr->ops = &trace_stack_ops; |
| 188 | register_ftrace_function(&trace_stack_ops); | 248 | register_ftrace_function(tr->ops); |
| 189 | } else { | 249 | } else { |
| 190 | unregister_ftrace_function(&trace_stack_ops); | 250 | tr->ops = &trace_ops; |
| 191 | register_ftrace_function(&trace_ops); | 251 | register_ftrace_function(tr->ops); |
| 192 | } | 252 | } |
| 193 | 253 | ||
| 194 | break; | 254 | break; |
| @@ -208,6 +268,7 @@ static struct tracer function_trace __tracer_data = | |||
| 208 | .wait_pipe = poll_wait_pipe, | 268 | .wait_pipe = poll_wait_pipe, |
| 209 | .flags = &func_flags, | 269 | .flags = &func_flags, |
| 210 | .set_flag = func_set_flag, | 270 | .set_flag = func_set_flag, |
| 271 | .allow_instances = true, | ||
| 211 | #ifdef CONFIG_FTRACE_SELFTEST | 272 | #ifdef CONFIG_FTRACE_SELFTEST |
| 212 | .selftest = trace_selftest_startup_function, | 273 | .selftest = trace_selftest_startup_function, |
| 213 | #endif | 274 | #endif |
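The key move in the reworked func_set_flag() is ordering: the live callback must be detached before tr->ops is repointed, otherwise the stale ops would stay registered. Condensed to its essence for the global array, whose ops are the static trace_ops/trace_stack_ops pair:

```c
/* Illustrative condensation of the swap performed above. */
static int toggle_stack_tracing(struct trace_array *tr, int set)
{
	unregister_ftrace_function(tr->ops);	/* detach live callback */
	tr->ops = set ? &trace_stack_ops : &trace_ops;
	return register_ftrace_function(tr->ops);
}
```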
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 0b99120d395c..deff11200261 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -1476,7 +1476,8 @@ void graph_trace_close(struct trace_iterator *iter) | |||
| 1476 | } | 1476 | } |
| 1477 | } | 1477 | } |
| 1478 | 1478 | ||
| 1479 | static int func_graph_set_flag(u32 old_flags, u32 bit, int set) | 1479 | static int |
| 1480 | func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 1480 | { | 1481 | { |
| 1481 | if (bit == TRACE_GRAPH_PRINT_IRQS) | 1482 | if (bit == TRACE_GRAPH_PRINT_IRQS) |
| 1482 | ftrace_graph_skip_irqs = !set; | 1483 | ftrace_graph_skip_irqs = !set; |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 2aefbee93a6d..8ff02cbb892f 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -160,7 +160,8 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
| 160 | #endif /* CONFIG_FUNCTION_TRACER */ | 160 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 161 | 161 | ||
| 162 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 162 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 163 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | 163 | static int |
| 164 | irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 164 | { | 165 | { |
| 165 | int cpu; | 166 | int cpu; |
| 166 | 167 | ||
| @@ -266,7 +267,8 @@ __trace_function(struct trace_array *tr, | |||
| 266 | #else | 267 | #else |
| 267 | #define __trace_function trace_function | 268 | #define __trace_function trace_function |
| 268 | 269 | ||
| 269 | static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) | 270 | static int |
| 271 | irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 270 | { | 272 | { |
| 271 | return -EINVAL; | 273 | return -EINVAL; |
| 272 | } | 274 | } |
| @@ -498,14 +500,14 @@ void trace_hardirqs_off(void) | |||
| 498 | } | 500 | } |
| 499 | EXPORT_SYMBOL(trace_hardirqs_off); | 501 | EXPORT_SYMBOL(trace_hardirqs_off); |
| 500 | 502 | ||
| 501 | void trace_hardirqs_on_caller(unsigned long caller_addr) | 503 | __visible void trace_hardirqs_on_caller(unsigned long caller_addr) |
| 502 | { | 504 | { |
| 503 | if (!preempt_trace() && irq_trace()) | 505 | if (!preempt_trace() && irq_trace()) |
| 504 | stop_critical_timing(CALLER_ADDR0, caller_addr); | 506 | stop_critical_timing(CALLER_ADDR0, caller_addr); |
| 505 | } | 507 | } |
| 506 | EXPORT_SYMBOL(trace_hardirqs_on_caller); | 508 | EXPORT_SYMBOL(trace_hardirqs_on_caller); |
| 507 | 509 | ||
| 508 | void trace_hardirqs_off_caller(unsigned long caller_addr) | 510 | __visible void trace_hardirqs_off_caller(unsigned long caller_addr) |
| 509 | { | 511 | { |
| 510 | if (!preempt_trace() && irq_trace()) | 512 | if (!preempt_trace() && irq_trace()) |
| 511 | start_critical_timing(CALLER_ADDR0, caller_addr); | 513 | start_critical_timing(CALLER_ADDR0, caller_addr); |
| @@ -570,8 +572,10 @@ static void irqsoff_function_set(int set) | |||
| 570 | unregister_irqsoff_function(is_graph()); | 572 | unregister_irqsoff_function(is_graph()); |
| 571 | } | 573 | } |
| 572 | 574 | ||
| 573 | static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set) | 575 | static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) |
| 574 | { | 576 | { |
| 577 | struct tracer *tracer = tr->current_trace; | ||
| 578 | |||
| 575 | if (mask & TRACE_ITER_FUNCTION) | 579 | if (mask & TRACE_ITER_FUNCTION) |
| 576 | irqsoff_function_set(set); | 580 | irqsoff_function_set(set); |
| 577 | 581 | ||
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index bdbae450c13e..903ae28962be 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -35,11 +35,6 @@ struct trace_kprobe { | |||
| 35 | struct trace_probe tp; | 35 | struct trace_probe tp; |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
| 38 | struct event_file_link { | ||
| 39 | struct ftrace_event_file *file; | ||
| 40 | struct list_head list; | ||
| 41 | }; | ||
| 42 | |||
| 43 | #define SIZEOF_TRACE_KPROBE(n) \ | 38 | #define SIZEOF_TRACE_KPROBE(n) \ |
| 44 | (offsetof(struct trace_kprobe, tp.args) + \ | 39 | (offsetof(struct trace_kprobe, tp.args) + \ |
| 45 | (sizeof(struct probe_arg) * (n))) | 40 | (sizeof(struct probe_arg) * (n))) |
| @@ -346,7 +341,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, | |||
| 346 | struct trace_kprobe *tk; | 341 | struct trace_kprobe *tk; |
| 347 | 342 | ||
| 348 | list_for_each_entry(tk, &probe_list, list) | 343 | list_for_each_entry(tk, &probe_list, list) |
| 349 | if (strcmp(tk->tp.call.name, event) == 0 && | 344 | if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 && |
| 350 | strcmp(tk->tp.call.class->system, group) == 0) | 345 | strcmp(tk->tp.call.class->system, group) == 0) |
| 351 | return tk; | 346 | return tk; |
| 352 | return NULL; | 347 | return NULL; |
| @@ -387,18 +382,6 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) | |||
| 387 | return ret; | 382 | return ret; |
| 388 | } | 383 | } |
| 389 | 384 | ||
| 390 | static struct event_file_link * | ||
| 391 | find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | ||
| 392 | { | ||
| 393 | struct event_file_link *link; | ||
| 394 | |||
| 395 | list_for_each_entry(link, &tp->files, list) | ||
| 396 | if (link->file == file) | ||
| 397 | return link; | ||
| 398 | |||
| 399 | return NULL; | ||
| 400 | } | ||
| 401 | |||
| 402 | /* | 385 | /* |
| 403 | * Disable trace_probe | 386 | * Disable trace_probe |
| 404 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. | 387 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. |
| @@ -533,7 +516,8 @@ static int register_trace_kprobe(struct trace_kprobe *tk) | |||
| 533 | mutex_lock(&probe_lock); | 516 | mutex_lock(&probe_lock); |
| 534 | 517 | ||
| 535 | /* Delete old (same name) event if exist */ | 518 | /* Delete old (same name) event if exist */ |
| 536 | old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); | 519 | old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call), |
| 520 | tk->tp.call.class->system); | ||
| 537 | if (old_tk) { | 521 | if (old_tk) { |
| 538 | ret = unregister_trace_kprobe(old_tk); | 522 | ret = unregister_trace_kprobe(old_tk); |
| 539 | if (ret < 0) | 523 | if (ret < 0) |
| @@ -581,7 +565,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, | |||
| 581 | if (ret) | 565 | if (ret) |
| 582 | pr_warning("Failed to re-register probe %s on " | 566 | pr_warning("Failed to re-register probe %s on " |
| 583 | "%s: %d\n", | 567 | "%s: %d\n", |
| 584 | tk->tp.call.name, mod->name, ret); | 568 | ftrace_event_name(&tk->tp.call), |
| 569 | mod->name, ret); | ||
| 585 | } | 570 | } |
| 586 | } | 571 | } |
| 587 | mutex_unlock(&probe_lock); | 572 | mutex_unlock(&probe_lock); |
| @@ -835,7 +820,8 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 835 | int i; | 820 | int i; |
| 836 | 821 | ||
| 837 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); | 822 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); |
| 838 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); | 823 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, |
| 824 | ftrace_event_name(&tk->tp.call)); | ||
| 839 | 825 | ||
| 840 | if (!tk->symbol) | 826 | if (!tk->symbol) |
| 841 | seq_printf(m, " 0x%p", tk->rp.kp.addr); | 827 | seq_printf(m, " 0x%p", tk->rp.kp.addr); |
| @@ -893,7 +879,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
| 893 | { | 879 | { |
| 894 | struct trace_kprobe *tk = v; | 880 | struct trace_kprobe *tk = v; |
| 895 | 881 | ||
| 896 | seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, | 882 | seq_printf(m, " %-44s %15lu %15lu\n", |
| 883 | ftrace_event_name(&tk->tp.call), tk->nhit, | ||
| 897 | tk->rp.kp.nmissed); | 884 | tk->rp.kp.nmissed); |
| 898 | 885 | ||
| 899 | return 0; | 886 | return 0; |
| @@ -1028,7 +1015,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, | |||
| 1028 | field = (struct kprobe_trace_entry_head *)iter->ent; | 1015 | field = (struct kprobe_trace_entry_head *)iter->ent; |
| 1029 | tp = container_of(event, struct trace_probe, call.event); | 1016 | tp = container_of(event, struct trace_probe, call.event); |
| 1030 | 1017 | ||
| 1031 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | 1018 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) |
| 1032 | goto partial; | 1019 | goto partial; |
| 1033 | 1020 | ||
| 1034 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | 1021 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) |
| @@ -1064,7 +1051,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
| 1064 | field = (struct kretprobe_trace_entry_head *)iter->ent; | 1051 | field = (struct kretprobe_trace_entry_head *)iter->ent; |
| 1065 | tp = container_of(event, struct trace_probe, call.event); | 1052 | tp = container_of(event, struct trace_probe, call.event); |
| 1066 | 1053 | ||
| 1067 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | 1054 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) |
| 1068 | goto partial; | 1055 | goto partial; |
| 1069 | 1056 | ||
| 1070 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) | 1057 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) |
| @@ -1303,7 +1290,8 @@ static int register_kprobe_event(struct trace_kprobe *tk) | |||
| 1303 | call->data = tk; | 1290 | call->data = tk; |
| 1304 | ret = trace_add_event_call(call); | 1291 | ret = trace_add_event_call(call); |
| 1305 | if (ret) { | 1292 | if (ret) { |
| 1306 | pr_info("Failed to register kprobe event: %s\n", call->name); | 1293 | pr_info("Failed to register kprobe event: %s\n", |
| 1294 | ftrace_event_name(call)); | ||
| 1307 | kfree(call->print_fmt); | 1295 | kfree(call->print_fmt); |
| 1308 | unregister_ftrace_event(&call->event); | 1296 | unregister_ftrace_event(&call->event); |
| 1309 | } | 1297 | } |
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 394f94417e2f..69a5cc94c01a 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c | |||
| @@ -62,7 +62,7 @@ static void nop_trace_reset(struct trace_array *tr) | |||
| 62 | * If you don't implement it, then the flag setting will be | 62 | * If you don't implement it, then the flag setting will be |
| 63 | * automatically accepted. | 63 | * automatically accepted. |
| 64 | */ | 64 | */ |
| 65 | static int nop_set_flag(u32 old_flags, u32 bit, int set) | 65 | static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) |
| 66 | { | 66 | { |
| 67 | /* | 67 | /* |
| 68 | * Note that you don't need to update nop_flags.val yourself. | 68 | * Note that you don't need to update nop_flags.val yourself. |
| @@ -96,6 +96,7 @@ struct tracer nop_trace __read_mostly = | |||
| 96 | .selftest = trace_selftest_startup_nop, | 96 | .selftest = trace_selftest_startup_nop, |
| 97 | #endif | 97 | #endif |
| 98 | .flags = &nop_flags, | 98 | .flags = &nop_flags, |
| 99 | .set_flag = nop_set_flag | 99 | .set_flag = nop_set_flag, |
| 100 | .allow_instances = true, | ||
| 100 | }; | 101 | }; |
| 101 | 102 | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ed32284fbe32..a436de18aa99 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
| @@ -431,7 +431,7 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
| 431 | } | 431 | } |
| 432 | 432 | ||
| 433 | trace_seq_init(p); | 433 | trace_seq_init(p); |
| 434 | ret = trace_seq_printf(s, "%s: ", event->name); | 434 | ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event)); |
| 435 | if (!ret) | 435 | if (!ret) |
| 436 | return TRACE_TYPE_PARTIAL_LINE; | 436 | return TRACE_TYPE_PARTIAL_LINE; |
| 437 | 437 | ||
| @@ -439,6 +439,37 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
| 439 | } | 439 | } |
| 440 | EXPORT_SYMBOL(ftrace_raw_output_prep); | 440 | EXPORT_SYMBOL(ftrace_raw_output_prep); |
| 441 | 441 | ||
| 442 | static int ftrace_output_raw(struct trace_iterator *iter, char *name, | ||
| 443 | char *fmt, va_list ap) | ||
| 444 | { | ||
| 445 | struct trace_seq *s = &iter->seq; | ||
| 446 | int ret; | ||
| 447 | |||
| 448 | ret = trace_seq_printf(s, "%s: ", name); | ||
| 449 | if (!ret) | ||
| 450 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 451 | |||
| 452 | ret = trace_seq_vprintf(s, fmt, ap); | ||
| 453 | |||
| 454 | if (!ret) | ||
| 455 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 456 | |||
| 457 | return TRACE_TYPE_HANDLED; | ||
| 458 | } | ||
| 459 | |||
| 460 | int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) | ||
| 461 | { | ||
| 462 | va_list ap; | ||
| 463 | int ret; | ||
| 464 | |||
| 465 | va_start(ap, fmt); | ||
| 466 | ret = ftrace_output_raw(iter, name, fmt, ap); | ||
| 467 | va_end(ap); | ||
| 468 | |||
| 469 | return ret; | ||
| 470 | } | ||
| 471 | EXPORT_SYMBOL_GPL(ftrace_output_call); | ||
| 472 | |||
| 442 | #ifdef CONFIG_KRETPROBES | 473 | #ifdef CONFIG_KRETPROBES |
| 443 | static inline const char *kretprobed(const char *name) | 474 | static inline const char *kretprobed(const char *name) |
| 444 | { | 475 | { |
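ftrace_output_call() follows the standard varargs-forwarding shape: the public "..." entry point materializes a va_list once and hands it to a single worker, so the formatting logic is never duplicated. The same shape in plain, runnable C:

```c
#include <stdarg.h>
#include <stdio.h>

static int output_raw(const char *name, const char *fmt, va_list ap)
{
	int ret = printf("%s: ", name);

	if (ret < 0)
		return ret;
	return vprintf(fmt, ap);	/* consumes the caller's va_list */
}

int output_call(const char *name, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = output_raw(name, fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	return output_call("demo", "value=%d\n", 42) < 0;
}
```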
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index b73574a5f429..fb1ab5dfbd42 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h | |||
| @@ -288,6 +288,11 @@ struct trace_probe { | |||
| 288 | struct probe_arg args[]; | 288 | struct probe_arg args[]; |
| 289 | }; | 289 | }; |
| 290 | 290 | ||
| 291 | struct event_file_link { | ||
| 292 | struct ftrace_event_file *file; | ||
| 293 | struct list_head list; | ||
| 294 | }; | ||
| 295 | |||
| 291 | static inline bool trace_probe_is_enabled(struct trace_probe *tp) | 296 | static inline bool trace_probe_is_enabled(struct trace_probe *tp) |
| 292 | { | 297 | { |
| 293 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); | 298 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); |
| @@ -316,6 +321,18 @@ static inline int is_good_name(const char *name) | |||
| 316 | return 1; | 321 | return 1; |
| 317 | } | 322 | } |
| 318 | 323 | ||
| 324 | static inline struct event_file_link * | ||
| 325 | find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | ||
| 326 | { | ||
| 327 | struct event_file_link *link; | ||
| 328 | |||
| 329 | list_for_each_entry(link, &tp->files, list) | ||
| 330 | if (link->file == file) | ||
| 331 | return link; | ||
| 332 | |||
| 333 | return NULL; | ||
| 334 | } | ||
| 335 | |||
| 319 | extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, | 336 | extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, |
| 320 | struct probe_arg *parg, bool is_return, bool is_kprobe); | 337 | struct probe_arg *parg, bool is_return, bool is_kprobe); |
| 321 | 338 | ||
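With event_file_link and its lookup helper hoisted into trace_probe.h, the kprobe and uprobe disable paths can presumably share the same idiom. A hedged kernel-context sketch; probe_disable_file() is an illustrative name, not a function in this patch:

```c
static int probe_disable_file(struct trace_probe *tp,
			      struct ftrace_event_file *file)
{
	struct event_file_link *link;

	link = find_event_file_link(tp, file);
	if (!link)
		return -ENOENT;		/* this file never enabled tp */

	list_del_rcu(&link->list);	/* unlink before freeing */
	/* ... synchronize and kfree(link), as the real callers do ... */
	return 0;
}
```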
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 6e32635e5e57..e14da5e97a69 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -179,8 +179,10 @@ static void wakeup_function_set(int set) | |||
| 179 | unregister_wakeup_function(is_graph()); | 179 | unregister_wakeup_function(is_graph()); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set) | 182 | static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set) |
| 183 | { | 183 | { |
| 184 | struct tracer *tracer = tr->current_trace; | ||
| 185 | |||
| 184 | if (mask & TRACE_ITER_FUNCTION) | 186 | if (mask & TRACE_ITER_FUNCTION) |
| 185 | wakeup_function_set(set); | 187 | wakeup_function_set(set); |
| 186 | 188 | ||
| @@ -209,7 +211,8 @@ static void stop_func_tracer(int graph) | |||
| 209 | } | 211 | } |
| 210 | 212 | ||
| 211 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 213 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 212 | static int wakeup_set_flag(u32 old_flags, u32 bit, int set) | 214 | static int |
| 215 | wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 213 | { | 216 | { |
| 214 | 217 | ||
| 215 | if (!(bit & TRACE_DISPLAY_GRAPH)) | 218 | if (!(bit & TRACE_DISPLAY_GRAPH)) |
| @@ -311,7 +314,8 @@ __trace_function(struct trace_array *tr, | |||
| 311 | #else | 314 | #else |
| 312 | #define __trace_function trace_function | 315 | #define __trace_function trace_function |
| 313 | 316 | ||
| 314 | static int wakeup_set_flag(u32 old_flags, u32 bit, int set) | 317 | static int |
| 318 | wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | ||
| 315 | { | 319 | { |
| 316 | return -EINVAL; | 320 | return -EINVAL; |
| 317 | } | 321 | } |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index e6be585cf06a..21b320e5d163 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/sysctl.h> | 13 | #include <linux/sysctl.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
| 16 | #include <linux/magic.h> | ||
| 16 | 17 | ||
| 17 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
| 18 | 19 | ||
| @@ -144,6 +145,8 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
| 144 | i++; | 145 | i++; |
| 145 | } | 146 | } |
| 146 | 147 | ||
| 148 | BUG_ON(current != &init_task && | ||
| 149 | *(end_of_stack(current)) != STACK_END_MAGIC); | ||
| 147 | out: | 150 | out: |
| 148 | arch_spin_unlock(&max_stack_lock); | 151 | arch_spin_unlock(&max_stack_lock); |
| 149 | local_irq_restore(flags); | 152 | local_irq_restore(flags); |
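The new BUG_ON relies on the stack canary the kernel plants at thread creation: the lowest usable stack word is set to STACK_END_MAGIC (0x57AC6E9D, from linux/magic.h), so finding anything else there during the stack-size check means the stack was overrun. The check, reduced to a sketch:

```c
/* Reduced form of the check added above; end_of_stack() returns the
 * address of the canary word at the bottom of the task's stack. */
static inline int stack_overrun(struct task_struct *tsk)
{
	unsigned long *canary = end_of_stack(tsk);

	return *canary != STACK_END_MAGIC;	/* clobbered canary */
}
```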
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 79e52d93860b..930e51462dc8 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
| @@ -260,6 +260,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) | |||
| 260 | goto error; | 260 | goto error; |
| 261 | 261 | ||
| 262 | INIT_LIST_HEAD(&tu->list); | 262 | INIT_LIST_HEAD(&tu->list); |
| 263 | INIT_LIST_HEAD(&tu->tp.files); | ||
| 263 | tu->consumer.handler = uprobe_dispatcher; | 264 | tu->consumer.handler = uprobe_dispatcher; |
| 264 | if (is_ret) | 265 | if (is_ret) |
| 265 | tu->consumer.ret_handler = uretprobe_dispatcher; | 266 | tu->consumer.ret_handler = uretprobe_dispatcher; |
| @@ -293,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou | |||
| 293 | struct trace_uprobe *tu; | 294 | struct trace_uprobe *tu; |
| 294 | 295 | ||
| 295 | list_for_each_entry(tu, &uprobe_list, list) | 296 | list_for_each_entry(tu, &uprobe_list, list) |
| 296 | if (strcmp(tu->tp.call.name, event) == 0 && | 297 | if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 && |
| 297 | strcmp(tu->tp.call.class->system, group) == 0) | 298 | strcmp(tu->tp.call.class->system, group) == 0) |
| 298 | return tu; | 299 | return tu; |
| 299 | 300 | ||
| @@ -323,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu) | |||
| 323 | mutex_lock(&uprobe_lock); | 324 | mutex_lock(&uprobe_lock); |
| 324 | 325 | ||
| 325 | /* register as an event */ | 326 | /* register as an event */ |
| 326 | old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system); | 327 | old_tu = find_probe_event(ftrace_event_name(&tu->tp.call), |
| 328 | tu->tp.call.class->system); | ||
| 327 | if (old_tu) { | 329 | if (old_tu) { |
| 328 | /* delete old event */ | 330 | /* delete old event */ |
| 329 | ret = unregister_trace_uprobe(old_tu); | 331 | ret = unregister_trace_uprobe(old_tu); |
| @@ -598,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 598 | char c = is_ret_probe(tu) ? 'r' : 'p'; | 600 | char c = is_ret_probe(tu) ? 'r' : 'p'; |
| 599 | int i; | 601 | int i; |
| 600 | 602 | ||
| 601 | seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name); | 603 | seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, |
| 604 | ftrace_event_name(&tu->tp.call)); | ||
| 602 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); | 605 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); |
| 603 | 606 | ||
| 604 | for (i = 0; i < tu->tp.nr_args; i++) | 607 | for (i = 0; i < tu->tp.nr_args; i++) |
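Editor's note: the wrapped `seq_printf()` call changes no output; each probe still renders as one `uprobe_events` line of the form `p|r:group/event path:0xoffset [args]`. An illustrative listing (path, offset, and argument are invented):

```
# cat /sys/kernel/debug/tracing/uprobe_events
p:uprobes/bash_readline /bin/bash:0x00000000000937a0 arg1=%ax
r:uprobes/bash_readline_ret /bin/bash:0x00000000000937a0
```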
| @@ -648,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
| 648 | { | 651 | { |
| 649 | struct trace_uprobe *tu = v; | 652 | struct trace_uprobe *tu = v; |
| 650 | 653 | ||
| 651 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit); | 654 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, |
| 655 | ftrace_event_name(&tu->tp.call), tu->nhit); | ||
| 652 | return 0; | 656 | return 0; |
| 653 | } | 657 | } |
| 654 | 658 | ||
| @@ -758,31 +762,32 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) | |||
| 758 | mutex_unlock(&ucb->mutex); | 762 | mutex_unlock(&ucb->mutex); |
| 759 | } | 763 | } |
| 760 | 764 | ||
| 761 | static void uprobe_trace_print(struct trace_uprobe *tu, | 765 | static void __uprobe_trace_func(struct trace_uprobe *tu, |
| 762 | unsigned long func, struct pt_regs *regs) | 766 | unsigned long func, struct pt_regs *regs, |
| 767 | struct uprobe_cpu_buffer *ucb, int dsize, | ||
| 768 | struct ftrace_event_file *ftrace_file) | ||
| 763 | { | 769 | { |
| 764 | struct uprobe_trace_entry_head *entry; | 770 | struct uprobe_trace_entry_head *entry; |
| 765 | struct ring_buffer_event *event; | 771 | struct ring_buffer_event *event; |
| 766 | struct ring_buffer *buffer; | 772 | struct ring_buffer *buffer; |
| 767 | struct uprobe_cpu_buffer *ucb; | ||
| 768 | void *data; | 773 | void *data; |
| 769 | int size, dsize, esize; | 774 | int size, esize; |
| 770 | struct ftrace_event_call *call = &tu->tp.call; | 775 | struct ftrace_event_call *call = &tu->tp.call; |
| 771 | 776 | ||
| 772 | dsize = __get_data_size(&tu->tp, regs); | 777 | WARN_ON(call != ftrace_file->event_call); |
| 773 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 774 | 778 | ||
| 775 | if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE)) | 779 | if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE)) |
| 776 | return; | 780 | return; |
| 777 | 781 | ||
| 778 | ucb = uprobe_buffer_get(); | 782 | if (ftrace_trigger_soft_disabled(ftrace_file)) |
| 779 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | 783 | return; |
| 780 | 784 | ||
| 785 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 781 | size = esize + tu->tp.size + dsize; | 786 | size = esize + tu->tp.size + dsize; |
| 782 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, | 787 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, |
| 783 | size, 0, 0); | 788 | call->event.type, size, 0, 0); |
| 784 | if (!event) | 789 | if (!event) |
| 785 | goto out; | 790 | return; |
| 786 | 791 | ||
| 787 | entry = ring_buffer_event_data(event); | 792 | entry = ring_buffer_event_data(event); |
| 788 | if (is_ret_probe(tu)) { | 793 | if (is_ret_probe(tu)) { |
| @@ -796,25 +801,36 @@ static void uprobe_trace_print(struct trace_uprobe *tu, | |||
| 796 | 801 | ||
| 797 | memcpy(data, ucb->buf, tu->tp.size + dsize); | 802 | memcpy(data, ucb->buf, tu->tp.size + dsize); |
| 798 | 803 | ||
| 799 | if (!call_filter_check_discard(call, entry, buffer, event)) | 804 | event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0); |
| 800 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
| 801 | |||
| 802 | out: | ||
| 803 | uprobe_buffer_put(ucb); | ||
| 804 | } | 805 | } |
| 805 | 806 | ||
| 806 | /* uprobe handler */ | 807 | /* uprobe handler */ |
| 807 | static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) | 808 | static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs, |
| 809 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 808 | { | 810 | { |
| 809 | if (!is_ret_probe(tu)) | 811 | struct event_file_link *link; |
| 810 | uprobe_trace_print(tu, 0, regs); | 812 | |
| 813 | if (is_ret_probe(tu)) | ||
| 814 | return 0; | ||
| 815 | |||
| 816 | rcu_read_lock(); | ||
| 817 | list_for_each_entry_rcu(link, &tu->tp.files, list) | ||
| 818 | __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file); | ||
| 819 | rcu_read_unlock(); | ||
| 820 | |||
| 811 | return 0; | 821 | return 0; |
| 812 | } | 822 | } |
| 813 | 823 | ||
| 814 | static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, | 824 | static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, |
| 815 | struct pt_regs *regs) | 825 | struct pt_regs *regs, |
| 826 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 816 | { | 827 | { |
| 817 | uprobe_trace_print(tu, func, regs); | 828 | struct event_file_link *link; |
| 829 | |||
| 830 | rcu_read_lock(); | ||
| 831 | list_for_each_entry_rcu(link, &tu->tp.files, list) | ||
| 832 | __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file); | ||
| 833 | rcu_read_unlock(); | ||
| 818 | } | 834 | } |
| 819 | 835 | ||
| 820 | /* Event entry printers */ | 836 | /* Event entry printers */ |
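Editor's note: this is the core of the refactor. Argument capture moves out to the dispatcher, and `uprobe_trace_func()` / `uretprobe_trace_func()` now walk the probe's RCU-protected `tp.files` list so each ftrace instance that enabled the event gets its own ring-buffer record. A compile-and-run toy of that fan-out, with a plain singly linked list standing in for the RCU list (all names invented):

```c
#include <stdio.h>
#include <string.h>

struct event_file { const char *name; struct event_file *next; };

struct probe {
	struct event_file *files; /* kernel: RCU-protected tu->tp.files */
	char buf[64];             /* kernel: per-CPU uprobe_cpu_buffer */
};

/* kernel: __uprobe_trace_func() reserves and commits a ring-buffer
 * event in this file's instance, honoring soft-disable triggers. */
static void emit_one(struct event_file *f, const char *data, int dsize)
{
	printf("record for %-10s %.*s\n", f->name, dsize, data);
}

static void trace_func(struct probe *p, int dsize)
{
	/* kernel: rcu_read_lock(); list_for_each_entry_rcu(link, ...) */
	for (struct event_file *f = p->files; f; f = f->next)
		emit_one(f, p->buf, dsize);
}

int main(void)
{
	struct event_file inst = { "instance1:", NULL };
	struct event_file top  = { "top:", &inst };
	struct probe p = { &top, "" };

	const char *args = "arg1=42";          /* fetched registers */
	int dsize = (int)strlen(args);         /* __get_data_size()  */
	memcpy(p.buf, args, (size_t)dsize);    /* store_trace_args() */
	trace_func(&p, dsize);                 /* one capture, N copies */
	return 0;
}
```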
| @@ -831,12 +847,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e | |||
| 831 | tu = container_of(event, struct trace_uprobe, tp.call.event); | 847 | tu = container_of(event, struct trace_uprobe, tp.call.event); |
| 832 | 848 | ||
| 833 | if (is_ret_probe(tu)) { | 849 | if (is_ret_probe(tu)) { |
| 834 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name, | 850 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", |
| 851 | ftrace_event_name(&tu->tp.call), | ||
| 835 | entry->vaddr[1], entry->vaddr[0])) | 852 | entry->vaddr[1], entry->vaddr[0])) |
| 836 | goto partial; | 853 | goto partial; |
| 837 | data = DATAOF_TRACE_ENTRY(entry, true); | 854 | data = DATAOF_TRACE_ENTRY(entry, true); |
| 838 | } else { | 855 | } else { |
| 839 | if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name, | 856 | if (!trace_seq_printf(s, "%s: (0x%lx)", |
| 857 | ftrace_event_name(&tu->tp.call), | ||
| 840 | entry->vaddr[0])) | 858 | entry->vaddr[0])) |
| 841 | goto partial; | 859 | goto partial; |
| 842 | data = DATAOF_TRACE_ENTRY(entry, false); | 860 | data = DATAOF_TRACE_ENTRY(entry, false); |
| @@ -861,12 +879,24 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self, | |||
| 861 | struct mm_struct *mm); | 879 | struct mm_struct *mm); |
| 862 | 880 | ||
| 863 | static int | 881 | static int |
| 864 | probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) | 882 | probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file, |
| 883 | filter_func_t filter) | ||
| 865 | { | 884 | { |
| 866 | int ret = 0; | 885 | bool enabled = trace_probe_is_enabled(&tu->tp); |
| 886 | struct event_file_link *link = NULL; | ||
| 887 | int ret; | ||
| 888 | |||
| 889 | if (file) { | ||
| 890 | link = kmalloc(sizeof(*link), GFP_KERNEL); | ||
| 891 | if (!link) | ||
| 892 | return -ENOMEM; | ||
| 867 | 893 | ||
| 868 | if (trace_probe_is_enabled(&tu->tp)) | 894 | link->file = file; |
| 869 | return -EINTR; | 895 | list_add_tail_rcu(&link->list, &tu->tp.files); |
| 896 | |||
| 897 | tu->tp.flags |= TP_FLAG_TRACE; | ||
| 898 | } else | ||
| 899 | tu->tp.flags |= TP_FLAG_PROFILE; | ||
| 870 | 900 | ||
| 871 | ret = uprobe_buffer_enable(); | 901 | ret = uprobe_buffer_enable(); |
| 872 | if (ret < 0) | 902 | if (ret < 0) |
| @@ -874,24 +904,49 @@ probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) | |||
| 874 | 904 | ||
| 875 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); | 905 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); |
| 876 | 906 | ||
| 877 | tu->tp.flags |= flag; | 907 | if (enabled) |
| 908 | return 0; | ||
| 909 | |||
| 878 | tu->consumer.filter = filter; | 910 | tu->consumer.filter = filter; |
| 879 | ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); | 911 | ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); |
| 880 | if (ret) | 912 | if (ret) { |
| 881 | tu->tp.flags &= ~flag; | 913 | if (file) { |
| 914 | list_del(&link->list); | ||
| 915 | kfree(link); | ||
| 916 | tu->tp.flags &= ~TP_FLAG_TRACE; | ||
| 917 | } else | ||
| 918 | tu->tp.flags &= ~TP_FLAG_PROFILE; | ||
| 919 | } | ||
| 882 | 920 | ||
| 883 | return ret; | 921 | return ret; |
| 884 | } | 922 | } |
| 885 | 923 | ||
| 886 | static void probe_event_disable(struct trace_uprobe *tu, int flag) | 924 | static void |
| 925 | probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file) | ||
| 887 | { | 926 | { |
| 888 | if (!trace_probe_is_enabled(&tu->tp)) | 927 | if (!trace_probe_is_enabled(&tu->tp)) |
| 889 | return; | 928 | return; |
| 890 | 929 | ||
| 930 | if (file) { | ||
| 931 | struct event_file_link *link; | ||
| 932 | |||
| 933 | link = find_event_file_link(&tu->tp, file); | ||
| 934 | if (!link) | ||
| 935 | return; | ||
| 936 | |||
| 937 | list_del_rcu(&link->list); | ||
| 938 | /* synchronize with u{,ret}probe_trace_func */ | ||
| 939 | synchronize_sched(); | ||
| 940 | kfree(link); | ||
| 941 | |||
| 942 | if (!list_empty(&tu->tp.files)) | ||
| 943 | return; | ||
| 944 | } | ||
| 945 | |||
| 891 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); | 946 | WARN_ON(!uprobe_filter_is_empty(&tu->filter)); |
| 892 | 947 | ||
| 893 | uprobe_unregister(tu->inode, tu->offset, &tu->consumer); | 948 | uprobe_unregister(tu->inode, tu->offset, &tu->consumer); |
| 894 | tu->tp.flags &= ~flag; | 949 | tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE; |
| 895 | 950 | ||
| 896 | uprobe_buffer_disable(); | 951 | uprobe_buffer_disable(); |
| 897 | } | 952 | } |
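Editor's note: `probe_event_disable()` now follows the canonical RCU removal sequence: unlink the node with `list_del_rcu()`, wait out a grace period with `synchronize_sched()` so any handler still walking the list above can finish, and only then `kfree()` the link. A single-threaded toy of that ordering, with the grace-period wait reduced to a stub:

```c
#include <stdio.h>
#include <stdlib.h>

struct link { struct link *next; const char *file; };

static struct link *files;          /* kernel: tu->tp.files */

static void unlink_node(struct link **pp, struct link *victim)
{
	*pp = victim->next;         /* kernel: list_del_rcu() */
}

static void wait_for_readers(void)
{
	/* kernel: synchronize_sched() -- returns only after every CPU
	 * has left its read-side critical section, so no tracer can
	 * still be dereferencing the victim node. */
}

int main(void)
{
	struct link *l = malloc(sizeof(*l));
	l->file = "events/uprobes/demo";
	l->next = NULL;
	files = l;

	unlink_node(&files, l);     /* step 1: make unreachable */
	wait_for_readers();         /* step 2: grace period      */
	free(l);                    /* step 3: now safe to free  */
	printf("list empty: %s\n", files ? "no" : "yes");
	return 0;
}
```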
| @@ -1014,31 +1069,24 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc, | |||
| 1014 | return ret; | 1069 | return ret; |
| 1015 | } | 1070 | } |
| 1016 | 1071 | ||
| 1017 | static void uprobe_perf_print(struct trace_uprobe *tu, | 1072 | static void __uprobe_perf_func(struct trace_uprobe *tu, |
| 1018 | unsigned long func, struct pt_regs *regs) | 1073 | unsigned long func, struct pt_regs *regs, |
| 1074 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 1019 | { | 1075 | { |
| 1020 | struct ftrace_event_call *call = &tu->tp.call; | 1076 | struct ftrace_event_call *call = &tu->tp.call; |
| 1021 | struct uprobe_trace_entry_head *entry; | 1077 | struct uprobe_trace_entry_head *entry; |
| 1022 | struct hlist_head *head; | 1078 | struct hlist_head *head; |
| 1023 | struct uprobe_cpu_buffer *ucb; | ||
| 1024 | void *data; | 1079 | void *data; |
| 1025 | int size, dsize, esize; | 1080 | int size, esize; |
| 1026 | int rctx; | 1081 | int rctx; |
| 1027 | 1082 | ||
| 1028 | dsize = __get_data_size(&tu->tp, regs); | ||
| 1029 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | 1083 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
| 1030 | 1084 | ||
| 1031 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1032 | return; | ||
| 1033 | |||
| 1034 | size = esize + tu->tp.size + dsize; | 1085 | size = esize + tu->tp.size + dsize; |
| 1035 | size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); | 1086 | size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); |
| 1036 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) | 1087 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) |
| 1037 | return; | 1088 | return; |
| 1038 | 1089 | ||
| 1039 | ucb = uprobe_buffer_get(); | ||
| 1040 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 1041 | |||
| 1042 | preempt_disable(); | 1090 | preempt_disable(); |
| 1043 | head = this_cpu_ptr(call->perf_events); | 1091 | head = this_cpu_ptr(call->perf_events); |
| 1044 | if (hlist_empty(head)) | 1092 | if (hlist_empty(head)) |
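Editor's note: the size computation that survives the refactor rounds the record so that, together with the u32 length header perf prepends, the entry ends on a u64 boundary. A quick check of the arithmetic:

```c
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Reproduces the rounding kept in __uprobe_perf_func(): pad the
	 * payload so header (u32) + payload together are u64-aligned. */
	for (unsigned int size = 11; size <= 14; size++) {
		unsigned int padded =
			(unsigned int)(ALIGN(size + sizeof(unsigned int),
					     sizeof(unsigned long long))
				       - sizeof(unsigned int));
		printf("payload %2u -> padded %2u (with header: %2u)\n",
		       size, padded,
		       padded + (unsigned int)sizeof(unsigned int));
	}
	return 0;
}
```

For example, a 13-byte payload pads to 20 so that 4 + 20 = 24 is a multiple of 8.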
| @@ -1068,46 +1116,49 @@ static void uprobe_perf_print(struct trace_uprobe *tu, | |||
| 1068 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); | 1116 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); |
| 1069 | out: | 1117 | out: |
| 1070 | preempt_enable(); | 1118 | preempt_enable(); |
| 1071 | uprobe_buffer_put(ucb); | ||
| 1072 | } | 1119 | } |
| 1073 | 1120 | ||
| 1074 | /* uprobe profile handler */ | 1121 | /* uprobe profile handler */ |
| 1075 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) | 1122 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs, |
| 1123 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 1076 | { | 1124 | { |
| 1077 | if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) | 1125 | if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) |
| 1078 | return UPROBE_HANDLER_REMOVE; | 1126 | return UPROBE_HANDLER_REMOVE; |
| 1079 | 1127 | ||
| 1080 | if (!is_ret_probe(tu)) | 1128 | if (!is_ret_probe(tu)) |
| 1081 | uprobe_perf_print(tu, 0, regs); | 1129 | __uprobe_perf_func(tu, 0, regs, ucb, dsize); |
| 1082 | return 0; | 1130 | return 0; |
| 1083 | } | 1131 | } |
| 1084 | 1132 | ||
| 1085 | static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, | 1133 | static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, |
| 1086 | struct pt_regs *regs) | 1134 | struct pt_regs *regs, |
| 1135 | struct uprobe_cpu_buffer *ucb, int dsize) | ||
| 1087 | { | 1136 | { |
| 1088 | uprobe_perf_print(tu, func, regs); | 1137 | __uprobe_perf_func(tu, func, regs, ucb, dsize); |
| 1089 | } | 1138 | } |
| 1090 | #endif /* CONFIG_PERF_EVENTS */ | 1139 | #endif /* CONFIG_PERF_EVENTS */ |
| 1091 | 1140 | ||
| 1092 | static | 1141 | static int |
| 1093 | int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) | 1142 | trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, |
| 1143 | void *data) | ||
| 1094 | { | 1144 | { |
| 1095 | struct trace_uprobe *tu = event->data; | 1145 | struct trace_uprobe *tu = event->data; |
| 1146 | struct ftrace_event_file *file = data; | ||
| 1096 | 1147 | ||
| 1097 | switch (type) { | 1148 | switch (type) { |
| 1098 | case TRACE_REG_REGISTER: | 1149 | case TRACE_REG_REGISTER: |
| 1099 | return probe_event_enable(tu, TP_FLAG_TRACE, NULL); | 1150 | return probe_event_enable(tu, file, NULL); |
| 1100 | 1151 | ||
| 1101 | case TRACE_REG_UNREGISTER: | 1152 | case TRACE_REG_UNREGISTER: |
| 1102 | probe_event_disable(tu, TP_FLAG_TRACE); | 1153 | probe_event_disable(tu, file); |
| 1103 | return 0; | 1154 | return 0; |
| 1104 | 1155 | ||
| 1105 | #ifdef CONFIG_PERF_EVENTS | 1156 | #ifdef CONFIG_PERF_EVENTS |
| 1106 | case TRACE_REG_PERF_REGISTER: | 1157 | case TRACE_REG_PERF_REGISTER: |
| 1107 | return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter); | 1158 | return probe_event_enable(tu, NULL, uprobe_perf_filter); |
| 1108 | 1159 | ||
| 1109 | case TRACE_REG_PERF_UNREGISTER: | 1160 | case TRACE_REG_PERF_UNREGISTER: |
| 1110 | probe_event_disable(tu, TP_FLAG_PROFILE); | 1161 | probe_event_disable(tu, NULL); |
| 1111 | return 0; | 1162 | return 0; |
| 1112 | 1163 | ||
| 1113 | case TRACE_REG_PERF_OPEN: | 1164 | case TRACE_REG_PERF_OPEN: |
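Editor's note: with per-file enablement in place, the registration callback distinguishes the two paths purely by its `data` argument: ftrace passes its `ftrace_event_file`, perf passes `NULL`, and `probe_event_enable()` / `probe_event_disable()` set `TP_FLAG_TRACE` or `TP_FLAG_PROFILE` accordingly. Schematically (invented names, not the kernel API):

```c
#include <stdio.h>

enum reg { REG_REGISTER, REG_UNREGISTER,
	   REG_PERF_REGISTER, REG_PERF_UNREGISTER };

/* A non-NULL file selects the ftrace path; NULL selects perf. */
static int probe_enable(void *file)
{
	printf("enable %s\n", file ? "trace" : "perf");
	return 0;
}

static void probe_disable(void *file)
{
	printf("disable %s\n", file ? "trace" : "perf");
}

static int probe_register(enum reg type, void *data)
{
	switch (type) {
	case REG_REGISTER:        return probe_enable(data);
	case REG_UNREGISTER:      probe_disable(data); return 0;
	case REG_PERF_REGISTER:   return probe_enable(NULL);
	case REG_PERF_UNREGISTER: probe_disable(NULL); return 0;
	}
	return -1;
}

int main(void)
{
	void *file = (void *)"dummy_event_file";
	probe_register(REG_REGISTER, file);
	probe_register(REG_PERF_REGISTER, NULL);
	probe_register(REG_UNREGISTER, file);
	probe_register(REG_PERF_UNREGISTER, NULL);
	return 0;
}
```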
| @@ -1127,8 +1178,11 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) | |||
| 1127 | { | 1178 | { |
| 1128 | struct trace_uprobe *tu; | 1179 | struct trace_uprobe *tu; |
| 1129 | struct uprobe_dispatch_data udd; | 1180 | struct uprobe_dispatch_data udd; |
| 1181 | struct uprobe_cpu_buffer *ucb; | ||
| 1182 | int dsize, esize; | ||
| 1130 | int ret = 0; | 1183 | int ret = 0; |
| 1131 | 1184 | ||
| 1185 | |||
| 1132 | tu = container_of(con, struct trace_uprobe, consumer); | 1186 | tu = container_of(con, struct trace_uprobe, consumer); |
| 1133 | tu->nhit++; | 1187 | tu->nhit++; |
| 1134 | 1188 | ||
| @@ -1137,13 +1191,29 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) | |||
| 1137 | 1191 | ||
| 1138 | current->utask->vaddr = (unsigned long) &udd; | 1192 | current->utask->vaddr = (unsigned long) &udd; |
| 1139 | 1193 | ||
| 1194 | #ifdef CONFIG_PERF_EVENTS | ||
| 1195 | if ((tu->tp.flags & TP_FLAG_TRACE) == 0 && | ||
| 1196 | !uprobe_perf_filter(&tu->consumer, 0, current->mm)) | ||
| 1197 | return UPROBE_HANDLER_REMOVE; | ||
| 1198 | #endif | ||
| 1199 | |||
| 1200 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1201 | return 0; | ||
| 1202 | |||
| 1203 | dsize = __get_data_size(&tu->tp, regs); | ||
| 1204 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 1205 | |||
| 1206 | ucb = uprobe_buffer_get(); | ||
| 1207 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 1208 | |||
| 1140 | if (tu->tp.flags & TP_FLAG_TRACE) | 1209 | if (tu->tp.flags & TP_FLAG_TRACE) |
| 1141 | ret |= uprobe_trace_func(tu, regs); | 1210 | ret |= uprobe_trace_func(tu, regs, ucb, dsize); |
| 1142 | 1211 | ||
| 1143 | #ifdef CONFIG_PERF_EVENTS | 1212 | #ifdef CONFIG_PERF_EVENTS |
| 1144 | if (tu->tp.flags & TP_FLAG_PROFILE) | 1213 | if (tu->tp.flags & TP_FLAG_PROFILE) |
| 1145 | ret |= uprobe_perf_func(tu, regs); | 1214 | ret |= uprobe_perf_func(tu, regs, ucb, dsize); |
| 1146 | #endif | 1215 | #endif |
| 1216 | uprobe_buffer_put(ucb); | ||
| 1147 | return ret; | 1217 | return ret; |
| 1148 | } | 1218 | } |
| 1149 | 1219 | ||
| @@ -1152,6 +1222,8 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, | |||
| 1152 | { | 1222 | { |
| 1153 | struct trace_uprobe *tu; | 1223 | struct trace_uprobe *tu; |
| 1154 | struct uprobe_dispatch_data udd; | 1224 | struct uprobe_dispatch_data udd; |
| 1225 | struct uprobe_cpu_buffer *ucb; | ||
| 1226 | int dsize, esize; | ||
| 1155 | 1227 | ||
| 1156 | tu = container_of(con, struct trace_uprobe, consumer); | 1228 | tu = container_of(con, struct trace_uprobe, consumer); |
| 1157 | 1229 | ||
| @@ -1160,13 +1232,23 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, | |||
| 1160 | 1232 | ||
| 1161 | current->utask->vaddr = (unsigned long) &udd; | 1233 | current->utask->vaddr = (unsigned long) &udd; |
| 1162 | 1234 | ||
| 1235 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) | ||
| 1236 | return 0; | ||
| 1237 | |||
| 1238 | dsize = __get_data_size(&tu->tp, regs); | ||
| 1239 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | ||
| 1240 | |||
| 1241 | ucb = uprobe_buffer_get(); | ||
| 1242 | store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); | ||
| 1243 | |||
| 1163 | if (tu->tp.flags & TP_FLAG_TRACE) | 1244 | if (tu->tp.flags & TP_FLAG_TRACE) |
| 1164 | uretprobe_trace_func(tu, func, regs); | 1245 | uretprobe_trace_func(tu, func, regs, ucb, dsize); |
| 1165 | 1246 | ||
| 1166 | #ifdef CONFIG_PERF_EVENTS | 1247 | #ifdef CONFIG_PERF_EVENTS |
| 1167 | if (tu->tp.flags & TP_FLAG_PROFILE) | 1248 | if (tu->tp.flags & TP_FLAG_PROFILE) |
| 1168 | uretprobe_perf_func(tu, func, regs); | 1249 | uretprobe_perf_func(tu, func, regs, ucb, dsize); |
| 1169 | #endif | 1250 | #endif |
| 1251 | uprobe_buffer_put(ucb); | ||
| 1170 | return 0; | 1252 | return 0; |
| 1171 | } | 1253 | } |
| 1172 | 1254 | ||
| @@ -1198,7 +1280,8 @@ static int register_uprobe_event(struct trace_uprobe *tu) | |||
| 1198 | ret = trace_add_event_call(call); | 1280 | ret = trace_add_event_call(call); |
| 1199 | 1281 | ||
| 1200 | if (ret) { | 1282 | if (ret) { |
| 1201 | pr_info("Failed to register uprobe event: %s\n", call->name); | 1283 | pr_info("Failed to register uprobe event: %s\n", |
| 1284 | ftrace_event_name(call)); | ||
| 1202 | kfree(call->print_fmt); | 1285 | kfree(call->print_fmt); |
| 1203 | unregister_ftrace_event(&call->event); | 1286 | unregister_ftrace_event(&call->event); |
| 1204 | } | 1287 | } |
