Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--        kernel/trace/ftrace.c        490
1 file changed, 53 insertions(+), 437 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f0ff24173a0b..aac7847c0214 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -19,7 +19,6 @@
 #include <linux/sched/task.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
-#include <linux/suspend.h>
 #include <linux/tracefs.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
@@ -40,6 +39,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 
+#include "ftrace_internal.h"
 #include "trace_output.h"
 #include "trace_stat.h"
 
@@ -77,7 +77,12 @@
 #define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
-static struct ftrace_ops ftrace_list_end __read_mostly = {
+enum {
+        FTRACE_MODIFY_ENABLE_FL         = (1 << 0),
+        FTRACE_MODIFY_MAY_SLEEP_FL      = (1 << 1),
+};
+
+struct ftrace_ops ftrace_list_end __read_mostly = {
         .func = ftrace_stub,
         .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
         INIT_OPS_HASH(ftrace_list_end)
@@ -112,11 +117,11 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops);
  */
 static int ftrace_disabled __read_mostly;
 
-static DEFINE_MUTEX(ftrace_lock);
+DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
+struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static struct ftrace_ops global_ops;
+struct ftrace_ops global_ops;
 
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
@@ -127,26 +132,6 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif
 
-/*
- * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw_notrace() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
-#define do_for_each_ftrace_op(op, list)                 \
-        op = rcu_dereference_raw_notrace(list);         \
-        do
-
-/*
- * Optimized for just a single item in the list (as that is the normal case).
- */
-#define while_for_each_ftrace_op(op)                            \
-        while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
-               unlikely((op) != &ftrace_list_end))
-
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -186,18 +171,6 @@ static void ftrace_sync_ipi(void *data)
         smp_rmb();
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void update_function_graph_func(void);
-
-/* Both enabled by default (can be cleared by function_graph tracer flags */
-static bool fgraph_sleep_time = true;
-static bool fgraph_graph_time = true;
-
-#else
-static inline void update_function_graph_func(void) { }
-#endif
-
-
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
         /*
@@ -334,7 +307,7 @@ static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops);
 
-static int __register_ftrace_function(struct ftrace_ops *ops)
+int __register_ftrace_function(struct ftrace_ops *ops)
 {
         if (ops->flags & FTRACE_OPS_FL_DELETED)
                 return -EINVAL;
@@ -375,7 +348,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
         return 0;
 }
 
-static int __unregister_ftrace_function(struct ftrace_ops *ops)
+int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
         int ret;
 
@@ -815,9 +788,16 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static bool fgraph_graph_time = true;
+
+void ftrace_graph_graph_time_control(bool enable)
+{
+        fgraph_graph_time = enable;
+}
+
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-        int index = current->curr_ret_stack;
+        struct ftrace_ret_stack *ret_stack;
 
         function_profile_call(trace->func, 0, NULL, NULL);
 
@@ -825,14 +805,16 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
         if (!current->ret_stack)
                 return 0;
 
-        if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
-                current->ret_stack[index].subtime = 0;
+        ret_stack = ftrace_graph_get_ret_stack(current, 0);
+        if (ret_stack)
+                ret_stack->subtime = 0;
 
         return 1;
 }
 
 static void profile_graph_return(struct ftrace_graph_ret *trace)
 {
+        struct ftrace_ret_stack *ret_stack;
         struct ftrace_profile_stat *stat;
         unsigned long long calltime;
         struct ftrace_profile *rec;
@@ -850,16 +832,15 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
         calltime = trace->rettime - trace->calltime;
 
         if (!fgraph_graph_time) {
-                int index;
-
-                index = current->curr_ret_stack;
 
                 /* Append this call time to the parent time to subtract */
-                if (index)
-                        current->ret_stack[index - 1].subtime += calltime;
+                ret_stack = ftrace_graph_get_ret_stack(current, 1);
+                if (ret_stack)
+                        ret_stack->subtime += calltime;
 
-                if (current->ret_stack[index].subtime < calltime)
-                        calltime -= current->ret_stack[index].subtime;
+                ret_stack = ftrace_graph_get_ret_stack(current, 0);
+                if (ret_stack && ret_stack->subtime < calltime)
+                        calltime -= ret_stack->subtime;
                 else
                         calltime = 0;
         }
@@ -874,15 +855,19 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
         local_irq_restore(flags);
 }
 
+static struct fgraph_ops fprofiler_ops = {
+        .entryfunc = &profile_graph_entry,
+        .retfunc = &profile_graph_return,
+};
+
 static int register_ftrace_profiler(void)
 {
-        return register_ftrace_graph(&profile_graph_return,
-                                     &profile_graph_entry);
+        return register_ftrace_graph(&fprofiler_ops);
 }
 
 static void unregister_ftrace_profiler(void)
 {
-        unregister_ftrace_graph();
+        unregister_ftrace_graph(&fprofiler_ops);
 }
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
@@ -1021,12 +1006,6 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_graph_active;
-#else
-# define ftrace_graph_active 0
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static struct ftrace_ops *removed_ops;
@@ -1067,7 +1046,7 @@ static const struct ftrace_hash empty_hash = {
 };
 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
 
-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
         .func = ftrace_stub,
         .local_hash.notrace_hash = EMPTY_HASH,
         .local_hash.filter_hash = EMPTY_HASH,
@@ -1503,7 +1482,7 @@ static bool hash_contains_ip(unsigned long ip,
  * This needs to be called with preemption disabled as
  * the hashes are freed with call_rcu().
  */
-static int
+int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
         struct ftrace_ops_hash hash;
@@ -2415,10 +2394,12 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
         return -1; /* unknow ftrace bug */
 }
 
-void __weak ftrace_replace_code(int enable)
+void __weak ftrace_replace_code(int mod_flags)
 {
         struct dyn_ftrace *rec;
         struct ftrace_page *pg;
+        int enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
+        int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
         int failed;
 
         if (unlikely(ftrace_disabled))
@@ -2435,6 +2416,8 @@ void __weak ftrace_replace_code(int enable)
                         /* Stop processing */
                         return;
                 }
+                if (schedulable)
+                        cond_resched();
         } while_for_each_ftrace_rec();
 }
 
@@ -2548,8 +2531,12 @@ int __weak ftrace_arch_code_modify_post_process(void)
 void ftrace_modify_all_code(int command)
 {
         int update = command & FTRACE_UPDATE_TRACE_FUNC;
+        int mod_flags = 0;
         int err = 0;
 
+        if (command & FTRACE_MAY_SLEEP)
+                mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
+
         /*
          * If the ftrace_caller calls a ftrace_ops func directly,
          * we need to make sure that it only traces functions it
@@ -2567,9 +2554,9 @@ void ftrace_modify_all_code(int command)
         }
 
         if (command & FTRACE_UPDATE_CALLS)
-                ftrace_replace_code(1);
+                ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
         else if (command & FTRACE_DISABLE_CALLS)
-                ftrace_replace_code(0);
+                ftrace_replace_code(mod_flags);
 
         if (update && ftrace_trace_function != ftrace_ops_list_func) {
                 function_trace_op = set_function_trace_op;
@@ -2682,7 +2669,7 @@ static void ftrace_startup_all(int command)
         update_all_ops = false;
 }
 
-static int ftrace_startup(struct ftrace_ops *ops, int command)
+int ftrace_startup(struct ftrace_ops *ops, int command)
 {
         int ret;
 
@@ -2724,7 +2711,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
         return 0;
 }
 
-static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
         int ret;
 
@@ -6178,7 +6165,7 @@ void ftrace_init_trace_array(struct trace_array *tr)
 }
 #else
 
-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
         .func = ftrace_stub,
         .flags = FTRACE_OPS_FL_RECURSION_SAFE |
                  FTRACE_OPS_FL_INITIALIZED |
@@ -6195,31 +6182,10 @@ core_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 static inline void ftrace_startup_all(int command) { }
-/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)                           \
-        ({                                                      \
-                int ___ret = __register_ftrace_function(ops);   \
-                if (!___ret)                                    \
-                        (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
-                ___ret;                                         \
-        })
-# define ftrace_shutdown(ops, command)                          \
-        ({                                                      \
-                int ___ret = __unregister_ftrace_function(ops); \
-                if (!___ret)                                    \
-                        (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
-                ___ret;                                         \
-        })
 
 # define ftrace_startup_sysctl() do { } while (0)
 # define ftrace_shutdown_sysctl() do { } while (0)
 
-static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
-{
-        return 1;
-}
-
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
 }
@@ -6746,353 +6712,3 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
         mutex_unlock(&ftrace_lock);
         return ret;
 }
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-static struct ftrace_ops graph_ops = {
-        .func = ftrace_stub,
-        .flags = FTRACE_OPS_FL_RECURSION_SAFE |
-                 FTRACE_OPS_FL_INITIALIZED |
-                 FTRACE_OPS_FL_PID |
-                 FTRACE_OPS_FL_STUB,
-#ifdef FTRACE_GRAPH_TRAMP_ADDR
-        .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
-        /* trampoline_size is only needed for dynamically allocated tramps */
-#endif
-        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
-};
-
-void ftrace_graph_sleep_time_control(bool enable)
-{
-        fgraph_sleep_time = enable;
-}
-
-void ftrace_graph_graph_time_control(bool enable)
-{
-        fgraph_graph_time = enable;
-}
-
-int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
-{
-        return 0;
-}
-
-/* The callbacks that hook a function */
-trace_func_graph_ret_t ftrace_graph_return =
-                        (trace_func_graph_ret_t)ftrace_stub;
-trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
-static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
-
-/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
-static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
-{
-        int i;
-        int ret = 0;
-        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
-        struct task_struct *g, *t;
-
-        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
-                ret_stack_list[i] =
-                        kmalloc_array(FTRACE_RETFUNC_DEPTH,
-                                      sizeof(struct ftrace_ret_stack),
-                                      GFP_KERNEL);
-                if (!ret_stack_list[i]) {
-                        start = 0;
-                        end = i;
-                        ret = -ENOMEM;
-                        goto free;
-                }
-        }
-
-        read_lock(&tasklist_lock);
-        do_each_thread(g, t) {
-                if (start == end) {
-                        ret = -EAGAIN;
-                        goto unlock;
-                }
-
-                if (t->ret_stack == NULL) {
-                        atomic_set(&t->tracing_graph_pause, 0);
-                        atomic_set(&t->trace_overrun, 0);
-                        t->curr_ret_stack = -1;
-                        t->curr_ret_depth = -1;
-                        /* Make sure the tasks see the -1 first: */
-                        smp_wmb();
-                        t->ret_stack = ret_stack_list[start++];
-                }
-        } while_each_thread(g, t);
-
-unlock:
-        read_unlock(&tasklist_lock);
-free:
-        for (i = start; i < end; i++)
-                kfree(ret_stack_list[i]);
-        return ret;
-}
-
-static void
-ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
-                        struct task_struct *prev, struct task_struct *next)
-{
-        unsigned long long timestamp;
-        int index;
-
-        /*
-         * Does the user want to count the time a function was asleep.
-         * If so, do not update the time stamps.
-         */
-        if (fgraph_sleep_time)
-                return;
-
-        timestamp = trace_clock_local();
-
-        prev->ftrace_timestamp = timestamp;
-
-        /* only process tasks that we timestamped */
-        if (!next->ftrace_timestamp)
-                return;
-
-        /*
-         * Update all the counters in next to make up for the
-         * time next was sleeping.
-         */
-        timestamp -= next->ftrace_timestamp;
-
-        for (index = next->curr_ret_stack; index >= 0; index--)
-                next->ret_stack[index].calltime += timestamp;
-}
-
-/* Allocate a return stack for each task */
-static int start_graph_tracing(void)
-{
-        struct ftrace_ret_stack **ret_stack_list;
-        int ret, cpu;
-
-        ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
-                                       sizeof(struct ftrace_ret_stack *),
-                                       GFP_KERNEL);
-
-        if (!ret_stack_list)
-                return -ENOMEM;
-
-        /* The cpu_boot init_task->ret_stack will never be freed */
-        for_each_online_cpu(cpu) {
-                if (!idle_task(cpu)->ret_stack)
-                        ftrace_graph_init_idle_task(idle_task(cpu), cpu);
-        }
-
-        do {
-                ret = alloc_retstack_tasklist(ret_stack_list);
-        } while (ret == -EAGAIN);
-
-        if (!ret) {
-                ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-                if (ret)
-                        pr_info("ftrace_graph: Couldn't activate tracepoint"
-                                " probe to kernel_sched_switch\n");
-        }
-
-        kfree(ret_stack_list);
-        return ret;
-}
-
-/*
- * Hibernation protection.
- * The state of the current task is too much unstable during
- * suspend/restore to disk. We want to protect against that.
- */
-static int
-ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
-                                                        void *unused)
-{
-        switch (state) {
-        case PM_HIBERNATION_PREPARE:
-                pause_graph_tracing();
-                break;
-
-        case PM_POST_HIBERNATION:
-                unpause_graph_tracing();
-                break;
-        }
-        return NOTIFY_DONE;
-}
-
-static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
-{
-        if (!ftrace_ops_test(&global_ops, trace->func, NULL))
-                return 0;
-        return __ftrace_graph_entry(trace);
-}
-
-/*
- * The function graph tracer should only trace the functions defined
- * by set_ftrace_filter and set_ftrace_notrace. If another function
- * tracer ops is registered, the graph tracer requires testing the
- * function against the global ops, and not just trace any function
- * that any ftrace_ops registered.
- */
-static void update_function_graph_func(void)
-{
-        struct ftrace_ops *op;
-        bool do_test = false;
-
-        /*
-         * The graph and global ops share the same set of functions
-         * to test. If any other ops is on the list, then
-         * the graph tracing needs to test if its the function
-         * it should call.
-         */
-        do_for_each_ftrace_op(op, ftrace_ops_list) {
-                if (op != &global_ops && op != &graph_ops &&
-                    op != &ftrace_list_end) {
-                        do_test = true;
-                        /* in double loop, break out with goto */
-                        goto out;
-                }
-        } while_for_each_ftrace_op(op);
- out:
-        if (do_test)
-                ftrace_graph_entry = ftrace_graph_entry_test;
-        else
-                ftrace_graph_entry = __ftrace_graph_entry;
-}
-
-static struct notifier_block ftrace_suspend_notifier = {
-        .notifier_call = ftrace_suspend_notifier_call,
-};
-
-int register_ftrace_graph(trace_func_graph_ret_t retfunc,
-                        trace_func_graph_ent_t entryfunc)
-{
-        int ret = 0;
-
-        mutex_lock(&ftrace_lock);
-
-        /* we currently allow only one tracer registered at a time */
-        if (ftrace_graph_active) {
-                ret = -EBUSY;
-                goto out;
-        }
-
-        register_pm_notifier(&ftrace_suspend_notifier);
-
-        ftrace_graph_active++;
-        ret = start_graph_tracing();
-        if (ret) {
-                ftrace_graph_active--;
-                goto out;
-        }
-
-        ftrace_graph_return = retfunc;
-
-        /*
-         * Update the indirect function to the entryfunc, and the
-         * function that gets called to the entry_test first. Then
-         * call the update fgraph entry function to determine if
-         * the entryfunc should be called directly or not.
-         */
-        __ftrace_graph_entry = entryfunc;
-        ftrace_graph_entry = ftrace_graph_entry_test;
-        update_function_graph_func();
-
-        ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
-out:
-        mutex_unlock(&ftrace_lock);
-        return ret;
-}
-
-void unregister_ftrace_graph(void)
-{
-        mutex_lock(&ftrace_lock);
-
-        if (unlikely(!ftrace_graph_active))
-                goto out;
-
-        ftrace_graph_active--;
-        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
-        ftrace_graph_entry = ftrace_graph_entry_stub;
-        __ftrace_graph_entry = ftrace_graph_entry_stub;
-        ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
-        unregister_pm_notifier(&ftrace_suspend_notifier);
-        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-
- out:
-        mutex_unlock(&ftrace_lock);
-}
-
-static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
-
-static void
-graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
-{
-        atomic_set(&t->tracing_graph_pause, 0);
-        atomic_set(&t->trace_overrun, 0);
-        t->ftrace_timestamp = 0;
-        /* make curr_ret_stack visible before we add the ret_stack */
-        smp_wmb();
-        t->ret_stack = ret_stack;
-}
-
-/*
- * Allocate a return stack for the idle task. May be the first
- * time through, or it may be done by CPU hotplug online.
- */
-void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
-{
-        t->curr_ret_stack = -1;
-        t->curr_ret_depth = -1;
-        /*
-         * The idle task has no parent, it either has its own
-         * stack or no stack at all.
-         */
-        if (t->ret_stack)
-                WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
-
-        if (ftrace_graph_active) {
-                struct ftrace_ret_stack *ret_stack;
-
-                ret_stack = per_cpu(idle_ret_stack, cpu);
-                if (!ret_stack) {
-                        ret_stack =
-                                kmalloc_array(FTRACE_RETFUNC_DEPTH,
-                                              sizeof(struct ftrace_ret_stack),
-                                              GFP_KERNEL);
-                        if (!ret_stack)
-                                return;
-                        per_cpu(idle_ret_stack, cpu) = ret_stack;
-                }
-                graph_init_task(t, ret_stack);
-        }
-}
-
-/* Allocate a return stack for newly created task */
-void ftrace_graph_init_task(struct task_struct *t)
-{
-        /* Make sure we do not use the parent ret_stack */
-        t->ret_stack = NULL;
-        t->curr_ret_stack = -1;
-        t->curr_ret_depth = -1;
-
-        if (ftrace_graph_active) {
-                struct ftrace_ret_stack *ret_stack;
-
-                ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
-                                          sizeof(struct ftrace_ret_stack),
-                                          GFP_KERNEL);
-                if (!ret_stack)
-                        return;
-                graph_init_task(t, ret_stack);
-        }
-}
-
-void ftrace_graph_exit_task(struct task_struct *t)
-{
-        struct ftrace_ret_stack *ret_stack = t->ret_stack;
-
-        t->ret_stack = NULL;
-        /* NULL must become visible to IRQs before we free it: */
-        barrier();
-
-        kfree(ret_stack);
-}
-#endif
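
The hunks above replace the old two-argument register_ftrace_graph(retfunc, entryfunc) interface with one that takes a bundled struct fgraph_ops, as the fprofiler_ops conversion shows. Below is a minimal caller-side sketch of that new pattern, assuming an in-kernel (built-in) user with the fgraph_ops, register_ftrace_graph() and unregister_ftrace_graph() definitions this patch relies on; the my_* names and the callback bodies are illustrative only, not part of the patch.

#include <linux/ftrace.h>

/* Entry handler: return nonzero to trace this call (cf. profile_graph_entry above). */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
        return 1;
}

/* Return handler: called when a traced function returns (cf. profile_graph_return). */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
}

/* Both callbacks now travel together in one fgraph_ops, like fprofiler_ops. */
static struct fgraph_ops my_fgraph_ops = {
        .entryfunc = &my_graph_entry,
        .retfunc = &my_graph_return,
};

static int my_fgraph_start(void)
{
        /* Registration and unregistration both take the fgraph_ops pointer. */
        return register_ftrace_graph(&my_fgraph_ops);
}

static void my_fgraph_stop(void)
{
        unregister_ftrace_graph(&my_fgraph_ops);
}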