author	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-31 14:46:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-31 14:46:59 -0500
commit	495d714ad140e1732e66c45d0409054b24c1a0d6 (patch)
tree	373ec6619adea47d848d36f140b32def27164bbd /kernel/trace/ftrace.c
parent	f12e840c819bab42621685558a01d3f46ab9a226 (diff)
parent	3d739c1f6156c70eb0548aa288dcfbac9e0bd162 (diff)
Merge tag 'trace-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:

 - Rework of the kprobe/uprobe and synthetic events to consolidate all
   the dynamic event code. This will make changes in the future easier.

 - Partial rewrite of the function graph tracing infrastructure. This
   will allow for multiple users of hooking onto functions to get the
   callback (return) of the function. This is the ground work for
   having kprobes and function graph tracer using one code base.

 - Clean up of the histogram code that will facilitate adding more
   features to the histograms in the future.

 - Addition of str_has_prefix() and a few use cases. There currently is
   a similar function strstart() that is used in a few places, but only
   returns a bool and not a length. These instances will be removed in
   the future to use str_has_prefix() instead.

 - A few other various clean ups as well.

* tag 'trace-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits)
  tracing: Use the return of str_has_prefix() to remove open coded numbers
  tracing: Have the historgram use the result of str_has_prefix() for len of prefix
  tracing: Use str_has_prefix() instead of using fixed sizes
  tracing: Use str_has_prefix() helper for histogram code
  string.h: Add str_has_prefix() helper function
  tracing: Make function ‘ftrace_exports’ static
  tracing: Simplify printf'ing in seq_print_sym
  tracing: Avoid -Wformat-nonliteral warning
  tracing: Merge seq_print_sym_short() and seq_print_sym_offset()
  tracing: Add hist trigger comments for variable-related fields
  tracing: Remove hist trigger synth_var_refs
  tracing: Use hist trigger's var_ref array to destroy var_refs
  tracing: Remove open-coding of hist trigger var_ref management
  tracing: Use var_refs[] for hist trigger reference checking
  tracing: Change strlen to sizeof for hist trigger static strings
  tracing: Remove unnecessary hist trigger struct field
  tracing: Fix ftrace_graph_get_ret_stack() to use task and not current
  seq_buf: Use size_t for len in seq_buf_puts()
  seq_buf: Make seq_buf_puts() null-terminate the buffer
  arm64: Use ftrace_graph_get_ret_stack() instead of curr_ret_stack
  ...
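As a quick illustration of the str_has_prefix() helper called out in the pull message above: unlike an open-coded strncmp() plus a hand-counted length, it reports the prefix length on a match, so callers can index past the prefix without magic numbers. The snippet below is a userspace sketch of those semantics, not the kernel code itself; the in-tree helper is added to include/linux/string.h by the "string.h: Add str_has_prefix() helper function" commit, and the trigger string used here is only an example.

#include <stdio.h>
#include <string.h>

/*
 * Userspace sketch of the helper's semantics: return the length of
 * "prefix" when "str" starts with it, 0 otherwise. The in-kernel
 * definition lives in include/linux/string.h.
 */
static size_t str_has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
	const char *param = "onmatch(foo.bar).trace(sample)";	/* example string only */
	size_t len;

	/* Old style: if (strncmp(param, "onmatch(", 8) == 0) p = param + 8; */
	len = str_has_prefix(param, "onmatch(");
	if (len)
		printf("payload: %s\n", param + len);

	return 0;
}

Commits such as "tracing: Use the return of str_has_prefix() to remove open coded numbers" rely on exactly this: the returned length replaces the hand-counted prefix sizes in the histogram and trigger parsing code.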
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	490
1 file changed, 53 insertions, 437 deletions
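Before the diff itself, a hedged sketch of the reworked function-graph registration interface that the hunks below migrate the function profiler to. The fgraph_ops structure, its entryfunc/retfunc members, and the register_ftrace_graph()/unregister_ftrace_graph() calls are taken from the diff; the callback names, bodies, and surrounding functions are purely illustrative.

#include <linux/ftrace.h>

/*
 * Illustrative entry callback: returning nonzero asks for the matching
 * return callback when this function exits (as profile_graph_entry()
 * does in the diff below).
 */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;
}

/*
 * Illustrative return callback: trace->rettime - trace->calltime is the
 * duration of the call, as used by profile_graph_return() in the diff.
 */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
}

/*
 * One handle now bundles both hooks; per the pull message, this is the
 * ground work for allowing several users to hook function entry/return.
 */
static struct fgraph_ops my_graph_ops = {
	.entryfunc	= my_graph_entry,
	.retfunc	= my_graph_return,
};

static int my_tracer_start(void)
{
	/* Old interface: register_ftrace_graph(retfunc, entryfunc) */
	return register_ftrace_graph(&my_graph_ops);
}

static void my_tracer_stop(void)
{
	unregister_ftrace_graph(&my_graph_ops);
}

The profiler hunks below (fprofiler_ops, register_ftrace_profiler(), unregister_ftrace_profiler()) follow this same pattern.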
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f0ff24173a0b..aac7847c0214 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -19,7 +19,6 @@
 #include <linux/sched/task.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
-#include <linux/suspend.h>
 #include <linux/tracefs.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
@@ -40,6 +39,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>

+#include "ftrace_internal.h"
 #include "trace_output.h"
 #include "trace_stat.h"

@@ -77,7 +77,12 @@
 #define ASSIGN_OPS_HASH(opsname, val)
 #endif

-static struct ftrace_ops ftrace_list_end __read_mostly = {
+enum {
+	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
+	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
+};
+
+struct ftrace_ops ftrace_list_end __read_mostly = {
 	.func		= ftrace_stub,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
 	INIT_OPS_HASH(ftrace_list_end)
@@ -112,11 +117,11 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops);
  */
 static int ftrace_disabled __read_mostly;

-static DEFINE_MUTEX(ftrace_lock);
+DEFINE_MUTEX(ftrace_lock);

-static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
+struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static struct ftrace_ops global_ops;
+struct ftrace_ops global_ops;

 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
@@ -127,26 +132,6 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif

-/*
- * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw_notrace() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
-#define do_for_each_ftrace_op(op, list)			\
-	op = rcu_dereference_raw_notrace(list);			\
-	do
-
-/*
- * Optimized for just a single item in the list (as that is the normal case).
- */
-#define while_for_each_ftrace_op(op)				\
-	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
-	       unlikely((op) != &ftrace_list_end))
-
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -186,18 +171,6 @@ static void ftrace_sync_ipi(void *data)
 	smp_rmb();
 }

-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void update_function_graph_func(void);
-
-/* Both enabled by default (can be cleared by function_graph tracer flags */
-static bool fgraph_sleep_time = true;
-static bool fgraph_graph_time = true;
-
-#else
-static inline void update_function_graph_func(void) { }
-#endif
-
-
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
 	/*
@@ -334,7 +307,7 @@ static int remove_ftrace_ops(struct ftrace_ops __rcu **list,

 static void ftrace_update_trampoline(struct ftrace_ops *ops);

-static int __register_ftrace_function(struct ftrace_ops *ops)
+int __register_ftrace_function(struct ftrace_ops *ops)
 {
 	if (ops->flags & FTRACE_OPS_FL_DELETED)
 		return -EINVAL;
@@ -375,7 +348,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }

-static int __unregister_ftrace_function(struct ftrace_ops *ops)
+int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;

@@ -815,9 +788,16 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 }

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static bool fgraph_graph_time = true;
+
+void ftrace_graph_graph_time_control(bool enable)
+{
+	fgraph_graph_time = enable;
+}
+
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-	int index = current->curr_ret_stack;
+	struct ftrace_ret_stack *ret_stack;

 	function_profile_call(trace->func, 0, NULL, NULL);

@@ -825,14 +805,16 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
 	if (!current->ret_stack)
 		return 0;

-	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
-		current->ret_stack[index].subtime = 0;
+	ret_stack = ftrace_graph_get_ret_stack(current, 0);
+	if (ret_stack)
+		ret_stack->subtime = 0;

 	return 1;
 }

 static void profile_graph_return(struct ftrace_graph_ret *trace)
 {
+	struct ftrace_ret_stack *ret_stack;
 	struct ftrace_profile_stat *stat;
 	unsigned long long calltime;
 	struct ftrace_profile *rec;
@@ -850,16 +832,15 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 	calltime = trace->rettime - trace->calltime;

 	if (!fgraph_graph_time) {
-		int index;
-
-		index = current->curr_ret_stack;

 		/* Append this call time to the parent time to subtract */
-		if (index)
-			current->ret_stack[index - 1].subtime += calltime;
+		ret_stack = ftrace_graph_get_ret_stack(current, 1);
+		if (ret_stack)
+			ret_stack->subtime += calltime;

-		if (current->ret_stack[index].subtime < calltime)
-			calltime -= current->ret_stack[index].subtime;
+		ret_stack = ftrace_graph_get_ret_stack(current, 0);
+		if (ret_stack && ret_stack->subtime < calltime)
+			calltime -= ret_stack->subtime;
 		else
 			calltime = 0;
 	}
@@ -874,15 +855,19 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
 	local_irq_restore(flags);
 }

+static struct fgraph_ops fprofiler_ops = {
+	.entryfunc = &profile_graph_entry,
+	.retfunc = &profile_graph_return,
+};
+
 static int register_ftrace_profiler(void)
 {
-	return register_ftrace_graph(&profile_graph_return,
-				     &profile_graph_entry);
+	return register_ftrace_graph(&fprofiler_ops);
 }

 static void unregister_ftrace_profiler(void)
 {
-	unregister_ftrace_graph();
+	unregister_ftrace_graph(&fprofiler_ops);
 }
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
@@ -1021,12 +1006,6 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 }
 #endif /* CONFIG_FUNCTION_PROFILER */

-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_graph_active;
-#else
-# define ftrace_graph_active 0
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE

 static struct ftrace_ops *removed_ops;
@@ -1067,7 +1046,7 @@ static const struct ftrace_hash empty_hash = {
 };
 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
 	.func				= ftrace_stub,
 	.local_hash.notrace_hash	= EMPTY_HASH,
 	.local_hash.filter_hash		= EMPTY_HASH,
@@ -1503,7 +1482,7 @@ static bool hash_contains_ip(unsigned long ip,
  * This needs to be called with preemption disabled as
  * the hashes are freed with call_rcu().
  */
-static int
+int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
 	struct ftrace_ops_hash hash;
@@ -2415,10 +2394,12 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	return -1; /* unknow ftrace bug */
 }

-void __weak ftrace_replace_code(int enable)
+void __weak ftrace_replace_code(int mod_flags)
 {
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
+	int enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
+	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
 	int failed;

 	if (unlikely(ftrace_disabled))
@@ -2435,6 +2416,8 @@ void __weak ftrace_replace_code(int enable)
 			/* Stop processing */
 			return;
 		}
+		if (schedulable)
+			cond_resched();
 	} while_for_each_ftrace_rec();
 }

@@ -2548,8 +2531,12 @@ int __weak ftrace_arch_code_modify_post_process(void)
 void ftrace_modify_all_code(int command)
 {
 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
+	int mod_flags = 0;
 	int err = 0;

+	if (command & FTRACE_MAY_SLEEP)
+		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
+
 	/*
 	 * If the ftrace_caller calls a ftrace_ops func directly,
 	 * we need to make sure that it only traces functions it
@@ -2567,9 +2554,9 @@ void ftrace_modify_all_code(int command)
 	}

 	if (command & FTRACE_UPDATE_CALLS)
-		ftrace_replace_code(1);
+		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
 	else if (command & FTRACE_DISABLE_CALLS)
-		ftrace_replace_code(0);
+		ftrace_replace_code(mod_flags);

 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
 		function_trace_op = set_function_trace_op;
@@ -2682,7 +2669,7 @@ static void ftrace_startup_all(int command)
 	update_all_ops = false;
 }

-static int ftrace_startup(struct ftrace_ops *ops, int command)
+int ftrace_startup(struct ftrace_ops *ops, int command)
 {
 	int ret;

@@ -2724,7 +2711,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
 	return 0;
 }

-static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
 	int ret;

@@ -6178,7 +6165,7 @@ void ftrace_init_trace_array(struct trace_array *tr)
 }
 #else

-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
 				  FTRACE_OPS_FL_INITIALIZED |
@@ -6195,31 +6182,10 @@ core_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 static inline void ftrace_startup_all(int command) { }
-/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)				\
-	({							\
-		int ___ret = __register_ftrace_function(ops);	\
-		if (!___ret)					\
-			(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
-		___ret;						\
-	})
-# define ftrace_shutdown(ops, command)				\
-	({							\
-		int ___ret = __unregister_ftrace_function(ops);	\
-		if (!___ret)					\
-			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;	\
-		___ret;						\
-	})

 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)

-static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
-{
-	return 1;
-}
-
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
 }
@@ -6746,353 +6712,3 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	mutex_unlock(&ftrace_lock);
 	return ret;
 }
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-static struct ftrace_ops graph_ops = {
-	.func			= ftrace_stub,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
-				   FTRACE_OPS_FL_INITIALIZED |
-				   FTRACE_OPS_FL_PID |
-				   FTRACE_OPS_FL_STUB,
-#ifdef FTRACE_GRAPH_TRAMP_ADDR
-	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
-	/* trampoline_size is only needed for dynamically allocated tramps */
-#endif
-	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
-};
-
-void ftrace_graph_sleep_time_control(bool enable)
-{
-	fgraph_sleep_time = enable;
-}
-
-void ftrace_graph_graph_time_control(bool enable)
-{
-	fgraph_graph_time = enable;
-}
-
-int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
-{
-	return 0;
-}
-
-/* The callbacks that hook a function */
-trace_func_graph_ret_t ftrace_graph_return =
-			(trace_func_graph_ret_t)ftrace_stub;
-trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
-static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
-
-/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
-static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
-{
-	int i;
-	int ret = 0;
-	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
-	struct task_struct *g, *t;
-
-	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
-		ret_stack_list[i] =
-			kmalloc_array(FTRACE_RETFUNC_DEPTH,
-				      sizeof(struct ftrace_ret_stack),
-				      GFP_KERNEL);
-		if (!ret_stack_list[i]) {
-			start = 0;
-			end = i;
-			ret = -ENOMEM;
-			goto free;
-		}
-	}
-
-	read_lock(&tasklist_lock);
-	do_each_thread(g, t) {
-		if (start == end) {
-			ret = -EAGAIN;
-			goto unlock;
-		}
-
-		if (t->ret_stack == NULL) {
-			atomic_set(&t->tracing_graph_pause, 0);
-			atomic_set(&t->trace_overrun, 0);
-			t->curr_ret_stack = -1;
-			t->curr_ret_depth = -1;
-			/* Make sure the tasks see the -1 first: */
-			smp_wmb();
-			t->ret_stack = ret_stack_list[start++];
-		}
-	} while_each_thread(g, t);
-
-unlock:
-	read_unlock(&tasklist_lock);
-free:
-	for (i = start; i < end; i++)
-		kfree(ret_stack_list[i]);
-	return ret;
-}
-
-static void
-ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
-			struct task_struct *prev, struct task_struct *next)
-{
-	unsigned long long timestamp;
-	int index;
-
-	/*
-	 * Does the user want to count the time a function was asleep.
-	 * If so, do not update the time stamps.
-	 */
-	if (fgraph_sleep_time)
-		return;
-
-	timestamp = trace_clock_local();
-
-	prev->ftrace_timestamp = timestamp;
-
-	/* only process tasks that we timestamped */
-	if (!next->ftrace_timestamp)
-		return;
-
-	/*
-	 * Update all the counters in next to make up for the
-	 * time next was sleeping.
-	 */
-	timestamp -= next->ftrace_timestamp;
-
-	for (index = next->curr_ret_stack; index >= 0; index--)
-		next->ret_stack[index].calltime += timestamp;
-}
-
-/* Allocate a return stack for each task */
-static int start_graph_tracing(void)
-{
-	struct ftrace_ret_stack **ret_stack_list;
-	int ret, cpu;
-
-	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
-				       sizeof(struct ftrace_ret_stack *),
-				       GFP_KERNEL);
-
-	if (!ret_stack_list)
-		return -ENOMEM;
-
-	/* The cpu_boot init_task->ret_stack will never be freed */
-	for_each_online_cpu(cpu) {
-		if (!idle_task(cpu)->ret_stack)
-			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
-	}
-
-	do {
-		ret = alloc_retstack_tasklist(ret_stack_list);
-	} while (ret == -EAGAIN);
-
-	if (!ret) {
-		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-		if (ret)
-			pr_info("ftrace_graph: Couldn't activate tracepoint"
-				" probe to kernel_sched_switch\n");
-	}
-
-	kfree(ret_stack_list);
-	return ret;
-}
-
-/*
- * Hibernation protection.
- * The state of the current task is too much unstable during
- * suspend/restore to disk. We want to protect against that.
- */
-static int
-ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
-							void *unused)
-{
-	switch (state) {
-	case PM_HIBERNATION_PREPARE:
-		pause_graph_tracing();
-		break;
-
-	case PM_POST_HIBERNATION:
-		unpause_graph_tracing();
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
-static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
-{
-	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
-		return 0;
-	return __ftrace_graph_entry(trace);
-}
-
-/*
- * The function graph tracer should only trace the functions defined
- * by set_ftrace_filter and set_ftrace_notrace. If another function
- * tracer ops is registered, the graph tracer requires testing the
- * function against the global ops, and not just trace any function
- * that any ftrace_ops registered.
- */
-static void update_function_graph_func(void)
-{
-	struct ftrace_ops *op;
-	bool do_test = false;
-
-	/*
-	 * The graph and global ops share the same set of functions
-	 * to test. If any other ops is on the list, then
-	 * the graph tracing needs to test if its the function
-	 * it should call.
-	 */
-	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (op != &global_ops && op != &graph_ops &&
-		    op != &ftrace_list_end) {
-			do_test = true;
-			/* in double loop, break out with goto */
-			goto out;
-		}
-	} while_for_each_ftrace_op(op);
- out:
-	if (do_test)
-		ftrace_graph_entry = ftrace_graph_entry_test;
-	else
-		ftrace_graph_entry = __ftrace_graph_entry;
-}
-
-static struct notifier_block ftrace_suspend_notifier = {
-	.notifier_call = ftrace_suspend_notifier_call,
-};
-
-int register_ftrace_graph(trace_func_graph_ret_t retfunc,
-			trace_func_graph_ent_t entryfunc)
-{
-	int ret = 0;
-
-	mutex_lock(&ftrace_lock);
-
-	/* we currently allow only one tracer registered at a time */
-	if (ftrace_graph_active) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	register_pm_notifier(&ftrace_suspend_notifier);
-
-	ftrace_graph_active++;
-	ret = start_graph_tracing();
-	if (ret) {
-		ftrace_graph_active--;
-		goto out;
-	}
-
-	ftrace_graph_return = retfunc;
-
-	/*
-	 * Update the indirect function to the entryfunc, and the
-	 * function that gets called to the entry_test first. Then
-	 * call the update fgraph entry function to determine if
-	 * the entryfunc should be called directly or not.
-	 */
-	__ftrace_graph_entry = entryfunc;
-	ftrace_graph_entry = ftrace_graph_entry_test;
-	update_function_graph_func();
-
-	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
-out:
-	mutex_unlock(&ftrace_lock);
-	return ret;
-}
-
-void unregister_ftrace_graph(void)
-{
-	mutex_lock(&ftrace_lock);
-
-	if (unlikely(!ftrace_graph_active))
-		goto out;
-
-	ftrace_graph_active--;
-	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
-	ftrace_graph_entry = ftrace_graph_entry_stub;
-	__ftrace_graph_entry = ftrace_graph_entry_stub;
-	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
-	unregister_pm_notifier(&ftrace_suspend_notifier);
-	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-
- out:
-	mutex_unlock(&ftrace_lock);
-}
-
-static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
-
-static void
-graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
-{
-	atomic_set(&t->tracing_graph_pause, 0);
-	atomic_set(&t->trace_overrun, 0);
-	t->ftrace_timestamp = 0;
-	/* make curr_ret_stack visible before we add the ret_stack */
-	smp_wmb();
-	t->ret_stack = ret_stack;
-}
-
-/*
- * Allocate a return stack for the idle task. May be the first
- * time through, or it may be done by CPU hotplug online.
- */
-void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
-{
-	t->curr_ret_stack = -1;
-	t->curr_ret_depth = -1;
-	/*
-	 * The idle task has no parent, it either has its own
-	 * stack or no stack at all.
-	 */
-	if (t->ret_stack)
-		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
-
-	if (ftrace_graph_active) {
-		struct ftrace_ret_stack *ret_stack;
-
-		ret_stack = per_cpu(idle_ret_stack, cpu);
-		if (!ret_stack) {
-			ret_stack =
-				kmalloc_array(FTRACE_RETFUNC_DEPTH,
-					      sizeof(struct ftrace_ret_stack),
-					      GFP_KERNEL);
-			if (!ret_stack)
-				return;
-			per_cpu(idle_ret_stack, cpu) = ret_stack;
-		}
-		graph_init_task(t, ret_stack);
-	}
-}
-
-/* Allocate a return stack for newly created task */
-void ftrace_graph_init_task(struct task_struct *t)
-{
-	/* Make sure we do not use the parent ret_stack */
-	t->ret_stack = NULL;
-	t->curr_ret_stack = -1;
-	t->curr_ret_depth = -1;
-
-	if (ftrace_graph_active) {
-		struct ftrace_ret_stack *ret_stack;
-
-		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
-					  sizeof(struct ftrace_ret_stack),
-					  GFP_KERNEL);
-		if (!ret_stack)
-			return;
-		graph_init_task(t, ret_stack);
-	}
-}
-
-void ftrace_graph_exit_task(struct task_struct *t)
-{
-	struct ftrace_ret_stack *ret_stack = t->ret_stack;
-
-	t->ret_stack = NULL;
-	/* NULL must become visible to IRQs before we free it: */
-	barrier();
-
-	kfree(ret_stack);
-}
-#endif