author		Steven Rostedt (VMware) <rostedt@goodmis.org>	2018-11-15 12:35:13 -0500
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2018-12-08 20:54:06 -0500
commit		e73e679f656e678b0e7f8961094201f3544f4541 (patch)
tree		f523a2211170af31be239ad1db1b263450207120 /kernel/trace/ftrace.c
parent		c8dd0f45874547e6e77bab03d71feb16c4cb98a8 (diff)
fgraph: Move function graph specific code into fgraph.c
To make the function graph infrastructure more manageable, the code needs
to be in its own file (fgraph.c). Move the code that is specific for
managing the function graph infrastructure out of ftrace.c and into
fgraph.c.

Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	368
1 file changed, 7 insertions(+), 361 deletions(-)
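
For context, the interface being relocated is the hook pair a tracer uses to
see function entry and return. Below is a minimal sketch of a caller, assuming
the pre-move prototypes visible in the last hunk of this diff; my_graph_entry(),
my_graph_return() and the start/stop wrappers are hypothetical and not part of
this commit:

/*
 * Sketch only: how an in-kernel tracer hooks the function graph
 * infrastructure.  The register/unregister prototypes are the ones
 * whose implementation this patch moves into fgraph.c.
 */
#include <linux/ftrace.h>

/* Entry callback: a non-zero return means "trace this call". */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;
}

/* Return callback: runs when the traced function returns. */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
}

static int my_tracer_start(void)
{
	/* Only one graph tracer may be registered at a time (-EBUSY otherwise). */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void my_tracer_stop(void)
{
	unregister_ftrace_graph();
}

Built-in tracers under kernel/trace (for example the function_graph tracer and
the profiler code kept in this file) use this pair the same way; after this
patch the implementation behind it lives in fgraph.c.
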
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 52c89428b0db..c53533b833cf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -19,7 +19,6 @@
 #include <linux/sched/task.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
-#include <linux/suspend.h>
 #include <linux/tracefs.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
@@ -167,12 +166,6 @@ static void ftrace_sync_ipi(void *data)
 	smp_rmb();
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-/* Both enabled by default (can be cleared by function_graph tracer flags */
-static bool fgraph_sleep_time = true;
-static bool fgraph_graph_time = true;
-#endif
-
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
 	/*
@@ -790,6 +783,13 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static bool fgraph_graph_time = true;
+
+void ftrace_graph_graph_time_control(bool enable)
+{
+	fgraph_graph_time = enable;
+}
+
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
 	int index = current->curr_ret_stack;
@@ -996,10 +996,6 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-int ftrace_graph_active;
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static struct ftrace_ops *removed_ops;
@@ -6697,353 +6693,3 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	mutex_unlock(&ftrace_lock);
 	return ret;
 }
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-static struct ftrace_ops graph_ops = {
-	.func = ftrace_stub,
-	.flags = FTRACE_OPS_FL_RECURSION_SAFE |
-		 FTRACE_OPS_FL_INITIALIZED |
-		 FTRACE_OPS_FL_PID |
-		 FTRACE_OPS_FL_STUB,
-#ifdef FTRACE_GRAPH_TRAMP_ADDR
-	.trampoline = FTRACE_GRAPH_TRAMP_ADDR,
-	/* trampoline_size is only needed for dynamically allocated tramps */
-#endif
-	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
-};
-
-void ftrace_graph_sleep_time_control(bool enable)
-{
-	fgraph_sleep_time = enable;
-}
-
-void ftrace_graph_graph_time_control(bool enable)
-{
-	fgraph_graph_time = enable;
-}
-
-int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
-{
-	return 0;
-}
-
-/* The callbacks that hook a function */
-trace_func_graph_ret_t ftrace_graph_return =
-			(trace_func_graph_ret_t)ftrace_stub;
-trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
-static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
-
-/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
-static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
-{
-	int i;
-	int ret = 0;
-	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
-	struct task_struct *g, *t;
-
-	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
-		ret_stack_list[i] =
-			kmalloc_array(FTRACE_RETFUNC_DEPTH,
-				      sizeof(struct ftrace_ret_stack),
-				      GFP_KERNEL);
-		if (!ret_stack_list[i]) {
-			start = 0;
-			end = i;
-			ret = -ENOMEM;
-			goto free;
-		}
-	}
-
-	read_lock(&tasklist_lock);
-	do_each_thread(g, t) {
-		if (start == end) {
-			ret = -EAGAIN;
-			goto unlock;
-		}
-
-		if (t->ret_stack == NULL) {
-			atomic_set(&t->tracing_graph_pause, 0);
-			atomic_set(&t->trace_overrun, 0);
-			t->curr_ret_stack = -1;
-			t->curr_ret_depth = -1;
-			/* Make sure the tasks see the -1 first: */
-			smp_wmb();
-			t->ret_stack = ret_stack_list[start++];
-		}
-	} while_each_thread(g, t);
-
-unlock:
-	read_unlock(&tasklist_lock);
-free:
-	for (i = start; i < end; i++)
-		kfree(ret_stack_list[i]);
-	return ret;
-}
-
-static void
-ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
-				struct task_struct *prev, struct task_struct *next)
-{
-	unsigned long long timestamp;
-	int index;
-
-	/*
-	 * Does the user want to count the time a function was asleep.
-	 * If so, do not update the time stamps.
-	 */
-	if (fgraph_sleep_time)
-		return;
-
-	timestamp = trace_clock_local();
-
-	prev->ftrace_timestamp = timestamp;
-
-	/* only process tasks that we timestamped */
-	if (!next->ftrace_timestamp)
-		return;
-
-	/*
-	 * Update all the counters in next to make up for the
-	 * time next was sleeping.
-	 */
-	timestamp -= next->ftrace_timestamp;
-
-	for (index = next->curr_ret_stack; index >= 0; index--)
-		next->ret_stack[index].calltime += timestamp;
-}
-
-/* Allocate a return stack for each task */
-static int start_graph_tracing(void)
-{
-	struct ftrace_ret_stack **ret_stack_list;
-	int ret, cpu;
-
-	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
-				       sizeof(struct ftrace_ret_stack *),
-				       GFP_KERNEL);
-
-	if (!ret_stack_list)
-		return -ENOMEM;
-
-	/* The cpu_boot init_task->ret_stack will never be freed */
-	for_each_online_cpu(cpu) {
-		if (!idle_task(cpu)->ret_stack)
-			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
-	}
-
-	do {
-		ret = alloc_retstack_tasklist(ret_stack_list);
-	} while (ret == -EAGAIN);
-
-	if (!ret) {
-		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-		if (ret)
-			pr_info("ftrace_graph: Couldn't activate tracepoint"
-				" probe to kernel_sched_switch\n");
-	}
-
-	kfree(ret_stack_list);
-	return ret;
-}
-
-/*
- * Hibernation protection.
- * The state of the current task is too much unstable during
- * suspend/restore to disk. We want to protect against that.
- */
-static int
-ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
-			     void *unused)
-{
-	switch (state) {
-	case PM_HIBERNATION_PREPARE:
-		pause_graph_tracing();
-		break;
-
-	case PM_POST_HIBERNATION:
-		unpause_graph_tracing();
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
-static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
-{
-	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
-		return 0;
-	return __ftrace_graph_entry(trace);
-}
-
-/*
- * The function graph tracer should only trace the functions defined
- * by set_ftrace_filter and set_ftrace_notrace. If another function
- * tracer ops is registered, the graph tracer requires testing the
- * function against the global ops, and not just trace any function
- * that any ftrace_ops registered.
- */
-void update_function_graph_func(void)
-{
-	struct ftrace_ops *op;
-	bool do_test = false;
-
-	/*
-	 * The graph and global ops share the same set of functions
-	 * to test. If any other ops is on the list, then
-	 * the graph tracing needs to test if its the function
-	 * it should call.
-	 */
-	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (op != &global_ops && op != &graph_ops &&
-		    op != &ftrace_list_end) {
-			do_test = true;
-			/* in double loop, break out with goto */
-			goto out;
-		}
-	} while_for_each_ftrace_op(op);
- out:
-	if (do_test)
-		ftrace_graph_entry = ftrace_graph_entry_test;
-	else
-		ftrace_graph_entry = __ftrace_graph_entry;
-}
-
-static struct notifier_block ftrace_suspend_notifier = {
-	.notifier_call = ftrace_suspend_notifier_call,
-};
-
-int register_ftrace_graph(trace_func_graph_ret_t retfunc,
-			  trace_func_graph_ent_t entryfunc)
-{
-	int ret = 0;
-
-	mutex_lock(&ftrace_lock);
-
-	/* we currently allow only one tracer registered at a time */
-	if (ftrace_graph_active) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	register_pm_notifier(&ftrace_suspend_notifier);
-
-	ftrace_graph_active++;
-	ret = start_graph_tracing();
-	if (ret) {
-		ftrace_graph_active--;
-		goto out;
-	}
-
-	ftrace_graph_return = retfunc;
-
-	/*
-	 * Update the indirect function to the entryfunc, and the
-	 * function that gets called to the entry_test first. Then
-	 * call the update fgraph entry function to determine if
-	 * the entryfunc should be called directly or not.
-	 */
-	__ftrace_graph_entry = entryfunc;
-	ftrace_graph_entry = ftrace_graph_entry_test;
-	update_function_graph_func();
-
-	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
-out:
-	mutex_unlock(&ftrace_lock);
-	return ret;
-}
-
-void unregister_ftrace_graph(void)
-{
-	mutex_lock(&ftrace_lock);
-
-	if (unlikely(!ftrace_graph_active))
-		goto out;
-
-	ftrace_graph_active--;
-	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
-	ftrace_graph_entry = ftrace_graph_entry_stub;
-	__ftrace_graph_entry = ftrace_graph_entry_stub;
-	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
-	unregister_pm_notifier(&ftrace_suspend_notifier);
-	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-
- out:
-	mutex_unlock(&ftrace_lock);
-}
-
-static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
-
-static void
-graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
-{
-	atomic_set(&t->tracing_graph_pause, 0);
-	atomic_set(&t->trace_overrun, 0);
-	t->ftrace_timestamp = 0;
-	/* make curr_ret_stack visible before we add the ret_stack */
-	smp_wmb();
-	t->ret_stack = ret_stack;
-}
-
-/*
- * Allocate a return stack for the idle task. May be the first
- * time through, or it may be done by CPU hotplug online.
- */
-void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
-{
-	t->curr_ret_stack = -1;
-	t->curr_ret_depth = -1;
-	/*
-	 * The idle task has no parent, it either has its own
-	 * stack or no stack at all.
-	 */
-	if (t->ret_stack)
-		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
-
-	if (ftrace_graph_active) {
-		struct ftrace_ret_stack *ret_stack;
-
-		ret_stack = per_cpu(idle_ret_stack, cpu);
-		if (!ret_stack) {
-			ret_stack =
-				kmalloc_array(FTRACE_RETFUNC_DEPTH,
-					      sizeof(struct ftrace_ret_stack),
-					      GFP_KERNEL);
-			if (!ret_stack)
-				return;
-			per_cpu(idle_ret_stack, cpu) = ret_stack;
-		}
-		graph_init_task(t, ret_stack);
-	}
-}
-
-/* Allocate a return stack for newly created task */
-void ftrace_graph_init_task(struct task_struct *t)
-{
-	/* Make sure we do not use the parent ret_stack */
-	t->ret_stack = NULL;
-	t->curr_ret_stack = -1;
-	t->curr_ret_depth = -1;
-
-	if (ftrace_graph_active) {
-		struct ftrace_ret_stack *ret_stack;
-
-		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
-					  sizeof(struct ftrace_ret_stack),
-					  GFP_KERNEL);
-		if (!ret_stack)
-			return;
-		graph_init_task(t, ret_stack);
-	}
-}
-
-void ftrace_graph_exit_task(struct task_struct *t)
-{
-	struct ftrace_ret_stack *ret_stack = t->ret_stack;
-
-	t->ret_stack = NULL;
-	/* NULL must become visible to IRQs before we free it: */
-	barrier();
-
-	kfree(ret_stack);
-}
-#endif
7049#endif