 kernel/trace/trace.c | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8fdb9cedc24..2e37857f7dfe 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -96,6 +96,9 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
 
+/* function tracing enabled */
+int ftrace_function_enabled;
+
 /*
  * trace_nr_entries is the number of entries that is allocated
  * for a buffer. Note, the number of entries is always rounded
@@ -134,6 +137,7 @@ static notrace void no_trace_init(struct trace_array *tr)
 {
 	int cpu;
 
+	ftrace_function_enabled = 0;
 	if(tr->ctrl)
 		for_each_online_cpu(cpu)
 			tracing_reset(tr->data[cpu]);
@@ -985,7 +989,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	long disabled;
 	int cpu;
 
-	if (unlikely(!tracer_enabled))
+	if (unlikely(!ftrace_function_enabled))
 		return;
 
 	if (skip_trace(ip))
@@ -1010,11 +1014,15 @@ static struct ftrace_ops trace_ops __read_mostly =
 
 void tracing_start_function_trace(void)
 {
+	ftrace_function_enabled = 0;
 	register_ftrace_function(&trace_ops);
+	if (tracer_enabled)
+		ftrace_function_enabled = 1;
 }
 
 void tracing_stop_function_trace(void)
 {
+	ftrace_function_enabled = 0;
 	unregister_ftrace_function(&trace_ops);
 }
 #endif
@@ -1850,8 +1858,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 	m->private = iter;
 
 	/* stop the trace while dumping */
-	if (iter->tr->ctrl)
+	if (iter->tr->ctrl) {
 		tracer_enabled = 0;
+		ftrace_function_enabled = 0;
+	}
 
 	if (iter->trace && iter->trace->open)
 		iter->trace->open(iter);
@@ -1884,8 +1894,14 @@ int tracing_release(struct inode *inode, struct file *file)
 		iter->trace->close(iter);
 
 	/* reenable tracing if it was previously enabled */
-	if (iter->tr->ctrl)
+	if (iter->tr->ctrl) {
 		tracer_enabled = 1;
+		/*
+		 * It is safe to enable function tracing even if it
+		 * isn't used
+		 */
+		ftrace_function_enabled = 1;
+	}
 	mutex_unlock(&trace_types_lock);
 
 	seq_release(inode, file);
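
For reference, a minimal userspace sketch of the gating pattern this patch applies: the hot callback bails out unless its own ftrace_function_enabled flag is set, the flag is cleared before the callback is registered or unregistered, and it is only raised while the overall tracer is enabled. This is not kernel code; apart from the two flag names and the function names taken from the diff, everything below (the function pointer standing in for register_ftrace_function(), the printf, main()) is made up purely for illustration.

/* gating_sketch.c - illustrative only, not kernel code */
#include <stdio.h>

static int tracer_enabled = 1;        /* overall tracer on/off */
static int ftrace_function_enabled;   /* gate for the function callback */

/* hypothetical stand-in for register_ftrace_function()/unregister_ftrace_function() */
static void (*registered_callback)(unsigned long ip);

/* hot path: drop the event early unless function tracing is enabled */
static void function_trace_call(unsigned long ip)
{
	if (!ftrace_function_enabled)
		return;
	printf("traced ip=%#lx\n", ip);
}

static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;          /* keep the callback quiet while registering */
	registered_callback = function_trace_call;
	if (tracer_enabled)                   /* only record if the tracer itself is on */
		ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;          /* silence the callback before unregistering */
	registered_callback = NULL;
}

int main(void)
{
	tracing_start_function_trace();
	if (registered_callback)
		registered_callback(0xc0ffee);    /* recorded */

	ftrace_function_enabled = 0;              /* e.g. while a trace file is being dumped */
	if (registered_callback)
		registered_callback(0xdead);      /* silently dropped */

	tracing_stop_function_trace();
	return 0;
}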
