Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c            64
-rw-r--r--  kernel/trace/trace_event_perf.c   8
-rw-r--r--  kernel/trace/trace_events.c       3
-rw-r--r--  kernel/trace/trace_syscalls.c    10
4 files changed, 45 insertions(+), 40 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 22fa55696760..0e9f9eaade2f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -367,9 +367,6 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-	if (unlikely(ftrace_disabled))
-		return -ENODEV;
-
 	if (FTRACE_WARN_ON(ops == &global_ops))
 		return -EINVAL;
 
@@ -428,9 +425,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
 
-	if (ftrace_disabled)
-		return -ENODEV;
-
 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 		return -EBUSY;
 
@@ -2088,10 +2082,15 @@ static void ftrace_startup_enable(int command)
 static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
 	bool hash_enable = true;
+	int ret;
 
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
+	ret = __register_ftrace_function(ops);
+	if (ret)
+		return ret;
+
 	ftrace_start_up++;
 	command |= FTRACE_UPDATE_CALLS;
 
@@ -2113,12 +2112,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
 	return 0;
 }
 
-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
 	bool hash_disable = true;
+	int ret;
 
 	if (unlikely(ftrace_disabled))
-		return;
+		return -ENODEV;
+
+	ret = __unregister_ftrace_function(ops);
+	if (ret)
+		return ret;
 
 	ftrace_start_up--;
 	/*
@@ -2153,9 +2157,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
 	}
 
 	if (!command || !ftrace_enabled)
-		return;
+		return 0;
 
 	ftrace_run_update_code(command);
+	return 0;
 }
 
 static void ftrace_startup_sysctl(void)
@@ -3060,16 +3065,13 @@ static void __enable_ftrace_function_probe(void)
 	if (i == FTRACE_FUNC_HASHSIZE)
 		return;
 
-	ret = __register_ftrace_function(&trace_probe_ops);
-	if (!ret)
-		ret = ftrace_startup(&trace_probe_ops, 0);
+	ret = ftrace_startup(&trace_probe_ops, 0);
 
 	ftrace_probe_registered = 1;
 }
 
 static void __disable_ftrace_function_probe(void)
 {
-	int ret;
 	int i;
 
 	if (!ftrace_probe_registered)
@@ -3082,9 +3084,7 @@ static void __disable_ftrace_function_probe(void)
 	}
 
 	/* no more funcs left */
-	ret = __unregister_ftrace_function(&trace_probe_ops);
-	if (!ret)
-		ftrace_shutdown(&trace_probe_ops, 0);
+	ftrace_shutdown(&trace_probe_ops, 0);
 
 	ftrace_probe_registered = 0;
 }
@@ -4366,12 +4366,15 @@ core_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 /* Keep as macros so we do not need to define the commands */
 # define ftrace_startup(ops, command)					\
 	({								\
-		(ops)->flags |= FTRACE_OPS_FL_ENABLED;			\
-		0;							\
+		int ___ret = __register_ftrace_function(ops);		\
+		if (!___ret)						\
+			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
+		___ret;							\
 	})
-# define ftrace_shutdown(ops, command) do { } while (0)
+# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
+
 # define ftrace_startup_sysctl() do { } while (0)
 # define ftrace_shutdown_sysctl() do { } while (0)
 
@@ -4780,9 +4783,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_lock);
 
-	ret = __register_ftrace_function(ops);
-	if (!ret)
-		ret = ftrace_startup(ops, 0);
+	ret = ftrace_startup(ops, 0);
 
 	mutex_unlock(&ftrace_lock);
 
@@ -4801,9 +4802,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 	int ret;
 
 	mutex_lock(&ftrace_lock);
-	ret = __unregister_ftrace_function(ops);
-	if (!ret)
-		ftrace_shutdown(ops, 0);
+	ret = ftrace_shutdown(ops, 0);
 	mutex_unlock(&ftrace_lock);
 
 	return ret;
@@ -4997,6 +4996,13 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
 	return NOTIFY_DONE;
 }
 
+/* Just a place holder for function graph */
+static struct ftrace_ops fgraph_ops __read_mostly = {
+	.func = ftrace_stub,
+	.flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
+		 FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
@@ -5023,7 +5029,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_graph_return = retfunc;
 	ftrace_graph_entry = entryfunc;
 
-	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
 out:
 	mutex_unlock(&ftrace_lock);
@@ -5040,7 +5046,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_active--;
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
-	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 78e27e3b52ac..e854f420e033 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -24,6 +24,12 @@ static int total_ref_count;
 static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
 				 struct perf_event *p_event)
 {
+	if (tp_event->perf_perm) {
+		int ret = tp_event->perf_perm(tp_event, p_event);
+		if (ret)
+			return ret;
+	}
+
 	/* The ftrace function trace is allowed only for root. */
 	if (ftrace_event_is_function(tp_event) &&
 	    perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
@@ -173,7 +179,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 int perf_trace_init(struct perf_event *p_event)
 {
 	struct ftrace_event_call *tp_event;
-	int event_id = p_event->attr.config;
+	u64 event_id = p_event->attr.config;
 	int ret = -EINVAL;
 
 	mutex_lock(&event_mutex);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f919a2e21bf3..a11800ae96de 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2314,6 +2314,9 @@ int event_trace_del_tracer(struct trace_array *tr)
 	/* Disable any running events */
 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
 
+	/* Access to events are within rcu_read_lock_sched() */
+	synchronize_sched();
+
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
 	debugfs_remove_recursive(tr->event_dir);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e4b6d11bdf78..ea90eb5f6f17 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -431,11 +431,6 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -474,11 +469,6 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
-	/*
-	 * Callers expect the event to be completely disabled on
-	 * return, so wait for current handlers to finish.
-	 */
-	synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)