Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6ea5a1ae6a98..8e6a0b5c9940 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3092,7 +3092,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_graph_active;
+static int ftrace_graph_active;
 static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -3244,7 +3244,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	mutex_lock(&ftrace_lock);
 
 	/* we currently allow only one tracer registered at a time */
-	if (atomic_read(&ftrace_graph_active)) {
+	if (ftrace_graph_active) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -3252,10 +3252,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
 	register_pm_notifier(&ftrace_suspend_notifier);
 
-	atomic_inc(&ftrace_graph_active);
+	ftrace_graph_active++;
 	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_graph_active);
+		ftrace_graph_active--;
 		goto out;
 	}
 
@@ -3273,10 +3273,10 @@ void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_lock);
 
-	if (!unlikely(atomic_read(&ftrace_graph_active)))
+	if (unlikely(!ftrace_graph_active))
 		goto out;
 
-	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_active--;
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
@@ -3290,7 +3290,7 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_graph_active)) {
+	if (ftrace_graph_active) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
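
The substance of the change: ftrace_graph_active drops from atomic_t to a plain int, and the increments and decrements shown above happen with mutex_lock(&ftrace_lock) held, so the atomic operations added no extra protection. Below is a minimal userspace sketch of that pattern, not code from the patch; demo_lock, demo_active, demo_register and demo_unregister are hypothetical names, with a pthread mutex standing in for ftrace_lock.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_active;			/* protected by demo_lock */

/* Allow only one registered user at a time, echoing the -EBUSY check above. */
static int demo_register(void)
{
	int ret = 0;

	pthread_mutex_lock(&demo_lock);
	if (demo_active) {		/* read under the lock: no atomic_read() needed */
		ret = -1;
		goto out;
	}
	demo_active++;			/* write under the lock: no atomic_inc() needed */
out:
	pthread_mutex_unlock(&demo_lock);
	return ret;
}

static void demo_unregister(void)
{
	pthread_mutex_lock(&demo_lock);
	if (demo_active)
		demo_active--;		/* write under the lock: no atomic_dec() needed */
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	printf("first register:   %d\n", demo_register());	/* 0  */
	printf("second register:  %d\n", demo_register());	/* -1 */
	demo_unregister();
	printf("after unregister: %d\n", demo_register());	/* 0  */
	return 0;
}

The only requirement for the plain counter is that every path touching it holds the same lock; if any reader runs lockless it sees a best-effort snapshot, which is acceptable for a flag-style check.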