Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r--	kernel/trace/trace_syscalls.c	24
1 file changed, 19 insertions, 5 deletions
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 26f9a8679d3d..a2a3af29c943 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -5,7 +5,11 @@
 #include "trace_output.h"
 #include "trace.h"
 
-static atomic_t refcount;
+/* Keep a counter of the syscall tracing users */
+static int refcount;
+
+/* Prevent from races on thread flags toggling */
+static DEFINE_MUTEX(syscall_trace_lock);
 
 /* Option to display the parameters types */
 enum {
@@ -96,9 +100,11 @@ void start_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
+	mutex_lock(&syscall_trace_lock);
+
 	/* Don't enable the flag on the tasks twice */
-	if (atomic_inc_return(&refcount) != 1)
-		return;
+	if (++refcount != 1)
+		goto unlock;
 
 	arch_init_ftrace_syscalls();
 	read_lock_irqsave(&tasklist_lock, flags);
@@ -108,6 +114,9 @@ void start_ftrace_syscalls(void)
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void stop_ftrace_syscalls(void)
@@ -115,9 +124,11 @@ void stop_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
+	mutex_lock(&syscall_trace_lock);
+
 	/* There are perhaps still some users */
-	if (atomic_dec_return(&refcount))
-		return;
+	if (--refcount)
+		goto unlock;
 
 	read_lock_irqsave(&tasklist_lock, flags);
 
@@ -126,6 +137,9 @@ void stop_ftrace_syscalls(void)
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void ftrace_syscall_enter(struct pt_regs *regs)
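
For context, here is a minimal standalone sketch of the locking pattern the patch switches to: a plain integer reference count whose transitions are serialized by a mutex, so that only the first start and the last stop perform the real enable/disable work. This is an illustration in userspace C (compile with gcc -pthread), not code from the commit; the names start_tracing, stop_tracing and trace_lock are hypothetical.

/*
 * Illustrative sketch only -- not part of the commit.  It mirrors, in
 * userspace terms, the mutex-protected reference count used by
 * start_ftrace_syscalls()/stop_ftrace_syscalls() in the diff above.
 */
#include <pthread.h>
#include <stdio.h>

static int refcount;
static pthread_mutex_t trace_lock = PTHREAD_MUTEX_INITIALIZER;

static void start_tracing(void)
{
	pthread_mutex_lock(&trace_lock);

	/* Only the first user does the actual enable work */
	if (++refcount == 1)
		printf("tracing enabled\n");

	pthread_mutex_unlock(&trace_lock);
}

static void stop_tracing(void)
{
	pthread_mutex_lock(&trace_lock);

	/* Only the last user does the actual disable work */
	if (--refcount == 0)
		printf("tracing disabled\n");

	pthread_mutex_unlock(&trace_lock);
}

int main(void)
{
	start_tracing();	/* first user: enables         */
	start_tracing();	/* second user: no-op          */
	stop_tracing();		/* one user still left: no-op  */
	stop_tracing();		/* last user: disables         */
	return 0;
}

As the new in-file comment notes, the point of the mutex is to prevent races on the thread-flag toggling itself, which the previous atomic counter on its own did not serialize.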