author     Ingo Molnar <mingo@elte.hu>   2009-03-17 03:37:20 -0400
committer  Ingo Molnar <mingo@elte.hu>   2009-03-17 03:37:20 -0400
commit     e4106133341121aeaad732d4613de06d9033b5ac (patch)
tree       8d7ecb57fdefdfac5bdff6ee3a82485e2ea4340a
parent     7243f2145a9b06e5cf9a49fc9b8b9a4fff6fb42e (diff)
parent     2fc1dfbe17e7705c55b7a99da995fa565e26f151 (diff)

Merge branch 'tracing/syscalls' into tracing/core
-rw-r--r--  kernel/trace/Kconfig            1
-rw-r--r--  kernel/trace/trace.c            5
-rw-r--r--  kernel/trace/trace_selftest.c  16
-rw-r--r--  kernel/trace/trace_syscalls.c  41

4 files changed, 34 insertions, 29 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 95a0ad191f19..b0a46f889659 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -182,6 +182,7 @@ config FTRACE_SYSCALLS
 	bool "Trace syscalls"
 	depends on HAVE_FTRACE_SYSCALLS
 	select TRACING
+	select KALLSYMS
 	help
 	  Basic tracer to catch the syscall entry and exit events.
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index efe3202c0209..ae32d3b99b4b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2494,7 +2494,7 @@ static int tracing_set_tracer(const char *buf)
 	if (!ring_buffer_expanded) {
 		ret = tracing_resize_ring_buffer(trace_buf_size);
 		if (ret < 0)
-			return ret;
+			goto out;
 		ret = 0;
 	}
 
@@ -4125,7 +4125,8 @@ __init static int tracer_alloc_buffers(void)
 				       &trace_panic_notifier);
 
 	register_die_notifier(&trace_die_notifier);
-	ret = 0;
+
+	return 0;
 
 out_free_cpumask:
 	free_cpumask_var(tracing_reader_cpumask);
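
Both trace.c hunks are about exit paths: tracing_set_tracer() now jumps to its out: label so the lock it holds is released on the resize error instead of returning directly, and tracer_alloc_buffers() returns 0 on success instead of falling through into the out_free_cpumask: cleanup. A minimal userspace sketch of the same two rules, with hypothetical names and a pthread mutex standing in for the kernel locking:

#include <pthread.h>
#include <stdlib.h>

/*
 * Hypothetical userspace analogue of the two trace.c fixes (not kernel
 * code): an error inside the locked region jumps to the unlock label
 * instead of returning with the mutex held, and the success path
 * returns before the cleanup labels instead of falling through into them.
 */
static pthread_mutex_t setup_lock = PTHREAD_MUTEX_INITIALIZER;

static int alloc_two_buffers(size_t size, void **a, void **b)
{
	int ret = -1;

	pthread_mutex_lock(&setup_lock);

	*a = malloc(size);
	if (!*a)
		goto out_unlock;	/* not "return -1": the lock must be dropped */

	*b = malloc(size);
	if (!*b)
		goto out_free_a;

	pthread_mutex_unlock(&setup_lock);
	return 0;			/* success: never fall into the cleanup labels */

out_free_a:
	free(*a);
	*a = NULL;
out_unlock:
	pthread_mutex_unlock(&setup_lock);
	return ret;
}

int main(void)
{
	void *a = NULL, *b = NULL;

	if (alloc_two_buffers(64, &a, &b))
		return EXIT_FAILURE;
	free(a);
	free(b);
	return 0;
}
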
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index f907a2b29028..a2ca6f0fef9b 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -414,7 +414,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	ret = tracer_init(trace, tr);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
-		goto out;
+		goto out_no_start;
 	}
 
 	/* reset the max latency */
@@ -432,21 +432,16 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	tracing_stop();
 	/* check both trace buffers */
 	ret = trace_test_buffer(tr, NULL);
-	if (ret) {
-		tracing_start();
+	if (ret)
 		goto out;
-	}
 
 	ret = trace_test_buffer(&max_tr, &count);
-	if (ret) {
-		tracing_start();
+	if (ret)
 		goto out;
-	}
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
 		ret = -1;
-		tracing_start();
 		goto out;
 	}
 
@@ -475,9 +470,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 		goto out;
 	}
 
- out:
-	trace->reset(tr);
+out:
 	tracing_start();
+out_no_start:
+	trace->reset(tr);
 	tracing_max_latency = save_max;
 
 	return ret;
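
The selftest hunks drop the tracing_start() call that each failure branch used to make before jumping to out:, and split the exit into two labels: out: restarts tracing exactly once and falls through into out_no_start:, which only resets the tracer and is the target when tracer_init() fails before tracing was ever stopped. A standalone sketch of that two-label teardown, with hypothetical stub functions in place of the real tracer hooks:

#include <stdio.h>

/* Hypothetical stand-ins for the tracer hooks used by the selftest. */
static void tracing_stop(void)   { puts("tracing stopped"); }
static void tracing_start(void)  { puts("tracing restarted"); }
static void tracer_reset(void)   { puts("tracer reset"); }

/*
 * Two-label teardown, as in the fixed selftest: failures after
 * tracing_stop() jump to "out", which restarts tracing exactly once
 * and falls through into the reset that every path needs; a failure
 * before tracing was stopped jumps straight to "out_no_start".
 */
static int run_selftest(int init_fails, int buffer_fails)
{
	int ret;

	ret = init_fails ? -1 : 0;
	if (ret)
		goto out_no_start;	/* nothing was stopped: skip tracing_start() */

	/* ... exercise the tracer ... */
	tracing_stop();

	ret = buffer_fails ? -1 : 0;
	if (ret)
		goto out;		/* no per-branch tracing_start() anymore */

out:
	tracing_start();
out_no_start:
	tracer_reset();
	return ret;
}

int main(void)
{
	run_selftest(0, 0);	/* success: stop, restart, reset */
	run_selftest(0, 1);	/* buffer check fails: still exactly one restart */
	run_selftest(1, 0);	/* init fails: reset only */
	return 0;
}
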
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index c72e599230ff..a2a3af29c943 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -5,9 +5,13 @@
 #include "trace_output.h"
 #include "trace.h"
 
-static atomic_t refcount;
+/* Keep a counter of the syscall tracing users */
+static int refcount;
 
-/* Our two options */
+/* Prevent from races on thread flags toggling */
+static DEFINE_MUTEX(syscall_trace_lock);
+
+/* Option to display the parameters types */
 enum {
 	TRACE_SYSCALLS_OPT_TYPES = 0x1,
 };
@@ -18,7 +22,7 @@ static struct tracer_opt syscalls_opts[] = {
 };
 
 static struct tracer_flags syscalls_flags = {
-	.val = 0, /* By default: no args types */
+	.val = 0, /* By default: no parameters types */
 	.opts = syscalls_opts
 };
 
@@ -96,8 +100,11 @@ void start_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
-	if (atomic_inc_return(&refcount) != 1)
-		goto out;
+	mutex_lock(&syscall_trace_lock);
+
+	/* Don't enable the flag on the tasks twice */
+	if (++refcount != 1)
+		goto unlock;
 
 	arch_init_ftrace_syscalls();
 	read_lock_irqsave(&tasklist_lock, flags);
@@ -107,8 +114,9 @@ void start_ftrace_syscalls(void)
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
-out:
-	atomic_dec(&refcount);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void stop_ftrace_syscalls(void)
@@ -116,8 +124,11 @@ void stop_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
-	if (atomic_dec_return(&refcount))
-		goto out;
+	mutex_lock(&syscall_trace_lock);
+
+	/* There are perhaps still some users */
+	if (--refcount)
+		goto unlock;
 
 	read_lock_irqsave(&tasklist_lock, flags);
 
@@ -126,8 +137,9 @@ void stop_ftrace_syscalls(void)
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
-out:
-	atomic_inc(&refcount);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void ftrace_syscall_enter(struct pt_regs *regs)
@@ -137,12 +149,9 @@ void ftrace_syscall_enter(struct pt_regs *regs)
 	struct ring_buffer_event *event;
 	int size;
 	int syscall_nr;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 
-	cpu = raw_smp_processor_id();
-
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
 		return;
@@ -168,12 +177,9 @@ void ftrace_syscall_exit(struct pt_regs *regs)
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	int syscall_nr;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 
-	cpu = raw_smp_processor_id();
-
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
 		return;
@@ -201,6 +207,7 @@ static int init_syscall_tracer(struct trace_array *tr)
 static void reset_syscall_tracer(struct trace_array *tr)
 {
 	stop_ftrace_syscalls();
+	tracing_reset_online_cpus(tr);
 }
 
 static struct trace_event syscall_enter_event = {
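
The heart of the trace_syscalls.c change is the switch from an atomic_t, which both the early-return and the normal paths then adjusted back at the old out: labels, to a plain integer reference count guarded by syscall_trace_lock, so that only the first start and the last stop walk the task list and toggle the per-task flag. A minimal userspace analogue of that pattern built on pthreads, with hypothetical names rather than the kernel API:

#include <pthread.h>
#include <stdio.h>

/* Plain counter guarded by a mutex: only the first start and the
 * last stop actually toggle the (simulated) per-task flag. */
static int refcount;
static pthread_mutex_t trace_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_flag_on_all_tasks(int on)
{
	printf("syscall tracing flag -> %d\n", on);
}

void start_tracing(void)
{
	pthread_mutex_lock(&trace_lock);
	/* Don't enable the flag on the tasks twice */
	if (++refcount == 1)
		set_flag_on_all_tasks(1);
	pthread_mutex_unlock(&trace_lock);
}

void stop_tracing(void)
{
	pthread_mutex_lock(&trace_lock);
	/* Only the last user clears the flag */
	if (--refcount == 0)
		set_flag_on_all_tasks(0);
	pthread_mutex_unlock(&trace_lock);
}

int main(void)
{
	start_tracing();	/* flag -> 1 */
	start_tracing();	/* second user: no toggle */
	stop_tracing();		/* still one user: no toggle */
	stop_tracing();		/* flag -> 0 */
	return 0;
}

Because enabling the flag walks every thread under tasklist_lock, a sleeping mutex is a natural fit for serializing start/stop, and the counter itself no longer needs to be atomic once every transition happens under that same lock.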